
vendor: cloud.google.com/go v0.59.0 to remove some test-deps

commit https://github.com/googleapis/google-cloud-go/commit/ad4f9324cdd7f95cfe74299773711a068d7a3c85
removes some of the test dependencies from cloud.google.com/go.

The only other relevant changes in the vendored code are from this commit:
https://github.com/googleapis/google-cloud-go/commit/dccc6b4b7113c54191c37cc0c75a1ad3d632b9c8

Full diff: https://github.com/googleapis/google-cloud-go/compare/v0.44.3...v0.59.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn 3 years ago
Parent
Commit
ace8c7896c
100 changed files with 566 additions and 17261 deletions
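
As a quick sanity check on the bump, the resolved module version and the relevant upstream commits can be inspected locally. This is a sketch, assuming a checkout of this repository (with its `vendor.mod`) and a separate clone of googleapis/google-cloud-go:

```
# Confirm which version of cloud.google.com/go the vendor module file now resolves.
go list -modfile=vendor.mod -m cloud.google.com/go

# In a clone of googleapis/google-cloud-go, list upstream commits between the two
# tags that touch compute/metadata, the main piece of Go code vendored from this module.
git log --oneline v0.44.3..v0.59.0 -- compute/metadata
```
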
  1. 1 5
      vendor.mod
  2. 30 19
      vendor.sum
  3. 11 0
      vendor/cloud.google.com/go/.gitignore
  4. 223 0
      vendor/cloud.google.com/go/CHANGES.md
  5. 42 30
      vendor/cloud.google.com/go/CONTRIBUTING.md
  6. 89 138
      vendor/cloud.google.com/go/README.md
  7. 120 10
      vendor/cloud.google.com/go/RELEASING.md
  8. 36 31
      vendor/cloud.google.com/go/compute/metadata/metadata.go
  9. 0 0
      vendor/cloud.google.com/go/doc.go
  10. 13 0
      vendor/cloud.google.com/go/internal/version/update_version.sh
  11. 1 1
      vendor/cloud.google.com/go/internal/version/version.go
  12. 0 17
      vendor/cloud.google.com/go/issue_template.md
  13. 0 150
      vendor/cloud.google.com/go/regen-gapic.sh
  14. 0 2
      vendor/cloud.google.com/go/tools.go
  15. 0 5
      vendor/github.com/BurntSushi/toml/.gitignore
  16. 0 15
      vendor/github.com/BurntSushi/toml/.travis.yml
  17. 0 3
      vendor/github.com/BurntSushi/toml/COMPATIBLE
  18. 0 21
      vendor/github.com/BurntSushi/toml/COPYING
  19. 0 19
      vendor/github.com/BurntSushi/toml/Makefile
  20. 0 218
      vendor/github.com/BurntSushi/toml/README.md
  21. 0 509
      vendor/github.com/BurntSushi/toml/decode.go
  22. 0 121
      vendor/github.com/BurntSushi/toml/decode_meta.go
  23. 0 27
      vendor/github.com/BurntSushi/toml/doc.go
  24. 0 568
      vendor/github.com/BurntSushi/toml/encode.go
  25. 0 19
      vendor/github.com/BurntSushi/toml/encoding_types.go
  26. 0 18
      vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
  27. 0 953
      vendor/github.com/BurntSushi/toml/lex.go
  28. 0 592
      vendor/github.com/BurntSushi/toml/parse.go
  29. 0 1
      vendor/github.com/BurntSushi/toml/session.vim
  30. 0 91
      vendor/github.com/BurntSushi/toml/type_check.go
  31. 0 242
      vendor/github.com/BurntSushi/toml/type_fields.go
  32. 0 3
      vendor/golang.org/x/exp/AUTHORS
  33. 0 3
      vendor/golang.org/x/exp/CONTRIBUTORS
  34. 0 27
      vendor/golang.org/x/exp/LICENSE
  35. 0 22
      vendor/golang.org/x/exp/PATENTS
  36. 0 624
      vendor/golang.org/x/exp/apidiff/README.md
  37. 0 220
      vendor/golang.org/x/exp/apidiff/apidiff.go
  38. 0 361
      vendor/golang.org/x/exp/apidiff/compatibility.go
  39. 0 219
      vendor/golang.org/x/exp/apidiff/correspondence.go
  40. 0 79
      vendor/golang.org/x/exp/apidiff/messageset.go
  41. 0 71
      vendor/golang.org/x/exp/apidiff/report.go
  42. 0 142
      vendor/golang.org/x/exp/cmd/apidiff/main.go
  43. 0 186
      vendor/golang.org/x/tools/go/ast/inspector/inspector.go
  44. 0 220
      vendor/golang.org/x/tools/go/ast/inspector/typeof.go
  45. 0 198
      vendor/golang.org/x/tools/go/buildutil/allpackages.go
  46. 0 113
      vendor/golang.org/x/tools/go/buildutil/fakecontext.go
  47. 0 103
      vendor/golang.org/x/tools/go/buildutil/overlay.go
  48. 0 79
      vendor/golang.org/x/tools/go/buildutil/tags.go
  49. 0 212
      vendor/golang.org/x/tools/go/buildutil/util.go
  50. 0 220
      vendor/golang.org/x/tools/go/internal/cgo/cgo.go
  51. 0 39
      vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
  52. 0 49
      vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
  53. 0 204
      vendor/golang.org/x/tools/go/loader/doc.go
  54. 0 1086
      vendor/golang.org/x/tools/go/loader/loader.go
  55. 0 124
      vendor/golang.org/x/tools/go/loader/util.go
  56. 0 221
      vendor/golang.org/x/tools/go/packages/doc.go
  57. 0 101
      vendor/golang.org/x/tools/go/packages/external.go
  58. 0 1096
      vendor/golang.org/x/tools/go/packages/golist.go
  59. 0 575
      vendor/golang.org/x/tools/go/packages/golist_overlay.go
  60. 0 57
      vendor/golang.org/x/tools/go/packages/loadmode_string.go
  61. 0 1233
      vendor/golang.org/x/tools/go/packages/packages.go
  62. 0 59
      vendor/golang.org/x/tools/go/packages/visit.go
  63. 0 46
      vendor/golang.org/x/tools/go/types/typeutil/callee.go
  64. 0 31
      vendor/golang.org/x/tools/go/types/typeutil/imports.go
  65. 0 313
      vendor/golang.org/x/tools/go/types/typeutil/map.go
  66. 0 72
      vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
  67. 0 52
      vendor/golang.org/x/tools/go/types/typeutil/ui.go
  68. 0 21
      vendor/golang.org/x/tools/internal/packagesinternal/packages.go
  69. 0 1358
      vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
  70. 0 152
      vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go
  71. 0 45
      vendor/golang.org/x/tools/internal/typesinternal/types.go
  72. 0 20
      vendor/honnef.co/go/tools/LICENSE
  73. 0 48
      vendor/honnef.co/go/tools/arg/arg.go
  74. 0 129
      vendor/honnef.co/go/tools/callgraph/callgraph.go
  75. 0 35
      vendor/honnef.co/go/tools/callgraph/static/static.go
  76. 0 181
      vendor/honnef.co/go/tools/callgraph/util.go
  77. 0 15
      vendor/honnef.co/go/tools/cmd/staticcheck/README.md
  78. 0 27
      vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go
  79. 0 162
      vendor/honnef.co/go/tools/config/config.go
  80. 0 10
      vendor/honnef.co/go/tools/config/example.conf
  81. 0 112
      vendor/honnef.co/go/tools/deprecated/stdlib.go
  82. 0 56
      vendor/honnef.co/go/tools/functions/concrete.go
  83. 0 150
      vendor/honnef.co/go/tools/functions/functions.go
  84. 0 50
      vendor/honnef.co/go/tools/functions/loops.go
  85. 0 123
      vendor/honnef.co/go/tools/functions/pure.go
  86. 0 24
      vendor/honnef.co/go/tools/functions/terminates.go
  87. 0 46
      vendor/honnef.co/go/tools/go/types/typeutil/callee.go
  88. 0 29
      vendor/honnef.co/go/tools/go/types/typeutil/identical.go
  89. 0 31
      vendor/honnef.co/go/tools/go/types/typeutil/imports.go
  90. 0 315
      vendor/honnef.co/go/tools/go/types/typeutil/map.go
  91. 0 72
      vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go
  92. 0 52
      vendor/honnef.co/go/tools/go/types/typeutil/ui.go
  93. 0 68
      vendor/honnef.co/go/tools/internal/sharedcheck/lint.go
  94. 0 28
      vendor/honnef.co/go/tools/lint/LICENSE
  95. 0 44
      vendor/honnef.co/go/tools/lint/generated.go
  96. 0 679
      vendor/honnef.co/go/tools/lint/lint.go
  97. 0 361
      vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go
  98. 0 128
      vendor/honnef.co/go/tools/lint/lintutil/format/format.go
  99. 0 394
      vendor/honnef.co/go/tools/lint/lintutil/util.go
  100. 0 11
      vendor/honnef.co/go/tools/printf/fuzz.go

+ 1 - 5
vendor.mod

@@ -87,10 +87,8 @@ require (
 )
 
 require (
-	cloud.google.com/go/bigquery v0.0.0-00010101000000-000000000000 // indirect
 	code.cloudfoundry.org/clock v1.0.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
-	github.com/BurntSushi/toml v0.3.1 // indirect
 	github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
@@ -138,7 +136,6 @@ require (
 	github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect
 	go.opencensus.io v0.23.0 // indirect
 	golang.org/x/crypto v0.0.0-20211202192323-5770296d904e // indirect
-	golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 // indirect
 	golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect
 	golang.org/x/mod v0.4.1 // indirect
 	golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect
@@ -147,12 +144,11 @@ require (
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/api v0.46.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc // indirect
 	labix.org/v2/mgo v0.0.0-20140701140051-000000000287 // indirect
 )
 
 replace (
-	cloud.google.com/go => cloud.google.com/go v0.44.3
+	cloud.google.com/go => cloud.google.com/go v0.59.0
 	cloud.google.com/go/bigquery => cloud.google.com/go/bigquery v1.0.0
 	cloud.google.com/go/logging => cloud.google.com/go/logging v1.0.1-0.20190813144457-ceeb313ad77b
 	github.com/armon/go-metrics => github.com/armon/go-metrics v0.0.0-20150106224455-eb0af217e5e9
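
After editing the `replace` directive as shown in the hunk above, `vendor.sum` and the `vendor/` tree are regenerated from `vendor.mod`. A minimal sketch, assuming the plain `-modfile` workflow; the repository may wrap these steps in its own vendoring script:

```
# Re-resolve dependencies and drop unused requirements against the vendor module file.
go mod tidy -modfile=vendor.mod

# Rewrite the vendor/ directory to match vendor.mod and vendor.sum.
go mod vendor -modfile=vendor.mod

# Only the expected vendor changes should remain in the working tree.
git status vendor.mod vendor.sum vendor/
```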

+ 30 - 19
vendor.sum

@@ -1,14 +1,18 @@
 bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
-cloud.google.com/go v0.44.3 h1:0sMegbmn/8uTwpNkB0q9cLEpZ2W5a6kl+wtBQgPWBJQ=
-cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.59.0 h1:BM3svUDU3itpc2m5cu5wCyThIYNDlFlts9GASw31GW8=
+cloud.google.com/go v0.59.0/go.mod h1:qJxNOVCRTxHfwLhvDxxSI9vQc1zI59b9pEglp1Iv60E=
 cloud.google.com/go/bigquery v1.0.0 h1:uzb+IRbJNYyU4lgbpqz7KKVjKO8XcF04rVFk6qbNTbM=
 cloud.google.com/go/bigquery v1.0.0/go.mod h1:W6nZUO55RX1ze8f54muIveLNA7ouiqcTlNELudKtFaM=
-cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
 cloud.google.com/go/logging v1.0.1-0.20190813144457-ceeb313ad77b h1:qJHA070EiswSCrfU+IRfsFgK6laSpbaVMknaQmvMLUA=
 cloud.google.com/go/logging v1.0.1-0.20190813144457-ceeb313ad77b/go.mod h1:V1cc3ogwobYzQq5f2R7DS/GvRIrI4FKj01Gs5glwAls=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o=
 code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -24,7 +28,6 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0 h1:cOjLyhBhe91glgZZNbQUg9BJC57l6BiSKov0Ivv7k0U=
@@ -72,6 +75,9 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
 github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/cilium/ebpf v0.6.2 h1:iHsfF/t4aW4heW2YKfeHrVPGdtYTL4C4KocpM8KTSnI=
@@ -194,6 +200,7 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
@@ -229,7 +236,7 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4er
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
 github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -243,7 +250,7 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
@@ -296,6 +303,7 @@ github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c h1:nQcv325vxv2fFHJs
 github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c/go.mod h1:fHzc09UnyJyqyW+bFuq864eh+wC7dj65aXmXLRe5to0=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
 github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
@@ -313,7 +321,6 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -549,17 +556,17 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm
 golang.org/x/crypto v0.0.0-20211202192323-5770296d904e h1:MUP6MR3rJ7Gk9LEia0LP2ytiH6MuCfs7qYz+47jGdD8=
 golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw=
 golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=
 golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
 golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
 golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
 golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
@@ -590,23 +597,25 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -634,22 +643,25 @@ golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGm
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
@@ -662,7 +674,6 @@ google.golang.org/api v0.8.0 h1:VGGbLNyPF7dvYHhcUGYBBGCRDDK0RRJAI6KCvo0CL+E=
 google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
 google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
 google.golang.org/genproto v0.0.0-20200227132054-3f1135a288c9 h1:Koy0f8zyrEVfIHetH7wjP5mQLUXiqDpubSg8V1fAxqc=
@@ -705,7 +716,6 @@ gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
 gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
 gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
 k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
@@ -720,7 +730,8 @@ k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAG
 k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 labix.org/v2/mgo v0.0.0-20140701140051-000000000287 h1:L0cnkNl4TfAXzvdrqsYEmxOHOCv2p5I3taaReO8BWFs=
 labix.org/v2/mgo v0.0.0-20140701140051-000000000287/go.mod h1:Lg7AYkt1uXJoR9oeSZ3W/8IXLdvOfIITgZnommstyz4=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
 sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
 sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=

+ 11 - 0
vendor/cloud.google.com/go/.gitignore

@@ -0,0 +1,11 @@
+# Editors
+.idea
+.vscode
+*.swp
+
+# Test files
+*.test
+coverage.txt
+
+# Other
+.DS_Store

+ 223 - 0
vendor/cloud.google.com/go/CHANGES.md

@@ -1,5 +1,228 @@
 # Changes
 
+## v0.59.0
+
+### Announcements
+
+googleapis/google-cloud-go has moved its source of truth to GitHub and is no longer a mirror. This means that our
+contributing process has changed a bit. We will now be conducting all code reviews on GitHub which means we now accept
+pull requests! If you have a version of the codebase previously checked out you may wish to update your git remote to
+point to GitHub.
+
+### Changes
+
+- all:
+  - Remove dependency on honnef.co/go/tools.
+  - Update our contributing instructions now that we use GitHub for reviews.
+  - Remove some un-inclusive terminology.
+- compute/metadata: 
+  - Pass cancelable context to DNS lookup.
+- .github:
+  - Update issue/PR templates.
+- internal:
+  - Bump several clients to GA.
+  - Fix GoDoc badge source.
+  - Several automation changes related to the move to GitHub.
+  - Start generating a client for asset v1p5beta1.
+- Various updates to autogenerated clients.
+
+## v0.58.0
+
+### Deprecation notice
+
+- `cloud.google.com/go/monitoring/apiv3` has been deprecated due to breaking
+  changes in the API. Please migrate to `cloud.google.com/go/monitoring/apiv3/v2`.
+
+### Changes
+
+- all:
+  - The remaining uses of gtransport.Dial have been removed.
+  - The `genproto` dependency has been updated to a version that makes use of
+    new `protoreflect` library. For more information on these protobuf changes
+    please see the following post from the official Go blog:
+    https://blog.golang.org/protobuf-apiv2.
+- internal:
+  - Started generation of datastore admin v1 client.
+  - Updated protobuf version used for generation to 3.12.X.
+  - Update the release levels for several APIs.
+  - Generate clients with protoc-gen-go@v1.4.1.
+- monitoring:
+  - Re-enable generation of monitoring/apiv3 under v2 directory (see deprecation
+    notice above).
+- profiler:
+  - Fixed flakiness in tests.
+- Various updates to autogenerated clients.
+
+## v0.57.0
+
+- all:
+  - Update module dependency `google.golang.org/api` to `v0.21.0`.
+- errorreporting:
+  - Add exported SetGoogleClientInfo wrappers to manual file.
+- expr/v1alpha1:
+  - Deprecate client. This client will be removed in a future release.
+- internal:
+  - Fix possible data race in TestTracer.
+  - Pin versions of tools used for generation.
+  - Correct the release levels for BigQuery APIs.
+  - Start generation osconfig v1.
+- longrunning:
+  - Add exported SetGoogleClientInfo wrappers to manual file.
+- monitoring:
+  - Stop generation of monitoring/apiv3 because of incoming breaking change.
+- trace:
+  - Add exported SetGoogleClientInfo wrappers to manual file.
+- Various updates to autogenerated clients.
+
+## v0.56.0
+
+- secretmanager:
+  - add IAM helper
+- profiler:
+  - try all us-west1 zones for integration tests
+- internal:
+  - add config to generate webrisk v1
+  - add repo and commit to buildcop invocation
+  - add recaptchaenterprise v1 generation config
+  - update microgenerator to v0.12.5
+  - add datacatalog client
+  - start generating security center settings v1beta
+  - start generating osconfig agentendpoint v1
+  - setup generation for bigquery/connection/v1beta1
+- all:
+  - increase continuous testing timeout to 45m
+  - various updates to autogenerated clients.
+
+## v0.55.0
+
+- Various updates to autogenerated clients.
+
+## v0.54.0
+
+- all:
+  - remove unused golang.org/x/exp from mod file
+  - update godoc.org links to pkg.go.dev
+- compute/metadata:
+  - use defaultClient when http.Client is nil
+  - remove subscribeClient
+- iam:
+  - add support for v3 policy and IAM conditions
+- Various updates to autogenerated clients.
+
+## v0.53.0
+
+- all: most clients now use transport/grpc.DialPool rather than Dial (see #1777 for outliers).
+  - Connection pooling now does not use the deprecated (and soon to be removed) gRPC load balancer API.
+- profiler: remove symbolization (drops support for go1.10)
+- Various updates to autogenerated clients.
+
+## v0.52.0
+
+- internal/gapicgen: multiple improvements related to library generation.
+- compute/metadata: unset ResponseHeaderTimeout in defaultClient
+- docs: fix link to KMS in README.md
+- Various updates to autogenerated clients.
+
+## v0.51.0
+
+- secretmanager:
+  - add IAM helper for generic resource IAM handle
+- cloudbuild:
+  - migrate to microgen in a major version
+- Various updates to autogenerated clients.
+
+## v0.50.0
+
+- profiler:
+  - Support disabling CPU profile collection.
+  - Log when a profile creation attempt begins.
+- compute/metadata:
+  - Fix panic on malformed URLs.
+  - InstanceName returns actual instance name.
+- Various updates to autogenerated clients.
+
+## v0.49.0
+
+- functions/metadata:
+  - Handle string resources in JSON unmarshaller.
+- Various updates to autogenerated clients.
+
+## v0.48.0
+
+- Various updates to autogenerated clients
+
+## v0.47.0
+
+This release drops support for Go 1.9 and Go 1.10: we continue to officially
+support Go 1.11, Go 1.12, and Go 1.13.
+
+- Various updates to autogenerated clients.
+- Add cloudbuild/apiv1 client.
+
+## v0.46.3
+
+This is an empty release that was created solely to aid in storage's module
+carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## v0.46.2
+
+This is an empty release that was created solely to aid in spanner's module
+carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## v0.46.1
+
+This is an empty release that was created solely to aid in firestore's module
+carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## v0.46.0
+
+- spanner:
+  - Retry "Session not found" for read-only transactions.
+  - Retry aborted PDMLs.
+- spanner/spannertest:
+  - Fix a bug that was causing 0X-prefixed number to be parsed incorrectly.
+- storage:
+  - Add HMACKeyOptions.
+  - Remove *REGIONAL from StorageClass documentation. Using MULTI_REGIONAL,
+    DURABLE_REDUCED_AVAILABILITY, and REGIONAL are no longer best practice
+    StorageClasses but they are still acceptable values.
+- trace:
+  - Remove cloud.google.com/go/trace. Package cloud.google.com/go/trace has been
+    marked OBSOLETE for several years: it is now no longer provided. If you
+    relied on this package, please vendor it or switch to using
+    https://cloud.google.com/trace/docs/setup/go (which obsoleted it).
+
+## v0.45.1
+
+This is an empty release that was created solely to aid in pubsub's module
+carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## v0.45.0
+
+- compute/metadata:
+  - Add Email method.
+- storage:
+  - Fix duplicated retry logic.
+  - Add ReaderObjectAttrs.StartOffset.
+  - Support reading last N bytes of a file when a negative range is given, such
+    as `obj.NewRangeReader(ctx, -10, -1)`.
+  - Add HMACKey listing functionality.
+- spanner/spannertest:
+  - Support primary keys with no columns.
+  - Fix MinInt64 parsing.
+  - Implement deletion of key ranges.
+  - Handle reads during a read-write transaction.
+  - Handle returning DATE values.
+- pubsub:
+  - Fix Ack/Modack request size calculation.
+- logging:
+  - Add auto-detection of monitored resources on GAE Standard.
+
+## v0.44.3
+
+This is an empty release that was created solely to aid in bigtable's module
+carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
 ## v0.44.2
 
 This is an empty release that was created solely to aid in bigquery's module

+ 42 - 30
vendor/cloud.google.com/go/CONTRIBUTING.md

@@ -1,35 +1,47 @@
 # Contributing
 
-1. Sign one of the contributor license agreements below.
-1. `go get golang.org/x/review/git-codereview` to install the code reviewing
-tool.
-    1. You will need to ensure that your `GOBIN` directory (by default
-    `$GOPATH/bin`) is in your `PATH` so that git can find the command.
-    1. If you would like, you may want to set up aliases for git-codereview,
-    such that `git codereview change` becomes `git change`. See the
-    [godoc](https://godoc.org/golang.org/x/review/git-codereview) for details.
-    1. Should you run into issues with the git-codereview tool, please note
-    that all error messages will assume that you have set up these aliases.
-1. Get the cloud package by running `go get -d cloud.google.com/go`.
-    1. If you have already checked out the source, make sure that the remote
-    git origin is https://code.googlesource.com/gocloud:
-
-        ```
-        git remote set-url origin https://code.googlesource.com/gocloud
-        ```
-
-1. Make sure your auth is configured correctly by visiting
-https://code.googlesource.com, clicking "Generate Password", and following the
-directions.
-1. Make changes and create a change by running `git codereview change <name>`,
-provide a commit message, and use `git codereview mail` to create a Gerrit CL.
-1. Keep amending to the change with `git codereview change` and mail as your
-receive feedback. Each new mailed amendment will create a new patch set for
-your change in Gerrit.
-    - Note: if your change includes a breaking change, our breaking change
-    detector will cause CI/CD to fail. If your breaking change is acceptable
-    in some way, add BREAKING_CHANGE_ACCEPTABLE=<reason> to cause the
-    detector not to be run and to make it clear why that is acceptable.
+1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose).
+   The issue will be used to discuss the bug or feature and should be created
+   before sending a CL.
+
+1. [Install Go](https://golang.org/dl/).
+    1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`)
+    is in your `PATH`.
+    1. Check it's working by running `go version`.
+        * If it doesn't work, check the install location, usually
+        `/usr/local/go`, is on your `PATH`.
+
+1. Sign one of the
+[contributor license agreements](#contributor-license-agreements) below.
+
+1. Clone the repo:
+    `git clone https://github.com/googleapis/google-cloud-go`
+
+1. Change into the checked out source:
+    `cd google-cloud-go`
+
+1. Fork the repo.
+   
+1. Set your fork as a remote:
+    `git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git`
+
+1. Make changes (see [Formatting](#formatting) and [Style](#style)), commit to
+   your fork.
+
+   Commit messages should follow the
+   [Go project style](https://github.com/golang/go/wiki/CommitMessage). For example:
+   ```
+   functions: add gophers codelab
+   ```
+
+1. Send a pull request with your changes.
+
+1. A maintainer will review the pull request and make comments.
+
+   Prefer adding additional commits over amending and force-pushing since it can
+   be difficult to follow code reviews when the commit history changes.
+
+   Commits will be squashed when they're merged.
 
 ## Integration Tests
 

+ 89 - 138
vendor/cloud.google.com/go/README.md

@@ -1,6 +1,6 @@
 # Google Cloud Client Libraries for Go
 
-[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go)
+[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://pkg.go.dev/cloud.google.com/go)
 
 Go packages for [Google Cloud Platform](https://cloud.google.com) services.
 
@@ -8,11 +8,19 @@ Go packages for [Google Cloud Platform](https://cloud.google.com) services.
 import "cloud.google.com/go"
 ```
 
-To install the packages on your system, *do not clone the repo*. Instead use
+To install the packages on your system, *do not clone the repo*. Instead:
 
-```
-$ go get -u cloud.google.com/go/...
-```
+1. Change to your project directory:
+
+   ```
+   cd /my/cloud/project
+   ```
+1. Get the package you want to use. Some products have their own module, so it's
+   best to `go get` the package(s) you want to use:
+
+   ```
+   $ go get cloud.google.com/go/firestore # Replace with the package you want to use.
+   ```
 
 **NOTE:** Some of these packages are under development, and may occasionally
 make backwards-incompatible changes.
@@ -23,40 +31,46 @@ make backwards-incompatible changes.
 
 Google API                                      | Status       | Package
 ------------------------------------------------|--------------|-----------------------------------------------------------
-[Asset][cloud-asset]                            | alpha        | [`cloud.google.com/go/asset/v1beta`][cloud-asset-ref]
-[BigQuery][cloud-bigquery]                      | stable       | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
-[Bigtable][cloud-bigtable]                      | stable       | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
-[Cloudtasks][cloud-tasks]                       | stable       | [`cloud.google.com/go/cloudtasks/apiv2`][cloud-tasks-ref]
-[Container][cloud-container]                    | stable       | [`cloud.google.com/go/container/apiv1`][cloud-container-ref]
-[ContainerAnalysis][cloud-containeranalysis]    | beta         | [`cloud.google.com/go/containeranalysis/apiv1beta1`][cloud-containeranalysis-ref]
-[Dataproc][cloud-dataproc]                      | stable       | [`cloud.google.com/go/dataproc/apiv1`][cloud-dataproc-ref]
-[Datastore][cloud-datastore]                    | stable       | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
-[Debugger][cloud-debugger]                      | alpha        | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref]
-[Dialogflow][cloud-dialogflow]                  | alpha        | [`cloud.google.com/go/dialogflow/apiv2`][cloud-dialogflow-ref]
-[Data Loss Prevention][cloud-dlp]               | alpha        | [`cloud.google.com/go/dlp/apiv2`][cloud-dlp-ref]
-[ErrorReporting][cloud-errors]                  | alpha        | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
-[Firestore][cloud-firestore]                    | stable       | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
-[IAM][cloud-iam]                                | stable       | [`cloud.google.com/go/iam`][cloud-iam-ref]
-[IoT][cloud-iot]                                | alpha        | [`cloud.google.com/iot/apiv1`][cloud-iot-ref]
-[KMS][cloud-kms]                                | stable       | [`cloud.google.com/go/kms`][cloud-kms-ref]
-[Natural Language][cloud-natural-language]      | stable       | [`cloud.google.com/go/language/apiv1`][cloud-natural-language-ref]
-[Logging][cloud-logging]                        | stable       | [`cloud.google.com/go/logging`][cloud-logging-ref]
-[Monitoring][cloud-monitoring]                  | alpha        | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
-[OS Login][cloud-oslogin]                       | alpha        | [`cloud.google.com/go/oslogin/apiv1`][cloud-oslogin-ref]
-[Pub/Sub][cloud-pubsub]                         | stable       | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
-[Phishing Protection][cloud-phishingprotection] | alpha        | [`cloud.google.com/go/phishingprotection/apiv1betad1`][cloud-phishingprotection-ref]
-[reCAPTCHA Enterprise][cloud-recaptcha]         | alpha        | [`cloud.google.com/go/recaptchaenterprise/apiv1betad1`][cloud-recaptcha-ref]
-[Memorystore][cloud-memorystore]                | alpha        | [`cloud.google.com/go/redis/apiv1`][cloud-memorystore-ref]
-[Scheduler][cloud-scheduler]                    | stable       | [`cloud.google.com/go/scheduler/apiv1`][cloud-scheduler-ref]
-[Spanner][cloud-spanner]                        | stable       | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
-[Speech][cloud-speech]                          | stable       | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
-[Storage][cloud-storage]                        | stable       | [`cloud.google.com/go/storage`][cloud-storage-ref]
-[Talent][cloud-talent]                          | alpha        | [`cloud.google.com/go/talent/apiv4beta1`][cloud-talent-ref]
-[Text To Speech][cloud-texttospeech]            | alpha        | [`cloud.google.com/go/texttospeech/apiv1`][cloud-texttospeech-ref]
-[Trace][cloud-trace]                            | alpha        | [`cloud.google.com/go/trace/apiv2`][cloud-trace-ref]
-[Translate][cloud-translate]                    | stable       | [`cloud.google.com/go/translate`][cloud-translate-ref]
-[Video Intelligence][cloud-video]               | alpha        | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
-[Vision][cloud-vision]                          | stable       | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
+[Asset][cloud-asset]                            | stable       | [`cloud.google.com/go/asset/apiv1`](https://pkg.go.dev/cloud.google.com/go/asset/v1beta)
+[Automl][cloud-automl]                          | stable       | [`cloud.google.com/go/automl/apiv1`](https://pkg.go.dev/cloud.google.com/go/automl/apiv1)
+[BigQuery][cloud-bigquery]                      | stable       | [`cloud.google.com/go/bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery)
+[Bigtable][cloud-bigtable]                      | stable       | [`cloud.google.com/go/bigtable`](https://pkg.go.dev/cloud.google.com/go/bigtable)
+[Cloudbuild][cloud-build]                       | stable       | [`cloud.google.com/go/cloudbuild/apiv1`](https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1)
+[Cloudtasks][cloud-tasks]                       | stable       | [`cloud.google.com/go/cloudtasks/apiv2`](https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2)
+[Container][cloud-container]                    | stable       | [`cloud.google.com/go/container/apiv1`](https://pkg.go.dev/cloud.google.com/go/container/apiv1)
+[ContainerAnalysis][cloud-containeranalysis]    | beta         | [`cloud.google.com/go/containeranalysis/apiv1`](https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1)
+[Dataproc][cloud-dataproc]                      | stable       | [`cloud.google.com/go/dataproc/apiv1`](https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1)
+[Datastore][cloud-datastore]                    | stable       | [`cloud.google.com/go/datastore`](https://pkg.go.dev/cloud.google.com/go/datastore)
+[Debugger][cloud-debugger]                      | stable       | [`cloud.google.com/go/debugger/apiv2`](https://pkg.go.dev/cloud.google.com/go/debugger/apiv2)
+[Dialogflow][cloud-dialogflow]                  | stable       | [`cloud.google.com/go/dialogflow/apiv2`](https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2)
+[Data Loss Prevention][cloud-dlp]               | stable       | [`cloud.google.com/go/dlp/apiv2`](https://pkg.go.dev/cloud.google.com/go/dlp/apiv2)
+[ErrorReporting][cloud-errors]                  | alpha        | [`cloud.google.com/go/errorreporting`](https://pkg.go.dev/cloud.google.com/go/errorreporting)
+[Firestore][cloud-firestore]                    | stable       | [`cloud.google.com/go/firestore`](https://pkg.go.dev/cloud.google.com/go/firestore)
+[IAM][cloud-iam]                                | stable       | [`cloud.google.com/go/iam`](https://pkg.go.dev/cloud.google.com/go/iam)
+[IoT][cloud-iot]                                | stable       | [`cloud.google.com/go/iot/apiv1`](https://pkg.go.dev/cloud.google.com/go/iot/apiv1)
+[IRM][cloud-irm]                                | alpha        | [`cloud.google.com/go/irm/apiv1alpha2`](https://pkg.go.dev/cloud.google.com/go/irm/apiv1alpha2)
+[KMS][cloud-kms]                                | stable       | [`cloud.google.com/go/kms/apiv1`](https://pkg.go.dev/cloud.google.com/go/kms/apiv1)
+[Natural Language][cloud-natural-language]      | stable       | [`cloud.google.com/go/language/apiv1`](https://pkg.go.dev/cloud.google.com/go/language/apiv1)
+[Logging][cloud-logging]                        | stable       | [`cloud.google.com/go/logging`](https://pkg.go.dev/cloud.google.com/go/logging)
+[Memorystore][cloud-memorystore]                | alpha        | [`cloud.google.com/go/redis/apiv1`](https://pkg.go.dev/cloud.google.com/go/redis/apiv1)
+[Monitoring][cloud-monitoring]                  | stable       | [`cloud.google.com/go/monitoring/apiv3`](https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3)
+[OS Login][cloud-oslogin]                       | stable       | [`cloud.google.com/go/oslogin/apiv1`](https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1)
+[Pub/Sub][cloud-pubsub]                         | stable       | [`cloud.google.com/go/pubsub`](https://pkg.go.dev/cloud.google.com/go/pubsub)
+[Phishing Protection][cloud-phishingprotection] | alpha        | [`cloud.google.com/go/phishingprotection/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1)
+[reCAPTCHA Enterprise][cloud-recaptcha]         | alpha        | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1)
+[Recommender][cloud-recommender]                | beta         | [`cloud.google.com/go/recommender/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1)
+[Scheduler][cloud-scheduler]                    | stable       | [`cloud.google.com/go/scheduler/apiv1`](https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1)
+[Securitycenter][cloud-securitycenter]          | stable       | [`cloud.google.com/go/securitycenter/apiv1`](https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1)
+[Spanner][cloud-spanner]                        | stable       | [`cloud.google.com/go/spanner`](https://pkg.go.dev/cloud.google.com/go/spanner)
+[Speech][cloud-speech]                          | stable       | [`cloud.google.com/go/speech/apiv1`](https://pkg.go.dev/cloud.google.com/go/speech/apiv1)
+[Storage][cloud-storage]                        | stable       | [`cloud.google.com/go/storage`](https://pkg.go.dev/cloud.google.com/go/storage)
+[Talent][cloud-talent]                          | alpha        | [`cloud.google.com/go/talent/apiv4beta1`](https://pkg.go.dev/cloud.google.com/go/talent/apiv4beta1)
+[Text To Speech][cloud-texttospeech]            | stable       | [`cloud.google.com/go/texttospeech/apiv1`](https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1)
+[Trace][cloud-trace]                            | stable       | [`cloud.google.com/go/trace/apiv2`](https://pkg.go.dev/cloud.google.com/go/trace/apiv2)
+[Translate][cloud-translate]                    | stable       | [`cloud.google.com/go/translate`](https://pkg.go.dev/cloud.google.com/go/translate)
+[Video Intelligence][cloud-video]               | beta         | [`cloud.google.com/go/videointelligence/apiv1beta2`](https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1beta2)
+[Vision][cloud-vision]                          | stable       | [`cloud.google.com/go/vision/apiv1`](https://pkg.go.dev/cloud.google.com/go/vision/apiv1)
+[Webrisk][cloud-webrisk]                        | alpha        | [`cloud.google.com/go/webrisk/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1)
 
 > **Alpha status**: the API is still being actively developed. As a
 > result, it might change in backward-incompatible ways and is not recommended
@@ -69,7 +83,7 @@ Google API                                      | Status       | Package
 > **Stable status**: the API is mature and ready for production use. We will
 > continue addressing bugs and feature requests.
 
-Documentation and examples are available at [godoc.org/cloud.google.com/go](godoc.org/cloud.google.com/go)
+Documentation and examples are available at [pkg.go.dev/cloud.google.com/go](https://pkg.go.dev/cloud.google.com/go)
 
 ## Go Versions Supported
 
@@ -90,7 +104,7 @@ client, err := storage.NewClient(ctx)
 To authorize using a
 [JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
 pass
-[`option.WithCredentialsFile`](https://godoc.org/google.golang.org/api/option#WithCredentialsFile)
+[`option.WithCredentialsFile`](https://pkg.go.dev/google.golang.org/api/option#WithCredentialsFile)
 to the `NewClient` function of the desired package. For example:
 
 [snip]:# (auth-JSON)
@@ -99,9 +113,9 @@ client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfil
 ```
 
 You can exert more control over authorization by using the
-[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
+[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to
 create an `oauth2.TokenSource`. Then pass
-[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource)
+[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource)
 to the `NewClient` function:
 [snip]:# (auth-ts)
 ```go
@@ -113,115 +127,52 @@ client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
 
 Contributions are welcome. Please, see the
 [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
-document for details. We're using Gerrit for our code reviews. Please don't open pull
-requests against this repo, new pull requests will be automatically closed.
+document for details.
 
 Please note that this project is released with a Contributor Code of Conduct.
 By participating in this project you agree to abide by its terms.
 See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
 for more information.
 
-[cloud-datastore]: https://cloud.google.com/datastore/
-[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore
-
-[cloud-firestore]: https://cloud.google.com/firestore/
-[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore
-
-[cloud-pubsub]: https://cloud.google.com/pubsub/
-[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
-
-[cloud-storage]: https://cloud.google.com/storage/
-[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
-
-[cloud-bigtable]: https://cloud.google.com/bigtable/
-[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable
-
+[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
+[cloud-automl]: https://cloud.google.com/automl
+[cloud-build]: https://cloud.google.com/cloud-build/
 [cloud-bigquery]: https://cloud.google.com/bigquery/
-[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery
-
-[cloud-logging]: https://cloud.google.com/logging/
-[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging
-
-[cloud-monitoring]: https://cloud.google.com/monitoring/
-[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3
-
-[cloud-vision]: https://cloud.google.com/vision
-[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1
-
-[cloud-language]: https://cloud.google.com/natural-language
-[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
-
-[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest
-[cloud-oslogin-ref]: https://cloud.google.com/go/oslogin/apiv1
-
-[cloud-speech]: https://cloud.google.com/speech
-[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1
-
-[cloud-spanner]: https://cloud.google.com/spanner/
-[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner
-
-[cloud-translate]: https://cloud.google.com/translate
-[cloud-translate-ref]: https://godoc.org/cloud.google.com/go/translate
-
-[cloud-video]: https://cloud.google.com/video-intelligence/
-[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1
-
-[cloud-errors]: https://cloud.google.com/error-reporting/
-[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting
-
+[cloud-bigtable]: https://cloud.google.com/bigtable/
 [cloud-container]: https://cloud.google.com/containers/
-[cloud-container-ref]: https://godoc.org/cloud.google.com/go/container/apiv1
-
+[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis
+[cloud-dataproc]: https://cloud.google.com/dataproc/
+[cloud-datastore]: https://cloud.google.com/datastore/
+[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/
 [cloud-debugger]: https://cloud.google.com/debugger/
-[cloud-debugger-ref]: https://godoc.org/cloud.google.com/go/debugger/apiv2
-
 [cloud-dlp]: https://cloud.google.com/dlp/
-[cloud-dlp-ref]: https://godoc.org/cloud.google.com/go/dlp/apiv2beta1
-
-[cloud-dataproc]: https://cloud.google.com/dataproc/
-[cloud-dataproc-ref]: https://godoc.org/cloud.google.com/go/dataproc/apiv1
-
+[cloud-errors]: https://cloud.google.com/error-reporting/
+[cloud-firestore]: https://cloud.google.com/firestore/
 [cloud-iam]: https://cloud.google.com/iam/
-[cloud-iam-ref]: https://godoc.org/cloud.google.com/go/iam
-
+[cloud-iot]: https://cloud.google.com/iot-core/
+[cloud-irm]: https://cloud.google.com/incident-response/docs/concepts
 [cloud-kms]: https://cloud.google.com/kms/
-[cloud-kms-ref]: https://godoc.org/cloud.google.com/go/kms/apiv1
-
+[cloud-pubsub]: https://cloud.google.com/pubsub/
+[cloud-storage]: https://cloud.google.com/storage/
+[cloud-language]: https://cloud.google.com/natural-language
+[cloud-logging]: https://cloud.google.com/logging/
 [cloud-natural-language]: https://cloud.google.com/natural-language/
-[cloud-natural-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
-
 [cloud-memorystore]: https://cloud.google.com/memorystore/
-[cloud-memorystore-ref]: https://godoc.org/cloud.google.com/go/redis/apiv1
-
+[cloud-monitoring]: https://cloud.google.com/monitoring/
+[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest
+[cloud-phishingprotection]: https://cloud.google.com/phishing-protection/
+[cloud-securitycenter]: https://cloud.google.com/security-command-center/
+[cloud-scheduler]: https://cloud.google.com/scheduler
+[cloud-spanner]: https://cloud.google.com/spanner/
+[cloud-speech]: https://cloud.google.com/speech
+[cloud-talent]: https://cloud.google.com/solutions/talent-solution/
+[cloud-tasks]: https://cloud.google.com/tasks/
 [cloud-texttospeech]: https://cloud.google.com/texttospeech/
-[cloud-texttospeech-ref]: https://godoc.org/cloud.google.com/go/texttospeech/apiv1
-
+[cloud-talent]: https://cloud.google.com/solutions/talent-solution/
 [cloud-trace]: https://cloud.google.com/trace/
-[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace/apiv2
-
-[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/
-[cloud-dialogflow-ref]: https://godoc.org/cloud.google.com/go/dialogflow/apiv2
-
-[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis
-[cloud-containeranalysis-ref]: https://godoc.org/cloud.google.com/go/devtools/containeranalysis/apiv1beta1
-
-[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
-[cloud-asset-ref]: https://godoc.org/cloud.google.com/go/asset/apiv1
-
-[cloud-tasks]: https://cloud.google.com/tasks/
-[cloud-tasks-ref]: https://godoc.org/cloud.google.com/go/cloudtasks/apiv2
-
-[cloud-scheduler]: https://cloud.google.com/scheduler
-[cloud-scheduler-ref]: https://godoc.org/cloud.google.com/go/scheduler/apiv1
-
-[cloud-iot]: https://cloud.google.com/iot-core/
-[cloud-iot-ref]: https://godoc.org/cloud.google.com/go/iot/apiv1
-
-[cloud-phishingprotection]: https://cloud.google.com/phishing-protection/
-[cloud-phishingprotection-ref]: https://cloud.google.com/go/phishingprotection/apiv1beta1
-
+[cloud-translate]: https://cloud.google.com/translate
 [cloud-recaptcha]: https://cloud.google.com/recaptcha-enterprise/
-[cloud-recaptcha-ref]: https://cloud.google.com/go/recaptchaenterprise/apiv1beta1
-
-[cloud-talent]: https://cloud.google.com/solutions/talent-solution/
-[cloud-talent-ref]: https://godoc.org/cloud.google.com/go/talent/apiv4beta1
+[cloud-recommender]: https://cloud.google.com/recommendations/
+[cloud-video]: https://cloud.google.com/video-intelligence/
+[cloud-vision]: https://cloud.google.com/vision
+[cloud-webrisk]: https://cloud.google.com/web-risk/

+ 120 - 10
vendor/cloud.google.com/go/RELEASING.md

@@ -1,18 +1,128 @@
-# How to Release this Repo
+# Setup from scratch
 
-1. Determine the current release version with `git tag -l`. It should look
-   something like `vX.Y.Z`. We'll call the current version `$CV` and the new
-   version `$NV`.
-1. On master, run `git log $CV..` to list all the changes since the last
+1. [Install Go](https://golang.org/dl/).
+    1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`)
+    is in your `PATH`.
+    1. Check it's working by running `go version`.
+        * If it doesn't work, check the install location, usually
+        `/usr/local/go`, is on your `PATH`.
+
+1. Sign one of the
+[contributor license agreements](#contributor-license-agreements) below.
+
+1. Clone the repo:
+    `git clone https://github.com/googleapis/google-cloud-go`
+
+1. Change into the checked out source:
+    `cd google-cloud-go`
+
+1. Fork the repo and add your fork as a secondary remote (this is necessary in
+   order to create PRs).
+
+# Which module to release?
+
+The Go client libraries have several modules. Each module does not strictly
+correspond to a single library - they correspond to trees of directories. If a
+file needs to be released, you must release the closest ancestor module.
+
+To see all modules:
+
+```
+$ cat `find . -name go.mod` | grep module
+module cloud.google.com/go
+module cloud.google.com/go/bigtable
+module cloud.google.com/go/firestore
+module cloud.google.com/go/bigquery
+module cloud.google.com/go/storage
+module cloud.google.com/go/datastore
+module cloud.google.com/go/pubsub
+module cloud.google.com/go/spanner
+module cloud.google.com/go/logging
+```
+
+`cloud.google.com/go` is the repository root module. Each other module is a
+submodule.
+
+So, if you need to release a change in `bigtable/bttest/inmem.go`, the closest
+ancestor module is `cloud.google.com/go/bigtable` - so you should release a new
+version of the `cloud.google.com/go/bigtable` submodule.
+
+If you need to release a change in `asset/apiv1/asset_client.go`, the closest
+ancestor module is `cloud.google.com/go` - so you should release a new version
+of the `cloud.google.com/go` repository root module. Note: releasing
+`cloud.google.com/go` has no impact on any of the submodules, and vice-versa.
+They are released entirely independently.
+
+# Test failures
+
+If there are any test failures in the Kokoro build, releases are blocked until
+the failures have been resolved.
+
+# How to release `cloud.google.com/go`
+
+1. Check for failures in the
+   [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any
+   failures in the most recent build, address them before proceeding with the
    release.
+1. Navigate to `~/code/gocloud/` and switch to master.
+1. `git pull`
+1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases.
+   The current latest tag `$CV` is the largest tag. It should look something
+   like `vX.Y.Z` (note: ignore all `LIB/vX.Y.Z` tags - these are tags for a
+   specific library, not the module root). We'll call the current version `$CV`
+   and the new version `$NV`.
+1. On master, run `git log $CV...` to list all the changes since the last
+   release. NOTE: You must manually filter out changes to submodules [1]
+   (the `git log` output will include changes in submodules, which are not
+   part of your release).
 1. Edit `CHANGES.md` to include a summary of the changes.
 1. `cd internal/version && go generate && cd -`
-1. Mail the CL containing the `CHANGES.md` changes. When the CL is approved,
-   submit it.
-1. Without submitting any other CLs:
+1. Commit the changes, push to your fork, and create a PR.
+1. Wait for the PR to be reviewed and merged. Once it's merged, and without
+   merging any other PRs in the meantime:
    a. Switch to master.
    b. `git pull`
    c. Tag the repo with the next version: `git tag $NV`.
-   d. Push the tag: `git push origin $NV`.
+   d. Push the tag to origin:
+      `git push origin $NV`
+2. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
+   with the new release, copying the contents of `CHANGES.md`.
+
+# How to release a submodule
+
+We have several submodules, including `cloud.google.com/go/logging`,
+`cloud.google.com/go/datastore`, and so on.
+
+To release a submodule:
+
+(these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly)
+
+1. Check for failures in the
+   [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any
+   failures in the most recent build, address them before proceeding with the
+   release. (This applies even if the failures are in a different submodule from the one
+   being released.)
+1. Navigate to `~/code/gocloud/` and switch to master.
+1. `git pull`
+1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all
+   existing releases. The current latest tag `$CV` is the largest tag. It
+   should look something like `datastore/vX.Y.Z`. We'll call the current version
+   `$CV` and the new version `$NV`.
+1. On master, run `git log $CV.. -- datastore/` to list all the changes to the
+   submodule directory since the last release.
+1. Edit `datastore/CHANGES.md` to include a summary of the changes.
+1. `cd internal/version && go generate && cd -`
+1. Commit the changes, push to your fork, and create a PR.
+1. Wait for the PR to be reviewed and merged. Once it's merged, and without
+   merging any other PRs in the meantime:
+   a. Switch to master.
+   b. `git pull`
+   c. Tag the repo with the next version: `git tag $NV`.
+   d. Push the tag to origin:
+      `git push origin $NV`
 1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
-   with the new release, copying the contents of the CHANGES.md.
+   with the new release, copying the contents of `datastore/CHANGES.md`.
+
+# Appendix
+
+1: This should get better as submodule tooling matures.
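
The "closest ancestor module" rule in the new RELEASING.md amounts to: pick the submodule whose directory is the longest path prefix of the file being released, and fall back to the repository root module otherwise. A minimal Go sketch of that lookup (the `closestModule` helper is hypothetical, purely illustrative; the submodule list is the one printed by the `go.mod` command above):

```go
package main

import (
	"fmt"
	"strings"
)

// Submodule directories, i.e. directories containing a go.mod other than the
// repository root (taken from the module list shown above).
var submodules = []string{
	"bigtable", "firestore", "bigquery", "storage",
	"datastore", "pubsub", "spanner", "logging",
}

// closestModule returns the module that owns a file: the submodule whose
// directory is the longest path prefix of the file, or the root module if no
// submodule matches.
func closestModule(file string) string {
	best := ""
	for _, m := range submodules {
		if strings.HasPrefix(file, m+"/") && len(m) > len(best) {
			best = m
		}
	}
	if best == "" {
		return "cloud.google.com/go" // repository root module
	}
	return "cloud.google.com/go/" + best
}

func main() {
	fmt.Println(closestModule("bigtable/bttest/inmem.go"))    // cloud.google.com/go/bigtable
	fmt.Println(closestModule("asset/apiv1/asset_client.go")) // cloud.google.com/go
}
```

Running it prints the `bigtable` submodule for the first path and the root module for the second, matching the two worked examples in the text above.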

+ 36 - 31
vendor/cloud.google.com/go/compute/metadata/metadata.go

@@ -61,25 +61,14 @@ var (
 	instID  = &cachedValue{k: "instance/id", trim: true}
 )
 
-var (
-	defaultClient = &Client{hc: &http.Client{
-		Transport: &http.Transport{
-			Dial: (&net.Dialer{
-				Timeout:   2 * time.Second,
-				KeepAlive: 30 * time.Second,
-			}).Dial,
-			ResponseHeaderTimeout: 2 * time.Second,
-		},
-	}}
-	subscribeClient = &Client{hc: &http.Client{
-		Transport: &http.Transport{
-			Dial: (&net.Dialer{
-				Timeout:   2 * time.Second,
-				KeepAlive: 30 * time.Second,
-			}).Dial,
-		},
-	}}
-)
+var defaultClient = &Client{hc: &http.Client{
+	Transport: &http.Transport{
+		Dial: (&net.Dialer{
+			Timeout:   2 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+	},
+}}
 
 // NotDefinedError is returned when requested metadata is not defined.
 //
@@ -151,7 +140,7 @@ func testOnGCE() bool {
 	}()
 
 	go func() {
-		addrs, err := net.LookupHost("metadata.google.internal")
+		addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal")
 		if err != nil || len(addrs) == 0 {
 			resc <- false
 			return
@@ -206,10 +195,9 @@ func systemInfoSuggestsGCE() bool {
 	return name == "Google" || name == "Google Compute Engine"
 }
 
-// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
-// ResponseHeaderTimeout).
+// Subscribe calls Client.Subscribe on the default client.
 func Subscribe(suffix string, fn func(v string, ok bool) error) error {
-	return subscribeClient.Subscribe(suffix, fn)
+	return defaultClient.Subscribe(suffix, fn)
 }
 
 // Get calls Client.Get on the default client.
@@ -227,6 +215,9 @@ func InternalIP() (string, error) { return defaultClient.InternalIP() }
 // ExternalIP returns the instance's primary external (public) IP address.
 func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
 
+// Email calls Client.Email on the default client.
+func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
+
 // Hostname returns the instance's hostname. This will be of the form
 // "<instanceID>.c.<projID>.internal".
 func Hostname() (string, error) { return defaultClient.Hostname() }
@@ -277,9 +268,14 @@ type Client struct {
 	hc *http.Client
 }
 
-// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
-// will use the given http.Client instead of the default client.
+// NewClient returns a Client that can be used to fetch metadata.
+// Returns the client that uses the specified http.Client for HTTP requests.
+// If nil is specified, returns the default client.
 func NewClient(c *http.Client) *Client {
+	if c == nil {
+		return defaultClient
+	}
+
 	return &Client{hc: c}
 }
 
@@ -301,7 +297,10 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
 		host = metadataIP
 	}
 	u := "http://" + host + "/computeMetadata/v1/" + suffix
-	req, _ := http.NewRequest("GET", u, nil)
+	req, err := http.NewRequest("GET", u, nil)
+	if err != nil {
+		return "", "", err
+	}
 	req.Header.Set("Metadata-Flavor", "Google")
 	req.Header.Set("User-Agent", userAgent)
 	res, err := c.hc.Do(req)
@@ -367,6 +366,16 @@ func (c *Client) InternalIP() (string, error) {
 	return c.getTrimmed("instance/network-interfaces/0/ip")
 }
 
+// Email returns the email address associated with the service account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func (c *Client) Email(serviceAccount string) (string, error) {
+	if serviceAccount == "" {
+		serviceAccount = "default"
+	}
+	return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
+}
+
 // ExternalIP returns the instance's primary external (public) IP address.
 func (c *Client) ExternalIP() (string, error) {
 	return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
@@ -394,11 +403,7 @@ func (c *Client) InstanceTags() ([]string, error) {
 
 // InstanceName returns the current VM's instance ID string.
 func (c *Client) InstanceName() (string, error) {
-	host, err := c.Hostname()
-	if err != nil {
-		return "", err
-	}
-	return strings.Split(host, ".")[0], nil
+	return c.getTrimmed("instance/name")
 }
 
 // Zone returns the current VM's zone, such as "us-central1-b".
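
The metadata changes above are the behavioral part of this vendor bump: `NewClient` now falls back to the default client when passed nil, `Subscribe` reuses that default client, `Email` is new, and `InstanceName` is read from `instance/name` instead of being parsed out of the hostname. A minimal usage sketch (not part of the diff; it assumes the code runs on a GCE instance where the metadata server is reachable):

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE; the metadata server is unreachable")
	}

	// Passing nil now returns the package's shared default client instead of
	// a Client wrapping a nil *http.Client.
	c := metadata.NewClient(nil)

	// An empty account name is treated as "default", i.e. the instance's
	// main service account.
	email, err := c.Email("")
	if err != nil {
		log.Fatal(err)
	}

	// InstanceName is now fetched from instance/name rather than derived
	// from the hostname.
	name, err := c.InstanceName()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("instance %q runs as %s\n", name, email)
}
```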

+ 0 - 0
vendor/cloud.google.com/go/cloud.go → vendor/cloud.google.com/go/doc.go


+ 13 - 0
vendor/cloud.google.com/go/internal/version/update_version.sh

@@ -1,4 +1,17 @@
 #!/bin/bash
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 today=$(date +%Y%m%d)
 

+ 1 - 1
vendor/cloud.google.com/go/internal/version/version.go

@@ -26,7 +26,7 @@ import (
 
 // Repo is the current version of the client libraries in this
 // repo. It should be a date in YYYYMMDD format.
-const Repo = "20190802"
+const Repo = "20200618"
 
 // Go returns the Go runtime version. The returned string
 // has no whitespace.

+ 0 - 17
vendor/cloud.google.com/go/issue_template.md

@@ -1,17 +0,0 @@
-(delete this for feature requests)
-
-## Client
-
-e.g. PubSub
-
-## Describe Your Environment
-
-e.g. Alpine Docker on GKE
-
-## Expected Behavior
-
-e.g. Messages arrive really fast.
-
-## Actual Behavior
-
-e.g. Messages arrive really slowly.

+ 0 - 150
vendor/cloud.google.com/go/regen-gapic.sh

@@ -1,150 +0,0 @@
-#!/bin/bash
-
-# This script generates all GAPIC clients in this repo.
-# One-time setup:
-#   cd path/to/googleapis # https://github.com/googleapis/googleapis
-#   virtualenv env
-#   . env/bin/activate
-#   pip install googleapis-artman
-#   deactivate
-#
-# Regenerate:
-#   cd path/to/googleapis
-#   . env/bin/activate
-#   $GOPATH/src/cloud.google.com/go/regen-gapic.sh
-#   deactivate
-#
-# Being in googleapis directory is important;
-# that's where we find YAML files and where artman puts the "artman-genfiles" directory.
-#
-# NOTE: This script does not generate the "raw" gRPC client found in google.golang.org/genproto.
-# To do that, use the regen.sh script in the genproto repo instead.
-
-set -ex
-
-APIS=(
-google/api/expr/artman_cel.yaml
-google/iam/artman_iam_admin.yaml
-google/cloud/asset/artman_cloudasset_v1beta1.yaml
-google/cloud/asset/artman_cloudasset_v1p2beta1.yaml
-google/cloud/asset/artman_cloudasset_v1.yaml
-google/iam/credentials/artman_iamcredentials_v1.yaml
-google/cloud/automl/artman_automl_v1beta1.yaml
-google/cloud/bigquery/datatransfer/artman_bigquerydatatransfer.yaml
-google/cloud/bigquery/storage/artman_bigquerystorage_v1beta1.yaml
-google/cloud/dataproc/artman_dataproc_v1.yaml
-google/cloud/dataproc/artman_dataproc_v1beta2.yaml
-google/cloud/dialogflow/artman_dialogflow_v2.yaml
-google/cloud/iot/artman_cloudiot.yaml
-google/cloud/irm/artman_irm_v1alpha2.yaml
-google/cloud/kms/artman_cloudkms.yaml
-google/cloud/language/artman_language_v1.yaml
-google/cloud/language/artman_language_v1beta2.yaml
-google/cloud/oslogin/artman_oslogin_v1.yaml
-google/cloud/oslogin/artman_oslogin_v1beta.yaml
-google/cloud/phishingprotection/artman_phishingprotection_v1beta1.yaml
-google/cloud/recaptchaenterprise/artman_recaptchaenterprise_v1beta1.yaml
-google/cloud/redis/artman_redis_v1beta1.yaml
-google/cloud/redis/artman_redis_v1.yaml
-google/cloud/scheduler/artman_cloudscheduler_v1beta1.yaml
-google/cloud/scheduler/artman_cloudscheduler_v1.yaml
-google/cloud/securitycenter/artman_securitycenter_v1beta1.yaml
-google/cloud/securitycenter/artman_securitycenter_v1.yaml
-google/cloud/speech/artman_speech_v1.yaml
-google/cloud/speech/artman_speech_v1p1beta1.yaml
-google/cloud/talent/artman_talent_v4beta1.yaml
-google/cloud/tasks/artman_cloudtasks_v2beta2.yaml
-google/cloud/tasks/artman_cloudtasks_v2beta3.yaml
-google/cloud/tasks/artman_cloudtasks_v2.yaml
-google/cloud/texttospeech/artman_texttospeech_v1.yaml
-google/cloud/videointelligence/artman_videointelligence_v1.yaml
-google/cloud/videointelligence/artman_videointelligence_v1beta1.yaml
-google/cloud/videointelligence/artman_videointelligence_v1beta2.yaml
-google/cloud/vision/artman_vision_v1.yaml
-google/cloud/vision/artman_vision_v1p1beta1.yaml
-google/cloud/webrisk/artman_webrisk_v1beta1.yaml
-google/devtools/artman_clouddebugger.yaml
-google/devtools/clouderrorreporting/artman_errorreporting.yaml
-google/devtools/cloudtrace/artman_cloudtrace_v1.yaml
-google/devtools/cloudtrace/artman_cloudtrace_v2.yaml
-
-# The containeranalysis team wants manual changes in the auto-generated gapic.
-# So, let's remove it from the autogen list until we're ready to spend energy
-# generating and manually updating it.
-# google/devtools/containeranalysis/artman_containeranalysis_v1.yaml
-
-google/devtools/containeranalysis/artman_containeranalysis_v1beta1.yaml
-google/firestore/artman_firestore.yaml
-google/firestore/admin/artman_firestore_v1.yaml
-
-# See containeranalysis note above.
-# grafeas/artman_grafeas_v1.yaml
-
-google/logging/artman_logging.yaml
-google/longrunning/artman_longrunning.yaml
-google/monitoring/artman_monitoring.yaml
-google/privacy/dlp/artman_dlp_v2.yaml
-google/pubsub/artman_pubsub.yaml
-google/spanner/admin/database/artman_spanner_admin_database.yaml
-google/spanner/admin/instance/artman_spanner_admin_instance.yaml
-google/spanner/artman_spanner.yaml
-)
-
-for api in "${APIS[@]}"; do
-  rm -rf artman-genfiles/*
-  artman --config "$api" generate go_gapic
-  cp -r artman-genfiles/gapi-*/cloud.google.com/go/* $GOPATH/src/cloud.google.com/go/
-done
-
-microgen() {
-  input=$1
-  options="${@:2}"
-
-  # see https://github.com/googleapis/gapic-generator-go/blob/master/README.md#docker-wrapper for details
-  docker run \
-    --mount type=bind,source=$(pwd),destination=/conf,readonly \
-    --mount type=bind,source=$(pwd)/$input,destination=/in/$input,readonly \
-    --mount type=bind,source=$GOPATH/src,destination=/out \
-    --rm \
-    gcr.io/gapic-images/gapic-generator-go:latest \
-    $options
-}
-
-MICROAPIS=(
-  # input proto directory  |  gapic-generator-go flag  | gapic-service-config flag
-  # "google/cloud/language/v1 --go-gapic-package cloud.google.com/go/language/apiv1;language --gapic-service-config google/cloud/language/language_v1.yaml"
-)
-
-for api in "${MICROAPIS[@]}"; do
-  microgen $api
-done
-
-pushd $GOPATH/src/cloud.google.com/go/
-  gofmt -s -d -l -w . && goimports -w .
-
-  # NOTE(pongad): `sed -i` doesn't work on Macs, because -i option needs an argument.
-  # `-i ''` doesn't work on GNU, since the empty string is treated as a file name.
-  # So we just create the backup and delete it after.
-  ver=$(date +%Y%m%d)
-  git ls-files -mo | while read modified; do
-    dir=${modified%/*.*}
-    find . -path "*/$dir/doc.go" -exec sed -i.backup -e "s/^const versionClient.*/const versionClient = \"$ver\"/" '{}' +
-  done
-popd
-
-
-HASMANUAL=(
-errorreporting/apiv1beta1
-firestore/apiv1beta1
-firestore/apiv1
-logging/apiv2
-longrunning/autogen
-pubsub/apiv1
-spanner/apiv1
-trace/apiv1
-)
-for dir in "${HASMANUAL[@]}"; do
-	find "$GOPATH/src/cloud.google.com/go/$dir" -name '*.go' -exec sed -i.backup -e 's/setGoogleClientInfo/SetGoogleClientInfo/g' '{}' '+'
-done
-
-find $GOPATH/src/cloud.google.com/go/ -name '*.backup' -delete

+ 0 - 2
vendor/cloud.google.com/go/tools.go

@@ -26,8 +26,6 @@ package cloud
 import (
 	_ "github.com/golang/protobuf/protoc-gen-go"
 	_ "github.com/jstemmer/go-junit-report"
-	_ "golang.org/x/exp/cmd/apidiff"
 	_ "golang.org/x/lint/golint"
 	_ "golang.org/x/tools/cmd/goimports"
-	_ "honnef.co/go/tools/cmd/staticcheck"
 )

+ 0 - 5
vendor/github.com/BurntSushi/toml/.gitignore

@@ -1,5 +0,0 @@
-TAGS
-tags
-.*.swp
-tomlcheck/tomlcheck
-toml.test

+ 0 - 15
vendor/github.com/BurntSushi/toml/.travis.yml

@@ -1,15 +0,0 @@
-language: go
-go:
-  - 1.1
-  - 1.2
-  - 1.3
-  - 1.4
-  - 1.5
-  - 1.6
-  - tip
-install:
-  - go install ./...
-  - go get github.com/BurntSushi/toml-test
-script:
-  - export PATH="$PATH:$HOME/gopath/bin"
-  - make test

+ 0 - 3
vendor/github.com/BurntSushi/toml/COMPATIBLE

@@ -1,3 +0,0 @@
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
-

+ 0 - 21
vendor/github.com/BurntSushi/toml/COPYING

@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 TOML authors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.

+ 0 - 19
vendor/github.com/BurntSushi/toml/Makefile

@@ -1,19 +0,0 @@
-install:
-	go install ./...
-
-test: install
-	go test -v
-	toml-test toml-test-decoder
-	toml-test -encoder toml-test-encoder
-
-fmt:
-	gofmt -w *.go */*.go
-	colcheck *.go */*.go
-
-tags:
-	find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
-
-push:
-	git push origin master
-	git push github master
-

+ 0 - 218
vendor/github.com/BurntSushi/toml/README.md

@@ -1,218 +0,0 @@
-## TOML parser and encoder for Go with reflection
-
-TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml`
-packages. This package also supports the `encoding.TextUnmarshaler` and
-`encoding.TextMarshaler` interfaces so that you can define custom data
-representations. (There is an example of this below.)
-
-Spec: https://github.com/toml-lang/toml
-
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
-
-Documentation: https://godoc.org/github.com/BurntSushi/toml
-
-Installation:
-
-```bash
-go get github.com/BurntSushi/toml
-```
-
-Try the toml validator:
-
-```bash
-go get github.com/BurntSushi/toml/cmd/tomlv
-tomlv some-toml-file.toml
-```
-
-[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
-
-### Testing
-
-This package passes all tests in
-[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
-and the encoder.
-
-### Examples
-
-This package works similarly to how the Go standard library handles `XML`
-and `JSON`. Namely, data is loaded into Go values via reflection.
-
-For the simplest example, consider some TOML file as just a list of keys
-and values:
-
-```toml
-Age = 25
-Cats = [ "Cauchy", "Plato" ]
-Pi = 3.14
-Perfection = [ 6, 28, 496, 8128 ]
-DOB = 1987-07-05T05:45:00Z
-```
-
-Which could be defined in Go as:
-
-```go
-type Config struct {
-  Age int
-  Cats []string
-  Pi float64
-  Perfection []int
-  DOB time.Time // requires `import time`
-}
-```
-
-And then decoded with:
-
-```go
-var conf Config
-if _, err := toml.Decode(tomlData, &conf); err != nil {
-  // handle error
-}
-```
-
-You can also use struct tags if your struct field name doesn't map to a TOML
-key value directly:
-
-```toml
-some_key_NAME = "wat"
-```
-
-```go
-type TOML struct {
-  ObscureKey string `toml:"some_key_NAME"`
-}
-```
-
-### Using the `encoding.TextUnmarshaler` interface
-
-Here's an example that automatically parses duration strings into
-`time.Duration` values:
-
-```toml
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
-```
-
-Which can be decoded with:
-
-```go
-type song struct {
-  Name     string
-  Duration duration
-}
-type songs struct {
-  Song []song
-}
-var favorites songs
-if _, err := toml.Decode(blob, &favorites); err != nil {
-  log.Fatal(err)
-}
-
-for _, s := range favorites.Song {
-  fmt.Printf("%s (%s)\n", s.Name, s.Duration)
-}
-```
-
-And you'll also need a `duration` type that satisfies the
-`encoding.TextUnmarshaler` interface:
-
-```go
-type duration struct {
-	time.Duration
-}
-
-func (d *duration) UnmarshalText(text []byte) error {
-	var err error
-	d.Duration, err = time.ParseDuration(string(text))
-	return err
-}
-```
-
-### More complex usage
-
-Here's an example of how to load the example from the official spec page:
-
-```toml
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8002 ]
-connection_max = 5000
-enabled = true
-
-[servers]
-
-  # You can indent as you please. Tabs or spaces. TOML don't care.
-  [servers.alpha]
-  ip = "10.0.0.1"
-  dc = "eqdc10"
-
-  [servers.beta]
-  ip = "10.0.0.2"
-  dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
-
-# Line breaks are OK when inside arrays
-hosts = [
-  "alpha",
-  "omega"
-]
-```
-
-And the corresponding Go types are:
-
-```go
-type tomlConfig struct {
-	Title string
-	Owner ownerInfo
-	DB database `toml:"database"`
-	Servers map[string]server
-	Clients clients
-}
-
-type ownerInfo struct {
-	Name string
-	Org string `toml:"organization"`
-	Bio string
-	DOB time.Time
-}
-
-type database struct {
-	Server string
-	Ports []int
-	ConnMax int `toml:"connection_max"`
-	Enabled bool
-}
-
-type server struct {
-	IP string
-	DC string
-}
-
-type clients struct {
-	Data [][]interface{}
-	Hosts []string
-}
-```
-
-Note that a case insensitive match will be tried if an exact match can't be
-found.
-
-A working example of the above can be found in `_examples/example.{go,toml}`.

+ 0 - 509
vendor/github.com/BurntSushi/toml/decode.go

@@ -1,509 +0,0 @@
-package toml
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"math"
-	"reflect"
-	"strings"
-	"time"
-)
-
-func e(format string, args ...interface{}) error {
-	return fmt.Errorf("toml: "+format, args...)
-}
-
-// Unmarshaler is the interface implemented by objects that can unmarshal a
-// TOML description of themselves.
-type Unmarshaler interface {
-	UnmarshalTOML(interface{}) error
-}
-
-// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
-func Unmarshal(p []byte, v interface{}) error {
-	_, err := Decode(string(p), v)
-	return err
-}
-
-// Primitive is a TOML value that hasn't been decoded into a Go value.
-// When using the various `Decode*` functions, the type `Primitive` may
-// be given to any value, and its decoding will be delayed.
-//
-// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
-//
-// The underlying representation of a `Primitive` value is subject to change.
-// Do not rely on it.
-//
-// N.B. Primitive values are still parsed, so using them will only avoid
-// the overhead of reflection. They can be useful when you don't know the
-// exact type of TOML data until run time.
-type Primitive struct {
-	undecoded interface{}
-	context   Key
-}
-
-// DEPRECATED!
-//
-// Use MetaData.PrimitiveDecode instead.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
-	md := MetaData{decoded: make(map[string]bool)}
-	return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// PrimitiveDecode is just like the other `Decode*` functions, except it
-// decodes a TOML value that has already been parsed. Valid primitive values
-// can *only* be obtained from values filled by the decoder functions,
-// including this method. (i.e., `v` may contain more `Primitive`
-// values.)
-//
-// Meta data for primitive values is included in the meta data returned by
-// the `Decode*` functions with one exception: keys returned by the Undecoded
-// method will only reflect keys that were decoded. Namely, any keys hidden
-// behind a Primitive will be considered undecoded. Executing this method will
-// update the undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
-	md.context = primValue.context
-	defer func() { md.context = nil }()
-	return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// Decode will decode the contents of `data` in TOML format into a pointer
-// `v`.
-//
-// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
-// used interchangeably.)
-//
-// TOML arrays of tables correspond to either a slice of structs or a slice
-// of maps.
-//
-// TOML datetimes correspond to Go `time.Time` values.
-//
-// All other TOML types (float, string, int, bool and array) correspond
-// to the obvious Go types.
-//
-// An exception to the above rules is if a type implements the
-// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
-// (floats, strings, integers, booleans and datetimes) will be converted to
-// a byte string and given to the value's UnmarshalText method. See the
-// Unmarshaler example for a demonstration with time duration strings.
-//
-// Key mapping
-//
-// TOML keys can map to either keys in a Go map or field names in a Go
-// struct. The special `toml` struct tag may be used to map TOML keys to
-// struct fields that don't match the key name exactly. (See the example.)
-// A case insensitive match to struct names will be tried if an exact match
-// can't be found.
-//
-// The mapping between TOML values and Go values is loose. That is, there
-// may exist TOML values that cannot be placed into your representation, and
-// there may be parts of your representation that do not correspond to
-// TOML values. This loose mapping can be made stricter by using the IsDefined
-// and/or Undecoded methods on the MetaData returned.
-//
-// This decoder will not handle cyclic types. If a cyclic type is passed,
-// `Decode` will not terminate.
-func Decode(data string, v interface{}) (MetaData, error) {
-	rv := reflect.ValueOf(v)
-	if rv.Kind() != reflect.Ptr {
-		return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
-	}
-	if rv.IsNil() {
-		return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
-	}
-	p, err := parse(data)
-	if err != nil {
-		return MetaData{}, err
-	}
-	md := MetaData{
-		p.mapping, p.types, p.ordered,
-		make(map[string]bool, len(p.ordered)), nil,
-	}
-	return md, md.unify(p.mapping, indirect(rv))
-}
-
-// DecodeFile is just like Decode, except it will automatically read the
-// contents of the file at `fpath` and decode it for you.
-func DecodeFile(fpath string, v interface{}) (MetaData, error) {
-	bs, err := ioutil.ReadFile(fpath)
-	if err != nil {
-		return MetaData{}, err
-	}
-	return Decode(string(bs), v)
-}
-
-// DecodeReader is just like Decode, except it will consume all bytes
-// from the reader and decode it for you.
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
-	bs, err := ioutil.ReadAll(r)
-	if err != nil {
-		return MetaData{}, err
-	}
-	return Decode(string(bs), v)
-}
-
-// unify performs a sort of type unification based on the structure of `rv`,
-// which is the client representation.
-//
-// Any type mismatch produces an error. Finding a type that we don't know
-// how to handle produces an unsupported type error.
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
-
-	// Special case. Look for a `Primitive` value.
-	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
-		// Save the undecoded data and the key context into the primitive
-		// value.
-		context := make(Key, len(md.context))
-		copy(context, md.context)
-		rv.Set(reflect.ValueOf(Primitive{
-			undecoded: data,
-			context:   context,
-		}))
-		return nil
-	}
-
-	// Special case. Unmarshaler Interface support.
-	if rv.CanAddr() {
-		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
-			return v.UnmarshalTOML(data)
-		}
-	}
-
-	// Special case. Handle time.Time values specifically.
-	// TODO: Remove this code when we decide to drop support for Go 1.1.
-	// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
-	// interfaces.
-	if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
-		return md.unifyDatetime(data, rv)
-	}
-
-	// Special case. Look for a value satisfying the TextUnmarshaler interface.
-	if v, ok := rv.Interface().(TextUnmarshaler); ok {
-		return md.unifyText(data, v)
-	}
-	// BUG(burntsushi)
-	// The behavior here is incorrect whenever a Go type satisfies the
-	// encoding.TextUnmarshaler interface but also corresponds to a TOML
-	// hash or array. In particular, the unmarshaler should only be applied
-	// to primitive TOML values. But at this point, it will be applied to
-	// all kinds of values and produce an incorrect error whenever those values
-	// are hashes or arrays (including arrays of tables).
-
-	k := rv.Kind()
-
-	// laziness
-	if k >= reflect.Int && k <= reflect.Uint64 {
-		return md.unifyInt(data, rv)
-	}
-	switch k {
-	case reflect.Ptr:
-		elem := reflect.New(rv.Type().Elem())
-		err := md.unify(data, reflect.Indirect(elem))
-		if err != nil {
-			return err
-		}
-		rv.Set(elem)
-		return nil
-	case reflect.Struct:
-		return md.unifyStruct(data, rv)
-	case reflect.Map:
-		return md.unifyMap(data, rv)
-	case reflect.Array:
-		return md.unifyArray(data, rv)
-	case reflect.Slice:
-		return md.unifySlice(data, rv)
-	case reflect.String:
-		return md.unifyString(data, rv)
-	case reflect.Bool:
-		return md.unifyBool(data, rv)
-	case reflect.Interface:
-		// we only support empty interfaces.
-		if rv.NumMethod() > 0 {
-			return e("unsupported type %s", rv.Type())
-		}
-		return md.unifyAnything(data, rv)
-	case reflect.Float32:
-		fallthrough
-	case reflect.Float64:
-		return md.unifyFloat64(data, rv)
-	}
-	return e("unsupported type %s", rv.Kind())
-}
-
-func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
-	tmap, ok := mapping.(map[string]interface{})
-	if !ok {
-		if mapping == nil {
-			return nil
-		}
-		return e("type mismatch for %s: expected table but found %T",
-			rv.Type().String(), mapping)
-	}
-
-	for key, datum := range tmap {
-		var f *field
-		fields := cachedTypeFields(rv.Type())
-		for i := range fields {
-			ff := &fields[i]
-			if ff.name == key {
-				f = ff
-				break
-			}
-			if f == nil && strings.EqualFold(ff.name, key) {
-				f = ff
-			}
-		}
-		if f != nil {
-			subv := rv
-			for _, i := range f.index {
-				subv = indirect(subv.Field(i))
-			}
-			if isUnifiable(subv) {
-				md.decoded[md.context.add(key).String()] = true
-				md.context = append(md.context, key)
-				if err := md.unify(datum, subv); err != nil {
-					return err
-				}
-				md.context = md.context[0 : len(md.context)-1]
-			} else if f.name != "" {
-				// Bad user! No soup for you!
-				return e("cannot write unexported field %s.%s",
-					rv.Type().String(), f.name)
-			}
-		}
-	}
-	return nil
-}
-
-func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
-	tmap, ok := mapping.(map[string]interface{})
-	if !ok {
-		if tmap == nil {
-			return nil
-		}
-		return badtype("map", mapping)
-	}
-	if rv.IsNil() {
-		rv.Set(reflect.MakeMap(rv.Type()))
-	}
-	for k, v := range tmap {
-		md.decoded[md.context.add(k).String()] = true
-		md.context = append(md.context, k)
-
-		rvkey := indirect(reflect.New(rv.Type().Key()))
-		rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
-		if err := md.unify(v, rvval); err != nil {
-			return err
-		}
-		md.context = md.context[0 : len(md.context)-1]
-
-		rvkey.SetString(k)
-		rv.SetMapIndex(rvkey, rvval)
-	}
-	return nil
-}
-
-func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
-	datav := reflect.ValueOf(data)
-	if datav.Kind() != reflect.Slice {
-		if !datav.IsValid() {
-			return nil
-		}
-		return badtype("slice", data)
-	}
-	sliceLen := datav.Len()
-	if sliceLen != rv.Len() {
-		return e("expected array length %d; got TOML array of length %d",
-			rv.Len(), sliceLen)
-	}
-	return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
-	datav := reflect.ValueOf(data)
-	if datav.Kind() != reflect.Slice {
-		if !datav.IsValid() {
-			return nil
-		}
-		return badtype("slice", data)
-	}
-	n := datav.Len()
-	if rv.IsNil() || rv.Cap() < n {
-		rv.Set(reflect.MakeSlice(rv.Type(), n, n))
-	}
-	rv.SetLen(n)
-	return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
-	sliceLen := data.Len()
-	for i := 0; i < sliceLen; i++ {
-		v := data.Index(i).Interface()
-		sliceval := indirect(rv.Index(i))
-		if err := md.unify(v, sliceval); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
-	if _, ok := data.(time.Time); ok {
-		rv.Set(reflect.ValueOf(data))
-		return nil
-	}
-	return badtype("time.Time", data)
-}
-
-func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
-	if s, ok := data.(string); ok {
-		rv.SetString(s)
-		return nil
-	}
-	return badtype("string", data)
-}
-
-func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
-	if num, ok := data.(float64); ok {
-		switch rv.Kind() {
-		case reflect.Float32:
-			fallthrough
-		case reflect.Float64:
-			rv.SetFloat(num)
-		default:
-			panic("bug")
-		}
-		return nil
-	}
-	return badtype("float", data)
-}
-
-func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
-	if num, ok := data.(int64); ok {
-		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
-			switch rv.Kind() {
-			case reflect.Int, reflect.Int64:
-				// No bounds checking necessary.
-			case reflect.Int8:
-				if num < math.MinInt8 || num > math.MaxInt8 {
-					return e("value %d is out of range for int8", num)
-				}
-			case reflect.Int16:
-				if num < math.MinInt16 || num > math.MaxInt16 {
-					return e("value %d is out of range for int16", num)
-				}
-			case reflect.Int32:
-				if num < math.MinInt32 || num > math.MaxInt32 {
-					return e("value %d is out of range for int32", num)
-				}
-			}
-			rv.SetInt(num)
-		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
-			unum := uint64(num)
-			switch rv.Kind() {
-			case reflect.Uint, reflect.Uint64:
-				// No bounds checking necessary.
-			case reflect.Uint8:
-				if num < 0 || unum > math.MaxUint8 {
-					return e("value %d is out of range for uint8", num)
-				}
-			case reflect.Uint16:
-				if num < 0 || unum > math.MaxUint16 {
-					return e("value %d is out of range for uint16", num)
-				}
-			case reflect.Uint32:
-				if num < 0 || unum > math.MaxUint32 {
-					return e("value %d is out of range for uint32", num)
-				}
-			}
-			rv.SetUint(unum)
-		} else {
-			panic("unreachable")
-		}
-		return nil
-	}
-	return badtype("integer", data)
-}
-
-func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
-	if b, ok := data.(bool); ok {
-		rv.SetBool(b)
-		return nil
-	}
-	return badtype("boolean", data)
-}
-
-func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
-	rv.Set(reflect.ValueOf(data))
-	return nil
-}
-
-func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
-	var s string
-	switch sdata := data.(type) {
-	case TextMarshaler:
-		text, err := sdata.MarshalText()
-		if err != nil {
-			return err
-		}
-		s = string(text)
-	case fmt.Stringer:
-		s = sdata.String()
-	case string:
-		s = sdata
-	case bool:
-		s = fmt.Sprintf("%v", sdata)
-	case int64:
-		s = fmt.Sprintf("%d", sdata)
-	case float64:
-		s = fmt.Sprintf("%f", sdata)
-	default:
-		return badtype("primitive (string-like)", data)
-	}
-	if err := v.UnmarshalText([]byte(s)); err != nil {
-		return err
-	}
-	return nil
-}
-
-// rvalue returns a reflect.Value of `v`. All pointers are resolved.
-func rvalue(v interface{}) reflect.Value {
-	return indirect(reflect.ValueOf(v))
-}
-
-// indirect returns the value pointed to by a pointer.
-// Pointers are followed until the value is not a pointer.
-// New values are allocated for each nil pointer.
-//
-// An exception to this rule is if the value satisfies an interface of
-// interest to us (like encoding.TextUnmarshaler).
-func indirect(v reflect.Value) reflect.Value {
-	if v.Kind() != reflect.Ptr {
-		if v.CanSet() {
-			pv := v.Addr()
-			if _, ok := pv.Interface().(TextUnmarshaler); ok {
-				return pv
-			}
-		}
-		return v
-	}
-	if v.IsNil() {
-		v.Set(reflect.New(v.Type().Elem()))
-	}
-	return indirect(reflect.Indirect(v))
-}
-
-func isUnifiable(rv reflect.Value) bool {
-	if rv.CanSet() {
-		return true
-	}
-	if _, ok := rv.Interface().(TextUnmarshaler); ok {
-		return true
-	}
-	return false
-}
-
-func badtype(expected string, data interface{}) error {
-	return e("cannot load TOML value of type %T into a Go %s", data, expected)
-}

+ 0 - 121
vendor/github.com/BurntSushi/toml/decode_meta.go

@@ -1,121 +0,0 @@
-package toml
-
-import "strings"
-
-// MetaData allows access to meta information about TOML data that may not
-// be inferrable via reflection. In particular, whether a key has been defined
-// and the TOML type of a key.
-type MetaData struct {
-	mapping map[string]interface{}
-	types   map[string]tomlType
-	keys    []Key
-	decoded map[string]bool
-	context Key // Used only during decoding.
-}
-
-// IsDefined returns true if the key given exists in the TOML data. The key
-// should be specified hierarchially. e.g.,
-//
-//	// access the TOML key 'a.b.c'
-//	IsDefined("a", "b", "c")
-//
-// IsDefined will return false if an empty key given. Keys are case sensitive.
-func (md *MetaData) IsDefined(key ...string) bool {
-	if len(key) == 0 {
-		return false
-	}
-
-	var hash map[string]interface{}
-	var ok bool
-	var hashOrVal interface{} = md.mapping
-	for _, k := range key {
-		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
-			return false
-		}
-		if hashOrVal, ok = hash[k]; !ok {
-			return false
-		}
-	}
-	return true
-}
-
-// Type returns a string representation of the type of the key specified.
-//
-// Type will return the empty string if given an empty key or a key that
-// does not exist. Keys are case sensitive.
-func (md *MetaData) Type(key ...string) string {
-	fullkey := strings.Join(key, ".")
-	if typ, ok := md.types[fullkey]; ok {
-		return typ.typeString()
-	}
-	return ""
-}
-
-// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
-// to get values of this type.
-type Key []string
-
-func (k Key) String() string {
-	return strings.Join(k, ".")
-}
-
-func (k Key) maybeQuotedAll() string {
-	var ss []string
-	for i := range k {
-		ss = append(ss, k.maybeQuoted(i))
-	}
-	return strings.Join(ss, ".")
-}
-
-func (k Key) maybeQuoted(i int) string {
-	quote := false
-	for _, c := range k[i] {
-		if !isBareKeyChar(c) {
-			quote = true
-			break
-		}
-	}
-	if quote {
-		return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
-	}
-	return k[i]
-}
-
-func (k Key) add(piece string) Key {
-	newKey := make(Key, len(k)+1)
-	copy(newKey, k)
-	newKey[len(k)] = piece
-	return newKey
-}
-
-// Keys returns a slice of every key in the TOML data, including key groups.
-// Each key is itself a slice, where the first element is the top of the
-// hierarchy and the last is the most specific.
-//
-// The list will have the same order as the keys appeared in the TOML data.
-//
-// All keys returned are non-empty.
-func (md *MetaData) Keys() []Key {
-	return md.keys
-}
-
-// Undecoded returns all keys that have not been decoded in the order in which
-// they appear in the original TOML document.
-//
-// This includes keys that haven't been decoded because of a Primitive value.
-// Once the Primitive value is decoded, the keys will be considered decoded.
-//
-// Also note that decoding into an empty interface will result in no decoding,
-// and so no keys will be considered decoded.
-//
-// In this sense, the Undecoded keys correspond to keys in the TOML document
-// that do not have a concrete type in your representation.
-func (md *MetaData) Undecoded() []Key {
-	undecoded := make([]Key, 0, len(md.keys))
-	for _, key := range md.keys {
-		if !md.decoded[key.String()] {
-			undecoded = append(undecoded, key)
-		}
-	}
-	return undecoded
-}

+ 0 - 27
vendor/github.com/BurntSushi/toml/doc.go

@@ -1,27 +0,0 @@
-/*
-Package toml provides facilities for decoding and encoding TOML configuration
-files via reflection. There is also support for delaying decoding with
-the Primitive type, and querying the set of keys in a TOML document with the
-MetaData type.
-
-The specification implemented: https://github.com/toml-lang/toml
-
-The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
-whether a file is a valid TOML document. It can also be used to print the
-type of each key in a TOML document.
-
-Testing
-
-There are two important types of tests used for this package. The first is
-contained inside '*_test.go' files and uses the standard Go unit testing
-framework. These tests are primarily devoted to holistically testing the
-decoder and encoder.
-
-The second type of testing is used to verify the implementation's adherence
-to the TOML specification. These tests have been factored into their own
-project: https://github.com/BurntSushi/toml-test
-
-The reason the tests are in a separate project is so that they can be used by
-any implementation of TOML. Namely, it is language agnostic.
-*/
-package toml

+ 0 - 568
vendor/github.com/BurntSushi/toml/encode.go

@@ -1,568 +0,0 @@
-package toml
-
-import (
-	"bufio"
-	"errors"
-	"fmt"
-	"io"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-)
-
-type tomlEncodeError struct{ error }
-
-var (
-	errArrayMixedElementTypes = errors.New(
-		"toml: cannot encode array with mixed element types")
-	errArrayNilElement = errors.New(
-		"toml: cannot encode array with nil element")
-	errNonString = errors.New(
-		"toml: cannot encode a map with non-string key type")
-	errAnonNonStruct = errors.New(
-		"toml: cannot encode an anonymous field that is not a struct")
-	errArrayNoTable = errors.New(
-		"toml: TOML array element cannot contain a table")
-	errNoKey = errors.New(
-		"toml: top-level values must be Go maps or structs")
-	errAnything = errors.New("") // used in testing
-)
-
-var quotedReplacer = strings.NewReplacer(
-	"\t", "\\t",
-	"\n", "\\n",
-	"\r", "\\r",
-	"\"", "\\\"",
-	"\\", "\\\\",
-)
-
-// Encoder controls the encoding of Go values to a TOML document to some
-// io.Writer.
-//
-// The indentation level can be controlled with the Indent field.
-type Encoder struct {
-	// A single indentation level. By default it is two spaces.
-	Indent string
-
-	// hasWritten is whether we have written any output to w yet.
-	hasWritten bool
-	w          *bufio.Writer
-}
-
-// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
-// given. By default, a single indentation level is 2 spaces.
-func NewEncoder(w io.Writer) *Encoder {
-	return &Encoder{
-		w:      bufio.NewWriter(w),
-		Indent: "  ",
-	}
-}
-
-// Encode writes a TOML representation of the Go value to the underlying
-// io.Writer. If the value given cannot be encoded to a valid TOML document,
-// then an error is returned.
-//
-// The mapping between Go values and TOML values should be precisely the same
-// as for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. (If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.)
-//
-// When encoding TOML hashes (i.e., Go maps or structs), keys without any
-// sub-hashes are encoded first.
-//
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
-//
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
-func (enc *Encoder) Encode(v interface{}) error {
-	rv := eindirect(reflect.ValueOf(v))
-	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
-		return err
-	}
-	return enc.w.Flush()
-}
-
-func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			if terr, ok := r.(tomlEncodeError); ok {
-				err = terr.error
-				return
-			}
-			panic(r)
-		}
-	}()
-	enc.encode(key, rv)
-	return nil
-}
-
-func (enc *Encoder) encode(key Key, rv reflect.Value) {
-	// Special case. Time needs to be in ISO8601 format.
-	// Special case. If we can marshal the type to text, then we used that.
-	// Basically, this prevents the encoder for handling these types as
-	// generic structs (or whatever the underlying type of a TextMarshaler is).
-	switch rv.Interface().(type) {
-	case time.Time, TextMarshaler:
-		enc.keyEqElement(key, rv)
-		return
-	}
-
-	k := rv.Kind()
-	switch k {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
-		reflect.Int64,
-		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
-		reflect.Uint64,
-		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
-		enc.keyEqElement(key, rv)
-	case reflect.Array, reflect.Slice:
-		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
-			enc.eArrayOfTables(key, rv)
-		} else {
-			enc.keyEqElement(key, rv)
-		}
-	case reflect.Interface:
-		if rv.IsNil() {
-			return
-		}
-		enc.encode(key, rv.Elem())
-	case reflect.Map:
-		if rv.IsNil() {
-			return
-		}
-		enc.eTable(key, rv)
-	case reflect.Ptr:
-		if rv.IsNil() {
-			return
-		}
-		enc.encode(key, rv.Elem())
-	case reflect.Struct:
-		enc.eTable(key, rv)
-	default:
-		panic(e("unsupported type for key '%s': %s", key, k))
-	}
-}
-
-// eElement encodes any value that can be an array element (primitives and
-// arrays).
-func (enc *Encoder) eElement(rv reflect.Value) {
-	switch v := rv.Interface().(type) {
-	case time.Time:
-		// Special case time.Time as a primitive. Has to come before
-		// TextMarshaler below because time.Time implements
-		// encoding.TextMarshaler, but we need to always use UTC.
-		enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
-		return
-	case TextMarshaler:
-		// Special case. Use text marshaler if it's available for this value.
-		if s, err := v.MarshalText(); err != nil {
-			encPanic(err)
-		} else {
-			enc.writeQuoted(string(s))
-		}
-		return
-	}
-	switch rv.Kind() {
-	case reflect.Bool:
-		enc.wf(strconv.FormatBool(rv.Bool()))
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
-		reflect.Int64:
-		enc.wf(strconv.FormatInt(rv.Int(), 10))
-	case reflect.Uint, reflect.Uint8, reflect.Uint16,
-		reflect.Uint32, reflect.Uint64:
-		enc.wf(strconv.FormatUint(rv.Uint(), 10))
-	case reflect.Float32:
-		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
-	case reflect.Float64:
-		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
-	case reflect.Array, reflect.Slice:
-		enc.eArrayOrSliceElement(rv)
-	case reflect.Interface:
-		enc.eElement(rv.Elem())
-	case reflect.String:
-		enc.writeQuoted(rv.String())
-	default:
-		panic(e("unexpected primitive type: %s", rv.Kind()))
-	}
-}
-
-// By the TOML spec, all floats must have a decimal with at least one
-// number on either side.
-func floatAddDecimal(fstr string) string {
-	if !strings.Contains(fstr, ".") {
-		return fstr + ".0"
-	}
-	return fstr
-}
-
-func (enc *Encoder) writeQuoted(s string) {
-	enc.wf("\"%s\"", quotedReplacer.Replace(s))
-}
-
-func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
-	length := rv.Len()
-	enc.wf("[")
-	for i := 0; i < length; i++ {
-		elem := rv.Index(i)
-		enc.eElement(elem)
-		if i != length-1 {
-			enc.wf(", ")
-		}
-	}
-	enc.wf("]")
-}
-
-func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
-	if len(key) == 0 {
-		encPanic(errNoKey)
-	}
-	for i := 0; i < rv.Len(); i++ {
-		trv := rv.Index(i)
-		if isNil(trv) {
-			continue
-		}
-		panicIfInvalidKey(key)
-		enc.newline()
-		enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
-		enc.newline()
-		enc.eMapOrStruct(key, trv)
-	}
-}
-
-func (enc *Encoder) eTable(key Key, rv reflect.Value) {
-	panicIfInvalidKey(key)
-	if len(key) == 1 {
-		// Output an extra newline between top-level tables.
-		// (The newline isn't written if nothing else has been written though.)
-		enc.newline()
-	}
-	if len(key) > 0 {
-		enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
-		enc.newline()
-	}
-	enc.eMapOrStruct(key, rv)
-}
-
-func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
-	switch rv := eindirect(rv); rv.Kind() {
-	case reflect.Map:
-		enc.eMap(key, rv)
-	case reflect.Struct:
-		enc.eStruct(key, rv)
-	default:
-		panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
-	}
-}
-
-func (enc *Encoder) eMap(key Key, rv reflect.Value) {
-	rt := rv.Type()
-	if rt.Key().Kind() != reflect.String {
-		encPanic(errNonString)
-	}
-
-	// Sort keys so that we have deterministic output. And write keys directly
-	// underneath this key first, before writing sub-structs or sub-maps.
-	var mapKeysDirect, mapKeysSub []string
-	for _, mapKey := range rv.MapKeys() {
-		k := mapKey.String()
-		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
-			mapKeysSub = append(mapKeysSub, k)
-		} else {
-			mapKeysDirect = append(mapKeysDirect, k)
-		}
-	}
-
-	var writeMapKeys = func(mapKeys []string) {
-		sort.Strings(mapKeys)
-		for _, mapKey := range mapKeys {
-			mrv := rv.MapIndex(reflect.ValueOf(mapKey))
-			if isNil(mrv) {
-				// Don't write anything for nil fields.
-				continue
-			}
-			enc.encode(key.add(mapKey), mrv)
-		}
-	}
-	writeMapKeys(mapKeysDirect)
-	writeMapKeys(mapKeysSub)
-}
-
-func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
-	// Write keys for fields directly under this key first, because if we write
-	// a field that creates a new table, then all keys under it will be in that
-	// table (not the one we're writing here).
-	rt := rv.Type()
-	var fieldsDirect, fieldsSub [][]int
-	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
-	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
-		for i := 0; i < rt.NumField(); i++ {
-			f := rt.Field(i)
-			// skip unexported fields
-			if f.PkgPath != "" && !f.Anonymous {
-				continue
-			}
-			frv := rv.Field(i)
-			if f.Anonymous {
-				t := f.Type
-				switch t.Kind() {
-				case reflect.Struct:
-					// Treat anonymous struct fields with
-					// tag names as though they are not
-					// anonymous, like encoding/json does.
-					if getOptions(f.Tag).name == "" {
-						addFields(t, frv, f.Index)
-						continue
-					}
-				case reflect.Ptr:
-					if t.Elem().Kind() == reflect.Struct &&
-						getOptions(f.Tag).name == "" {
-						if !frv.IsNil() {
-							addFields(t.Elem(), frv.Elem(), f.Index)
-						}
-						continue
-					}
-					// Fall through to the normal field encoding logic below
-					// for non-struct anonymous fields.
-				}
-			}
-
-			if typeIsHash(tomlTypeOfGo(frv)) {
-				fieldsSub = append(fieldsSub, append(start, f.Index...))
-			} else {
-				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
-			}
-		}
-	}
-	addFields(rt, rv, nil)
-
-	var writeFields = func(fields [][]int) {
-		for _, fieldIndex := range fields {
-			sft := rt.FieldByIndex(fieldIndex)
-			sf := rv.FieldByIndex(fieldIndex)
-			if isNil(sf) {
-				// Don't write anything for nil fields.
-				continue
-			}
-
-			opts := getOptions(sft.Tag)
-			if opts.skip {
-				continue
-			}
-			keyName := sft.Name
-			if opts.name != "" {
-				keyName = opts.name
-			}
-			if opts.omitempty && isEmpty(sf) {
-				continue
-			}
-			if opts.omitzero && isZero(sf) {
-				continue
-			}
-
-			enc.encode(key.add(keyName), sf)
-		}
-	}
-	writeFields(fieldsDirect)
-	writeFields(fieldsSub)
-}
-
-// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
-// which means no concrete TOML type could be found. It is used, among other
-// things, to determine whether the types of array elements are mixed (which
-// is forbidden).
-func tomlTypeOfGo(rv reflect.Value) tomlType {
-	if isNil(rv) || !rv.IsValid() {
-		return nil
-	}
-	switch rv.Kind() {
-	case reflect.Bool:
-		return tomlBool
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
-		reflect.Int64,
-		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
-		reflect.Uint64:
-		return tomlInteger
-	case reflect.Float32, reflect.Float64:
-		return tomlFloat
-	case reflect.Array, reflect.Slice:
-		if typeEqual(tomlHash, tomlArrayType(rv)) {
-			return tomlArrayHash
-		}
-		return tomlArray
-	case reflect.Ptr, reflect.Interface:
-		return tomlTypeOfGo(rv.Elem())
-	case reflect.String:
-		return tomlString
-	case reflect.Map:
-		return tomlHash
-	case reflect.Struct:
-		switch rv.Interface().(type) {
-		case time.Time:
-			return tomlDatetime
-		case TextMarshaler:
-			return tomlString
-		default:
-			return tomlHash
-		}
-	default:
-		panic("unexpected reflect.Kind: " + rv.Kind().String())
-	}
-}
-
-// tomlArrayType returns the element type of a TOML array. The type returned
-// may be nil if it cannot be determined (e.g., a nil slice or a zero length
-// slice). This function may also panic if it finds a type that cannot be
-// expressed in TOML (such as nil elements, heterogeneous arrays or directly
-// nested arrays of tables).
-func tomlArrayType(rv reflect.Value) tomlType {
-	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
-		return nil
-	}
-	firstType := tomlTypeOfGo(rv.Index(0))
-	if firstType == nil {
-		encPanic(errArrayNilElement)
-	}
-
-	rvlen := rv.Len()
-	for i := 1; i < rvlen; i++ {
-		elem := rv.Index(i)
-		switch elemType := tomlTypeOfGo(elem); {
-		case elemType == nil:
-			encPanic(errArrayNilElement)
-		case !typeEqual(firstType, elemType):
-			encPanic(errArrayMixedElementTypes)
-		}
-	}
-	// If we have a nested array, then we must make sure that the nested
-	// array contains ONLY primitives.
-	// This checks arbitrarily nested arrays.
-	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
-		nest := tomlArrayType(eindirect(rv.Index(0)))
-		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
-			encPanic(errArrayNoTable)
-		}
-	}
-	return firstType
-}
-
-type tagOptions struct {
-	skip      bool // "-"
-	name      string
-	omitempty bool
-	omitzero  bool
-}
-
-func getOptions(tag reflect.StructTag) tagOptions {
-	t := tag.Get("toml")
-	if t == "-" {
-		return tagOptions{skip: true}
-	}
-	var opts tagOptions
-	parts := strings.Split(t, ",")
-	opts.name = parts[0]
-	for _, s := range parts[1:] {
-		switch s {
-		case "omitempty":
-			opts.omitempty = true
-		case "omitzero":
-			opts.omitzero = true
-		}
-	}
-	return opts
-}
-
-func isZero(rv reflect.Value) bool {
-	switch rv.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return rv.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		return rv.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return rv.Float() == 0.0
-	}
-	return false
-}
-
-func isEmpty(rv reflect.Value) bool {
-	switch rv.Kind() {
-	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
-		return rv.Len() == 0
-	case reflect.Bool:
-		return !rv.Bool()
-	}
-	return false
-}
-
-func (enc *Encoder) newline() {
-	if enc.hasWritten {
-		enc.wf("\n")
-	}
-}
-
-func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
-	if len(key) == 0 {
-		encPanic(errNoKey)
-	}
-	panicIfInvalidKey(key)
-	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
-	enc.eElement(val)
-	enc.newline()
-}
-
-func (enc *Encoder) wf(format string, v ...interface{}) {
-	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
-		encPanic(err)
-	}
-	enc.hasWritten = true
-}
-
-func (enc *Encoder) indentStr(key Key) string {
-	return strings.Repeat(enc.Indent, len(key)-1)
-}
-
-func encPanic(err error) {
-	panic(tomlEncodeError{err})
-}
-
-func eindirect(v reflect.Value) reflect.Value {
-	switch v.Kind() {
-	case reflect.Ptr, reflect.Interface:
-		return eindirect(v.Elem())
-	default:
-		return v
-	}
-}
-
-func isNil(rv reflect.Value) bool {
-	switch rv.Kind() {
-	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
-		return rv.IsNil()
-	default:
-		return false
-	}
-}
-
-func panicIfInvalidKey(key Key) {
-	for _, k := range key {
-		if len(k) == 0 {
-			encPanic(e("Key '%s' is not a valid table name. Key names "+
-				"cannot be empty.", key.maybeQuotedAll()))
-		}
-	}
-}
-
-func isValidKeyName(s string) bool {
-	return len(s) != 0
-}
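
The tail of the removed encode.go above shows how the encoder honors "toml" struct tags via getOptions: a tag of "-" skips the field, the first tag element renames it, and ",omitempty"/",omitzero" suppress empty or zero values. For reference, a minimal sketch of that behavior against the public github.com/BurntSushi/toml API that this commit drops from the vendor tree (the config type and its fields are illustrative only):

	package main

	import (
		"bytes"
		"fmt"

		"github.com/BurntSushi/toml"
	)

	type config struct {
		Name    string   `toml:"name"`
		Secret  string   `toml:"-"`                // "-" skips the field entirely
		Tags    []string `toml:"tags,omitempty"`   // dropped when the slice is empty
		Retries int      `toml:"retries,omitzero"` // dropped when the value is zero
	}

	func main() {
		var buf bytes.Buffer
		if err := toml.NewEncoder(&buf).Encode(config{Name: "demo"}); err != nil {
			panic(err)
		}
		fmt.Print(buf.String()) // prints only: name = "demo"
	}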

+ 0 - 19
vendor/github.com/BurntSushi/toml/encoding_types.go

@@ -1,19 +0,0 @@
-// +build go1.2
-
-package toml
-
-// In order to support Go 1.1, we define our own TextMarshaler and
-// TextUnmarshaler types. For Go 1.2+, we just alias them with the
-// standard library interfaces.
-
-import (
-	"encoding"
-)
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler encoding.TextMarshaler
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler encoding.TextUnmarshaler

+ 0 - 18
vendor/github.com/BurntSushi/toml/encoding_types_1.1.go

@@ -1,18 +0,0 @@
-// +build !go1.2
-
-package toml
-
-// These interfaces were introduced in Go 1.2, so we add them manually when
-// compiling for Go 1.1.
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler interface {
-	MarshalText() (text []byte, err error)
-}
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler interface {
-	UnmarshalText(text []byte) error
-}
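
The two small files above only alias encoding.TextMarshaler and TextUnmarshaler so that pre-Go-1.2 builds keep working; as tomlTypeOfGo in the removed encode.go shows, a struct value implementing TextMarshaler is treated as a TOML string rather than a table. A hedged sketch of the effect (the semver type and the field name are illustrative):

	package main

	import (
		"bytes"
		"fmt"

		"github.com/BurntSushi/toml"
	)

	// semver implements encoding.TextMarshaler, so the encoder writes it as a
	// quoted TOML string instead of a [version] table.
	type semver struct{ Major, Minor, Patch int }

	func (v semver) MarshalText() ([]byte, error) {
		return []byte(fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)), nil
	}

	func main() {
		var buf bytes.Buffer
		err := toml.NewEncoder(&buf).Encode(struct {
			Version semver `toml:"version"`
		}{Version: semver{Major: 1, Minor: 2, Patch: 3}})
		if err != nil {
			panic(err)
		}
		fmt.Print(buf.String()) // version = "1.2.3"
	}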

+ 0 - 953
vendor/github.com/BurntSushi/toml/lex.go

@@ -1,953 +0,0 @@
-package toml
-
-import (
-	"fmt"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-)
-
-type itemType int
-
-const (
-	itemError itemType = iota
-	itemNIL            // used in the parser to indicate no type
-	itemEOF
-	itemText
-	itemString
-	itemRawString
-	itemMultilineString
-	itemRawMultilineString
-	itemBool
-	itemInteger
-	itemFloat
-	itemDatetime
-	itemArray // the start of an array
-	itemArrayEnd
-	itemTableStart
-	itemTableEnd
-	itemArrayTableStart
-	itemArrayTableEnd
-	itemKeyStart
-	itemCommentStart
-	itemInlineTableStart
-	itemInlineTableEnd
-)
-
-const (
-	eof              = 0
-	comma            = ','
-	tableStart       = '['
-	tableEnd         = ']'
-	arrayTableStart  = '['
-	arrayTableEnd    = ']'
-	tableSep         = '.'
-	keySep           = '='
-	arrayStart       = '['
-	arrayEnd         = ']'
-	commentStart     = '#'
-	stringStart      = '"'
-	stringEnd        = '"'
-	rawStringStart   = '\''
-	rawStringEnd     = '\''
-	inlineTableStart = '{'
-	inlineTableEnd   = '}'
-)
-
-type stateFn func(lx *lexer) stateFn
-
-type lexer struct {
-	input string
-	start int
-	pos   int
-	line  int
-	state stateFn
-	items chan item
-
-	// Allow for backing up up to three runes.
-	// This is necessary because TOML contains 3-rune tokens (""" and ''').
-	prevWidths [3]int
-	nprev      int // how many of prevWidths are in use
-	// If we emit an eof, we can still back up, but it is not OK to call
-	// next again.
-	atEOF bool
-
-	// A stack of state functions used to maintain context.
-	// The idea is to reuse parts of the state machine in various places.
-	// For example, values can appear at the top level or within arbitrarily
-	// nested arrays. The last state on the stack is used after a value has
-	// been lexed. Similarly for comments.
-	stack []stateFn
-}
-
-type item struct {
-	typ  itemType
-	val  string
-	line int
-}
-
-func (lx *lexer) nextItem() item {
-	for {
-		select {
-		case item := <-lx.items:
-			return item
-		default:
-			lx.state = lx.state(lx)
-		}
-	}
-}
-
-func lex(input string) *lexer {
-	lx := &lexer{
-		input: input,
-		state: lexTop,
-		line:  1,
-		items: make(chan item, 10),
-		stack: make([]stateFn, 0, 10),
-	}
-	return lx
-}
-
-func (lx *lexer) push(state stateFn) {
-	lx.stack = append(lx.stack, state)
-}
-
-func (lx *lexer) pop() stateFn {
-	if len(lx.stack) == 0 {
-		return lx.errorf("BUG in lexer: no states to pop")
-	}
-	last := lx.stack[len(lx.stack)-1]
-	lx.stack = lx.stack[0 : len(lx.stack)-1]
-	return last
-}
-
-func (lx *lexer) current() string {
-	return lx.input[lx.start:lx.pos]
-}
-
-func (lx *lexer) emit(typ itemType) {
-	lx.items <- item{typ, lx.current(), lx.line}
-	lx.start = lx.pos
-}
-
-func (lx *lexer) emitTrim(typ itemType) {
-	lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
-	lx.start = lx.pos
-}
-
-func (lx *lexer) next() (r rune) {
-	if lx.atEOF {
-		panic("next called after EOF")
-	}
-	if lx.pos >= len(lx.input) {
-		lx.atEOF = true
-		return eof
-	}
-
-	if lx.input[lx.pos] == '\n' {
-		lx.line++
-	}
-	lx.prevWidths[2] = lx.prevWidths[1]
-	lx.prevWidths[1] = lx.prevWidths[0]
-	if lx.nprev < 3 {
-		lx.nprev++
-	}
-	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
-	lx.prevWidths[0] = w
-	lx.pos += w
-	return r
-}
-
-// ignore skips over the pending input before this point.
-func (lx *lexer) ignore() {
-	lx.start = lx.pos
-}
-
-// backup steps back one rune. Can be called only twice between calls to next.
-func (lx *lexer) backup() {
-	if lx.atEOF {
-		lx.atEOF = false
-		return
-	}
-	if lx.nprev < 1 {
-		panic("backed up too far")
-	}
-	w := lx.prevWidths[0]
-	lx.prevWidths[0] = lx.prevWidths[1]
-	lx.prevWidths[1] = lx.prevWidths[2]
-	lx.nprev--
-	lx.pos -= w
-	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
-		lx.line--
-	}
-}
-
-// accept consumes the next rune if it's equal to `valid`.
-func (lx *lexer) accept(valid rune) bool {
-	if lx.next() == valid {
-		return true
-	}
-	lx.backup()
-	return false
-}
-
-// peek returns but does not consume the next rune in the input.
-func (lx *lexer) peek() rune {
-	r := lx.next()
-	lx.backup()
-	return r
-}
-
-// skip ignores all input that matches the given predicate.
-func (lx *lexer) skip(pred func(rune) bool) {
-	for {
-		r := lx.next()
-		if pred(r) {
-			continue
-		}
-		lx.backup()
-		lx.ignore()
-		return
-	}
-}
-
-// errorf stops all lexing by emitting an error and returning `nil`.
-// Note that any value formatted with %q is escaped if it is a special
-// character (a newline, tab, etc.).
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
-	lx.items <- item{
-		itemError,
-		fmt.Sprintf(format, values...),
-		lx.line,
-	}
-	return nil
-}
-
-// lexTop consumes elements at the top level of TOML data.
-func lexTop(lx *lexer) stateFn {
-	r := lx.next()
-	if isWhitespace(r) || isNL(r) {
-		return lexSkip(lx, lexTop)
-	}
-	switch r {
-	case commentStart:
-		lx.push(lexTop)
-		return lexCommentStart
-	case tableStart:
-		return lexTableStart
-	case eof:
-		if lx.pos > lx.start {
-			return lx.errorf("unexpected EOF")
-		}
-		lx.emit(itemEOF)
-		return nil
-	}
-
-	// At this point, the only valid item can be a key, so we back up
-	// and let the key lexer do the rest.
-	lx.backup()
-	lx.push(lexTopEnd)
-	return lexKeyStart
-}
-
-// lexTopEnd is entered whenever a top-level item has been consumed. (A value
-// or a table.) It must see only whitespace, and will turn back to lexTop
-// upon a newline. If it sees EOF, it will quit the lexer successfully.
-func lexTopEnd(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case r == commentStart:
-		// a comment will read to a newline for us.
-		lx.push(lexTop)
-		return lexCommentStart
-	case isWhitespace(r):
-		return lexTopEnd
-	case isNL(r):
-		lx.ignore()
-		return lexTop
-	case r == eof:
-		lx.emit(itemEOF)
-		return nil
-	}
-	return lx.errorf("expected a top-level item to end with a newline, "+
-		"comment, or EOF, but got %q instead", r)
-}
-
-// lexTable lexes the beginning of a table. Namely, it makes sure that
-// it starts with a character other than '.' and ']'.
-// It assumes that '[' has already been consumed.
-// It also handles the case that this is an item in an array of tables.
-// e.g., '[[name]]'.
-func lexTableStart(lx *lexer) stateFn {
-	if lx.peek() == arrayTableStart {
-		lx.next()
-		lx.emit(itemArrayTableStart)
-		lx.push(lexArrayTableEnd)
-	} else {
-		lx.emit(itemTableStart)
-		lx.push(lexTableEnd)
-	}
-	return lexTableNameStart
-}
-
-func lexTableEnd(lx *lexer) stateFn {
-	lx.emit(itemTableEnd)
-	return lexTopEnd
-}
-
-func lexArrayTableEnd(lx *lexer) stateFn {
-	if r := lx.next(); r != arrayTableEnd {
-		return lx.errorf("expected end of table array name delimiter %q, "+
-			"but got %q instead", arrayTableEnd, r)
-	}
-	lx.emit(itemArrayTableEnd)
-	return lexTopEnd
-}
-
-func lexTableNameStart(lx *lexer) stateFn {
-	lx.skip(isWhitespace)
-	switch r := lx.peek(); {
-	case r == tableEnd || r == eof:
-		return lx.errorf("unexpected end of table name " +
-			"(table names cannot be empty)")
-	case r == tableSep:
-		return lx.errorf("unexpected table separator " +
-			"(table names cannot be empty)")
-	case r == stringStart || r == rawStringStart:
-		lx.ignore()
-		lx.push(lexTableNameEnd)
-		return lexValue // reuse string lexing
-	default:
-		return lexBareTableName
-	}
-}
-
-// lexBareTableName lexes a bare (unquoted) table name. The first character
-// has been peeked at, but not yet consumed.
-func lexBareTableName(lx *lexer) stateFn {
-	r := lx.next()
-	if isBareKeyChar(r) {
-		return lexBareTableName
-	}
-	lx.backup()
-	lx.emit(itemText)
-	return lexTableNameEnd
-}
-
-// lexTableNameEnd reads the end of a piece of a table name, optionally
-// consuming whitespace.
-func lexTableNameEnd(lx *lexer) stateFn {
-	lx.skip(isWhitespace)
-	switch r := lx.next(); {
-	case isWhitespace(r):
-		return lexTableNameEnd
-	case r == tableSep:
-		lx.ignore()
-		return lexTableNameStart
-	case r == tableEnd:
-		return lx.pop()
-	default:
-		return lx.errorf("expected '.' or ']' to end table name, "+
-			"but got %q instead", r)
-	}
-}
-
-// lexKeyStart consumes a key name up until the first non-whitespace character.
-// lexKeyStart will ignore whitespace.
-func lexKeyStart(lx *lexer) stateFn {
-	r := lx.peek()
-	switch {
-	case r == keySep:
-		return lx.errorf("unexpected key separator %q", keySep)
-	case isWhitespace(r) || isNL(r):
-		lx.next()
-		return lexSkip(lx, lexKeyStart)
-	case r == stringStart || r == rawStringStart:
-		lx.ignore()
-		lx.emit(itemKeyStart)
-		lx.push(lexKeyEnd)
-		return lexValue // reuse string lexing
-	default:
-		lx.ignore()
-		lx.emit(itemKeyStart)
-		return lexBareKey
-	}
-}
-
-// lexBareKey consumes the text of a bare key. Assumes that the first character
-// (which is not whitespace) has not yet been consumed.
-func lexBareKey(lx *lexer) stateFn {
-	switch r := lx.next(); {
-	case isBareKeyChar(r):
-		return lexBareKey
-	case isWhitespace(r):
-		lx.backup()
-		lx.emit(itemText)
-		return lexKeyEnd
-	case r == keySep:
-		lx.backup()
-		lx.emit(itemText)
-		return lexKeyEnd
-	default:
-		return lx.errorf("bare keys cannot contain %q", r)
-	}
-}
-
-// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
-// separator).
-func lexKeyEnd(lx *lexer) stateFn {
-	switch r := lx.next(); {
-	case r == keySep:
-		return lexSkip(lx, lexValue)
-	case isWhitespace(r):
-		return lexSkip(lx, lexKeyEnd)
-	default:
-		return lx.errorf("expected key separator %q, but got %q instead",
-			keySep, r)
-	}
-}
-
-// lexValue starts the consumption of a value anywhere a value is expected.
-// lexValue will ignore whitespace.
-// After a value is lexed, the last state on the stack is popped and returned.
-func lexValue(lx *lexer) stateFn {
-	// We allow whitespace to precede a value, but NOT newlines.
-	// In array syntax, the array states are responsible for ignoring newlines.
-	r := lx.next()
-	switch {
-	case isWhitespace(r):
-		return lexSkip(lx, lexValue)
-	case isDigit(r):
-		lx.backup() // avoid an extra state and use the same as above
-		return lexNumberOrDateStart
-	}
-	switch r {
-	case arrayStart:
-		lx.ignore()
-		lx.emit(itemArray)
-		return lexArrayValue
-	case inlineTableStart:
-		lx.ignore()
-		lx.emit(itemInlineTableStart)
-		return lexInlineTableValue
-	case stringStart:
-		if lx.accept(stringStart) {
-			if lx.accept(stringStart) {
-				lx.ignore() // Ignore """
-				return lexMultilineString
-			}
-			lx.backup()
-		}
-		lx.ignore() // ignore the '"'
-		return lexString
-	case rawStringStart:
-		if lx.accept(rawStringStart) {
-			if lx.accept(rawStringStart) {
-				lx.ignore() // Ignore '''
-				return lexMultilineRawString
-			}
-			lx.backup()
-		}
-		lx.ignore() // ignore the "'"
-		return lexRawString
-	case '+', '-':
-		return lexNumberStart
-	case '.': // special error case, be kind to users
-		return lx.errorf("floats must start with a digit, not '.'")
-	}
-	if unicode.IsLetter(r) {
-		// Be permissive here; lexBool will give a nice error if the
-		// user wrote something like
-		//   x = foo
-		// (i.e. not 'true' or 'false' but is something else word-like.)
-		lx.backup()
-		return lexBool
-	}
-	return lx.errorf("expected value but found %q instead", r)
-}
-
-// lexArrayValue consumes one value in an array. It assumes that '[' or ','
-// have already been consumed. All whitespace and newlines are ignored.
-func lexArrayValue(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case isWhitespace(r) || isNL(r):
-		return lexSkip(lx, lexArrayValue)
-	case r == commentStart:
-		lx.push(lexArrayValue)
-		return lexCommentStart
-	case r == comma:
-		return lx.errorf("unexpected comma")
-	case r == arrayEnd:
-		// NOTE(caleb): The spec isn't clear about whether you can have
-		// a trailing comma or not, so we'll allow it.
-		return lexArrayEnd
-	}
-
-	lx.backup()
-	lx.push(lexArrayValueEnd)
-	return lexValue
-}
-
-// lexArrayValueEnd consumes everything between the end of an array value and
-// the next value (or the end of the array): it ignores whitespace and newlines
-// and expects either a ',' or a ']'.
-func lexArrayValueEnd(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case isWhitespace(r) || isNL(r):
-		return lexSkip(lx, lexArrayValueEnd)
-	case r == commentStart:
-		lx.push(lexArrayValueEnd)
-		return lexCommentStart
-	case r == comma:
-		lx.ignore()
-		return lexArrayValue // move on to the next value
-	case r == arrayEnd:
-		return lexArrayEnd
-	}
-	return lx.errorf(
-		"expected a comma or array terminator %q, but got %q instead",
-		arrayEnd, r,
-	)
-}
-
-// lexArrayEnd finishes the lexing of an array.
-// It assumes that a ']' has just been consumed.
-func lexArrayEnd(lx *lexer) stateFn {
-	lx.ignore()
-	lx.emit(itemArrayEnd)
-	return lx.pop()
-}
-
-// lexInlineTableValue consumes one key/value pair in an inline table.
-// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
-func lexInlineTableValue(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case isWhitespace(r):
-		return lexSkip(lx, lexInlineTableValue)
-	case isNL(r):
-		return lx.errorf("newlines not allowed within inline tables")
-	case r == commentStart:
-		lx.push(lexInlineTableValue)
-		return lexCommentStart
-	case r == comma:
-		return lx.errorf("unexpected comma")
-	case r == inlineTableEnd:
-		return lexInlineTableEnd
-	}
-	lx.backup()
-	lx.push(lexInlineTableValueEnd)
-	return lexKeyStart
-}
-
-// lexInlineTableValueEnd consumes everything between the end of an inline table
-// key/value pair and the next pair (or the end of the table):
-// it ignores whitespace and expects either a ',' or a '}'.
-func lexInlineTableValueEnd(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case isWhitespace(r):
-		return lexSkip(lx, lexInlineTableValueEnd)
-	case isNL(r):
-		return lx.errorf("newlines not allowed within inline tables")
-	case r == commentStart:
-		lx.push(lexInlineTableValueEnd)
-		return lexCommentStart
-	case r == comma:
-		lx.ignore()
-		return lexInlineTableValue
-	case r == inlineTableEnd:
-		return lexInlineTableEnd
-	}
-	return lx.errorf("expected a comma or an inline table terminator %q, "+
-		"but got %q instead", inlineTableEnd, r)
-}
-
-// lexInlineTableEnd finishes the lexing of an inline table.
-// It assumes that a '}' has just been consumed.
-func lexInlineTableEnd(lx *lexer) stateFn {
-	lx.ignore()
-	lx.emit(itemInlineTableEnd)
-	return lx.pop()
-}
-
-// lexString consumes the inner contents of a string. It assumes that the
-// beginning '"' has already been consumed and ignored.
-func lexString(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case r == eof:
-		return lx.errorf("unexpected EOF")
-	case isNL(r):
-		return lx.errorf("strings cannot contain newlines")
-	case r == '\\':
-		lx.push(lexString)
-		return lexStringEscape
-	case r == stringEnd:
-		lx.backup()
-		lx.emit(itemString)
-		lx.next()
-		lx.ignore()
-		return lx.pop()
-	}
-	return lexString
-}
-
-// lexMultilineString consumes the inner contents of a string. It assumes that
-// the beginning '"""' has already been consumed and ignored.
-func lexMultilineString(lx *lexer) stateFn {
-	switch lx.next() {
-	case eof:
-		return lx.errorf("unexpected EOF")
-	case '\\':
-		return lexMultilineStringEscape
-	case stringEnd:
-		if lx.accept(stringEnd) {
-			if lx.accept(stringEnd) {
-				lx.backup()
-				lx.backup()
-				lx.backup()
-				lx.emit(itemMultilineString)
-				lx.next()
-				lx.next()
-				lx.next()
-				lx.ignore()
-				return lx.pop()
-			}
-			lx.backup()
-		}
-	}
-	return lexMultilineString
-}
-
-// lexRawString consumes a raw string. Nothing can be escaped in such a string.
-// It assumes that the beginning "'" has already been consumed and ignored.
-func lexRawString(lx *lexer) stateFn {
-	r := lx.next()
-	switch {
-	case r == eof:
-		return lx.errorf("unexpected EOF")
-	case isNL(r):
-		return lx.errorf("strings cannot contain newlines")
-	case r == rawStringEnd:
-		lx.backup()
-		lx.emit(itemRawString)
-		lx.next()
-		lx.ignore()
-		return lx.pop()
-	}
-	return lexRawString
-}
-
-// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning "'''" has already been consumed and
-// ignored.
-func lexMultilineRawString(lx *lexer) stateFn {
-	switch lx.next() {
-	case eof:
-		return lx.errorf("unexpected EOF")
-	case rawStringEnd:
-		if lx.accept(rawStringEnd) {
-			if lx.accept(rawStringEnd) {
-				lx.backup()
-				lx.backup()
-				lx.backup()
-				lx.emit(itemRawMultilineString)
-				lx.next()
-				lx.next()
-				lx.next()
-				lx.ignore()
-				return lx.pop()
-			}
-			lx.backup()
-		}
-	}
-	return lexMultilineRawString
-}
-
-// lexMultilineStringEscape consumes an escaped character. It assumes that the
-// preceding '\\' has already been consumed.
-func lexMultilineStringEscape(lx *lexer) stateFn {
-	// Handle the special case first:
-	if isNL(lx.next()) {
-		return lexMultilineString
-	}
-	lx.backup()
-	lx.push(lexMultilineString)
-	return lexStringEscape(lx)
-}
-
-func lexStringEscape(lx *lexer) stateFn {
-	r := lx.next()
-	switch r {
-	case 'b':
-		fallthrough
-	case 't':
-		fallthrough
-	case 'n':
-		fallthrough
-	case 'f':
-		fallthrough
-	case 'r':
-		fallthrough
-	case '"':
-		fallthrough
-	case '\\':
-		return lx.pop()
-	case 'u':
-		return lexShortUnicodeEscape
-	case 'U':
-		return lexLongUnicodeEscape
-	}
-	return lx.errorf("invalid escape character %q; only the following "+
-		"escape characters are allowed: "+
-		`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
-}
-
-func lexShortUnicodeEscape(lx *lexer) stateFn {
-	var r rune
-	for i := 0; i < 4; i++ {
-		r = lx.next()
-		if !isHexadecimal(r) {
-			return lx.errorf(`expected four hexadecimal digits after '\u', `+
-				"but got %q instead", lx.current())
-		}
-	}
-	return lx.pop()
-}
-
-func lexLongUnicodeEscape(lx *lexer) stateFn {
-	var r rune
-	for i := 0; i < 8; i++ {
-		r = lx.next()
-		if !isHexadecimal(r) {
-			return lx.errorf(`expected eight hexadecimal digits after '\U', `+
-				"but got %q instead", lx.current())
-		}
-	}
-	return lx.pop()
-}
-
-// lexNumberOrDateStart consumes either an integer, a float, or datetime.
-func lexNumberOrDateStart(lx *lexer) stateFn {
-	r := lx.next()
-	if isDigit(r) {
-		return lexNumberOrDate
-	}
-	switch r {
-	case '_':
-		return lexNumber
-	case 'e', 'E':
-		return lexFloat
-	case '.':
-		return lx.errorf("floats must start with a digit, not '.'")
-	}
-	return lx.errorf("expected a digit but got %q", r)
-}
-
-// lexNumberOrDate consumes either an integer, float or datetime.
-func lexNumberOrDate(lx *lexer) stateFn {
-	r := lx.next()
-	if isDigit(r) {
-		return lexNumberOrDate
-	}
-	switch r {
-	case '-':
-		return lexDatetime
-	case '_':
-		return lexNumber
-	case '.', 'e', 'E':
-		return lexFloat
-	}
-
-	lx.backup()
-	lx.emit(itemInteger)
-	return lx.pop()
-}
-
-// lexDatetime consumes a Datetime, to a first approximation.
-// The parser validates that it matches one of the accepted formats.
-func lexDatetime(lx *lexer) stateFn {
-	r := lx.next()
-	if isDigit(r) {
-		return lexDatetime
-	}
-	switch r {
-	case '-', 'T', ':', '.', 'Z', '+':
-		return lexDatetime
-	}
-
-	lx.backup()
-	lx.emit(itemDatetime)
-	return lx.pop()
-}
-
-// lexNumberStart consumes either an integer or a float. It assumes that a sign
-// has already been read, but that *no* digits have been consumed.
-// lexNumberStart will move to the appropriate integer or float states.
-func lexNumberStart(lx *lexer) stateFn {
-	// We MUST see a digit. Even floats have to start with a digit.
-	r := lx.next()
-	if !isDigit(r) {
-		if r == '.' {
-			return lx.errorf("floats must start with a digit, not '.'")
-		}
-		return lx.errorf("expected a digit but got %q", r)
-	}
-	return lexNumber
-}
-
-// lexNumber consumes an integer or a float after seeing the first digit.
-func lexNumber(lx *lexer) stateFn {
-	r := lx.next()
-	if isDigit(r) {
-		return lexNumber
-	}
-	switch r {
-	case '_':
-		return lexNumber
-	case '.', 'e', 'E':
-		return lexFloat
-	}
-
-	lx.backup()
-	lx.emit(itemInteger)
-	return lx.pop()
-}
-
-// lexFloat consumes the elements of a float. It allows any sequence of
-// float-like characters, so floats emitted by the lexer are only a first
-// approximation and must be validated by the parser.
-func lexFloat(lx *lexer) stateFn {
-	r := lx.next()
-	if isDigit(r) {
-		return lexFloat
-	}
-	switch r {
-	case '_', '.', '-', '+', 'e', 'E':
-		return lexFloat
-	}
-
-	lx.backup()
-	lx.emit(itemFloat)
-	return lx.pop()
-}
-
-// lexBool consumes a bool string: 'true' or 'false'.
-func lexBool(lx *lexer) stateFn {
-	var rs []rune
-	for {
-		r := lx.next()
-		if !unicode.IsLetter(r) {
-			lx.backup()
-			break
-		}
-		rs = append(rs, r)
-	}
-	s := string(rs)
-	switch s {
-	case "true", "false":
-		lx.emit(itemBool)
-		return lx.pop()
-	}
-	return lx.errorf("expected value but found %q instead", s)
-}
-
-// lexCommentStart begins the lexing of a comment. It will emit
-// itemCommentStart and consume no characters, passing control to lexComment.
-func lexCommentStart(lx *lexer) stateFn {
-	lx.ignore()
-	lx.emit(itemCommentStart)
-	return lexComment
-}
-
-// lexComment lexes an entire comment. It assumes that '#' has been consumed.
-// It will consume *up to* the first newline character, and pass control
-// back to the last state on the stack.
-func lexComment(lx *lexer) stateFn {
-	r := lx.peek()
-	if isNL(r) || r == eof {
-		lx.emit(itemText)
-		return lx.pop()
-	}
-	lx.next()
-	return lexComment
-}
-
-// lexSkip ignores all slurped input and moves on to the next state.
-func lexSkip(lx *lexer, nextState stateFn) stateFn {
-	return func(lx *lexer) stateFn {
-		lx.ignore()
-		return nextState
-	}
-}
-
-// isWhitespace returns true if `r` is a whitespace character according
-// to the spec.
-func isWhitespace(r rune) bool {
-	return r == '\t' || r == ' '
-}
-
-func isNL(r rune) bool {
-	return r == '\n' || r == '\r'
-}
-
-func isDigit(r rune) bool {
-	return r >= '0' && r <= '9'
-}
-
-func isHexadecimal(r rune) bool {
-	return (r >= '0' && r <= '9') ||
-		(r >= 'a' && r <= 'f') ||
-		(r >= 'A' && r <= 'F')
-}
-
-func isBareKeyChar(r rune) bool {
-	return (r >= 'A' && r <= 'Z') ||
-		(r >= 'a' && r <= 'z') ||
-		(r >= '0' && r <= '9') ||
-		r == '_' ||
-		r == '-'
-}
-
-func (itype itemType) String() string {
-	switch itype {
-	case itemError:
-		return "Error"
-	case itemNIL:
-		return "NIL"
-	case itemEOF:
-		return "EOF"
-	case itemText:
-		return "Text"
-	case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
-		return "String"
-	case itemBool:
-		return "Bool"
-	case itemInteger:
-		return "Integer"
-	case itemFloat:
-		return "Float"
-	case itemDatetime:
-		return "DateTime"
-	case itemTableStart:
-		return "TableStart"
-	case itemTableEnd:
-		return "TableEnd"
-	case itemKeyStart:
-		return "KeyStart"
-	case itemArray:
-		return "Array"
-	case itemArrayEnd:
-		return "ArrayEnd"
-	case itemCommentStart:
-		return "CommentStart"
-	}
-	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
-}
-
-func (item item) String() string {
-	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
-}
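
The lexer above is internal to the package; what it accepts only surfaces through the package's Decode entry point. A small sketch that exercises several of the token kinds it recognizes (basic and raw strings, underscored integers, exponent floats, datetimes, arrays, inline tables); the document contents are made up for illustration:

	package main

	import (
		"fmt"

		"github.com/BurntSushi/toml"
	)

	const doc = `
	title = "basic \"string\""
	path  = 'C:\raw\string'
	big   = 1_000_000
	ratio = 6.626e-34
	when  = 1979-05-27T07:32:00Z
	tags  = ["a", "b"]
	point = { x = 1, y = 2 }
	`

	func main() {
		var v map[string]interface{}
		if _, err := toml.Decode(doc, &v); err != nil {
			panic(err)
		}
		fmt.Printf("%T %v\n", v["when"], v["big"]) // time.Time 1000000
	}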

+ 0 - 592
vendor/github.com/BurntSushi/toml/parse.go

@@ -1,592 +0,0 @@
-package toml
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-	"unicode"
-	"unicode/utf8"
-)
-
-type parser struct {
-	mapping map[string]interface{}
-	types   map[string]tomlType
-	lx      *lexer
-
-	// A list of keys in the order that they appear in the TOML data.
-	ordered []Key
-
-	// the full key for the current hash in scope
-	context Key
-
-	// the base key name for everything except hashes
-	currentKey string
-
-	// rough approximation of line number
-	approxLine int
-
-	// A map of 'key.group.names' to whether they were created implicitly.
-	implicits map[string]bool
-}
-
-type parseError string
-
-func (pe parseError) Error() string {
-	return string(pe)
-}
-
-func parse(data string) (p *parser, err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			var ok bool
-			if err, ok = r.(parseError); ok {
-				return
-			}
-			panic(r)
-		}
-	}()
-
-	p = &parser{
-		mapping:   make(map[string]interface{}),
-		types:     make(map[string]tomlType),
-		lx:        lex(data),
-		ordered:   make([]Key, 0),
-		implicits: make(map[string]bool),
-	}
-	for {
-		item := p.next()
-		if item.typ == itemEOF {
-			break
-		}
-		p.topLevel(item)
-	}
-
-	return p, nil
-}
-
-func (p *parser) panicf(format string, v ...interface{}) {
-	msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
-		p.approxLine, p.current(), fmt.Sprintf(format, v...))
-	panic(parseError(msg))
-}
-
-func (p *parser) next() item {
-	it := p.lx.nextItem()
-	if it.typ == itemError {
-		p.panicf("%s", it.val)
-	}
-	return it
-}
-
-func (p *parser) bug(format string, v ...interface{}) {
-	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
-}
-
-func (p *parser) expect(typ itemType) item {
-	it := p.next()
-	p.assertEqual(typ, it.typ)
-	return it
-}
-
-func (p *parser) assertEqual(expected, got itemType) {
-	if expected != got {
-		p.bug("Expected '%s' but got '%s'.", expected, got)
-	}
-}
-
-func (p *parser) topLevel(item item) {
-	switch item.typ {
-	case itemCommentStart:
-		p.approxLine = item.line
-		p.expect(itemText)
-	case itemTableStart:
-		kg := p.next()
-		p.approxLine = kg.line
-
-		var key Key
-		for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
-			key = append(key, p.keyString(kg))
-		}
-		p.assertEqual(itemTableEnd, kg.typ)
-
-		p.establishContext(key, false)
-		p.setType("", tomlHash)
-		p.ordered = append(p.ordered, key)
-	case itemArrayTableStart:
-		kg := p.next()
-		p.approxLine = kg.line
-
-		var key Key
-		for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
-			key = append(key, p.keyString(kg))
-		}
-		p.assertEqual(itemArrayTableEnd, kg.typ)
-
-		p.establishContext(key, true)
-		p.setType("", tomlArrayHash)
-		p.ordered = append(p.ordered, key)
-	case itemKeyStart:
-		kname := p.next()
-		p.approxLine = kname.line
-		p.currentKey = p.keyString(kname)
-
-		val, typ := p.value(p.next())
-		p.setValue(p.currentKey, val)
-		p.setType(p.currentKey, typ)
-		p.ordered = append(p.ordered, p.context.add(p.currentKey))
-		p.currentKey = ""
-	default:
-		p.bug("Unexpected type at top level: %s", item.typ)
-	}
-}
-
-// Gets a string for a key (or part of a key in a table name).
-func (p *parser) keyString(it item) string {
-	switch it.typ {
-	case itemText:
-		return it.val
-	case itemString, itemMultilineString,
-		itemRawString, itemRawMultilineString:
-		s, _ := p.value(it)
-		return s.(string)
-	default:
-		p.bug("Unexpected key type: %s", it.typ)
-		panic("unreachable")
-	}
-}
-
-// value translates an expected value from the lexer into a Go value wrapped
-// as an empty interface.
-func (p *parser) value(it item) (interface{}, tomlType) {
-	switch it.typ {
-	case itemString:
-		return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
-	case itemMultilineString:
-		trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
-		return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
-	case itemRawString:
-		return it.val, p.typeOfPrimitive(it)
-	case itemRawMultilineString:
-		return stripFirstNewline(it.val), p.typeOfPrimitive(it)
-	case itemBool:
-		switch it.val {
-		case "true":
-			return true, p.typeOfPrimitive(it)
-		case "false":
-			return false, p.typeOfPrimitive(it)
-		}
-		p.bug("Expected boolean value, but got '%s'.", it.val)
-	case itemInteger:
-		if !numUnderscoresOK(it.val) {
-			p.panicf("Invalid integer %q: underscores must be surrounded by digits",
-				it.val)
-		}
-		val := strings.Replace(it.val, "_", "", -1)
-		num, err := strconv.ParseInt(val, 10, 64)
-		if err != nil {
-			// Distinguish integer values. Normally, it'd be a bug if the lexer
-			// provides an invalid integer, but it's possible that the number is
-			// out of range of valid values (which the lexer cannot determine).
-			// So mark the former as a bug but the latter as a legitimate user
-			// error.
-			if e, ok := err.(*strconv.NumError); ok &&
-				e.Err == strconv.ErrRange {
-
-				p.panicf("Integer '%s' is out of the range of 64-bit "+
-					"signed integers.", it.val)
-			} else {
-				p.bug("Expected integer value, but got '%s'.", it.val)
-			}
-		}
-		return num, p.typeOfPrimitive(it)
-	case itemFloat:
-		parts := strings.FieldsFunc(it.val, func(r rune) bool {
-			switch r {
-			case '.', 'e', 'E':
-				return true
-			}
-			return false
-		})
-		for _, part := range parts {
-			if !numUnderscoresOK(part) {
-				p.panicf("Invalid float %q: underscores must be "+
-					"surrounded by digits", it.val)
-			}
-		}
-		if !numPeriodsOK(it.val) {
-			// As a special case, numbers like '123.' or '1.e2',
-			// which are valid as far as Go/strconv are concerned,
-			// must be rejected because TOML says that a fractional
-			// part consists of '.' followed by 1+ digits.
-			p.panicf("Invalid float %q: '.' must be followed "+
-				"by one or more digits", it.val)
-		}
-		val := strings.Replace(it.val, "_", "", -1)
-		num, err := strconv.ParseFloat(val, 64)
-		if err != nil {
-			if e, ok := err.(*strconv.NumError); ok &&
-				e.Err == strconv.ErrRange {
-
-				p.panicf("Float '%s' is out of the range of 64-bit "+
-					"IEEE-754 floating-point numbers.", it.val)
-			} else {
-				p.panicf("Invalid float value: %q", it.val)
-			}
-		}
-		return num, p.typeOfPrimitive(it)
-	case itemDatetime:
-		var t time.Time
-		var ok bool
-		var err error
-		for _, format := range []string{
-			"2006-01-02T15:04:05Z07:00",
-			"2006-01-02T15:04:05",
-			"2006-01-02",
-		} {
-			t, err = time.ParseInLocation(format, it.val, time.Local)
-			if err == nil {
-				ok = true
-				break
-			}
-		}
-		if !ok {
-			p.panicf("Invalid TOML Datetime: %q.", it.val)
-		}
-		return t, p.typeOfPrimitive(it)
-	case itemArray:
-		array := make([]interface{}, 0)
-		types := make([]tomlType, 0)
-
-		for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
-			if it.typ == itemCommentStart {
-				p.expect(itemText)
-				continue
-			}
-
-			val, typ := p.value(it)
-			array = append(array, val)
-			types = append(types, typ)
-		}
-		return array, p.typeOfArray(types)
-	case itemInlineTableStart:
-		var (
-			hash         = make(map[string]interface{})
-			outerContext = p.context
-			outerKey     = p.currentKey
-		)
-
-		p.context = append(p.context, p.currentKey)
-		p.currentKey = ""
-		for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
-			if it.typ != itemKeyStart {
-				p.bug("Expected key start but instead found %q, around line %d",
-					it.val, p.approxLine)
-			}
-			if it.typ == itemCommentStart {
-				p.expect(itemText)
-				continue
-			}
-
-			// retrieve key
-			k := p.next()
-			p.approxLine = k.line
-			kname := p.keyString(k)
-
-			// retrieve value
-			p.currentKey = kname
-			val, typ := p.value(p.next())
-			// make sure we keep metadata up to date
-			p.setType(kname, typ)
-			p.ordered = append(p.ordered, p.context.add(p.currentKey))
-			hash[kname] = val
-		}
-		p.context = outerContext
-		p.currentKey = outerKey
-		return hash, tomlHash
-	}
-	p.bug("Unexpected value type: %s", it.typ)
-	panic("unreachable")
-}
-
-// numUnderscoresOK checks whether each underscore in s is surrounded by
-// characters that are not underscores.
-func numUnderscoresOK(s string) bool {
-	accept := false
-	for _, r := range s {
-		if r == '_' {
-			if !accept {
-				return false
-			}
-			accept = false
-			continue
-		}
-		accept = true
-	}
-	return accept
-}
-
-// numPeriodsOK checks whether every period in s is followed by a digit.
-func numPeriodsOK(s string) bool {
-	period := false
-	for _, r := range s {
-		if period && !isDigit(r) {
-			return false
-		}
-		period = r == '.'
-	}
-	return !period
-}
-
-// establishContext sets the current context of the parser,
-// where the context is either a hash or an array of hashes. Which one is
-// set depends on the value of the `array` parameter.
-//
-// Establishing the context also makes sure that the key isn't a duplicate, and
-// will create implicit hashes automatically.
-func (p *parser) establishContext(key Key, array bool) {
-	var ok bool
-
-	// Always start at the top level and drill down for our context.
-	hashContext := p.mapping
-	keyContext := make(Key, 0)
-
-	// We only need implicit hashes for key[0:-1]
-	for _, k := range key[0 : len(key)-1] {
-		_, ok = hashContext[k]
-		keyContext = append(keyContext, k)
-
-		// No key? Make an implicit hash and move on.
-		if !ok {
-			p.addImplicit(keyContext)
-			hashContext[k] = make(map[string]interface{})
-		}
-
-		// If the hash context is actually an array of tables, then set
-		// the hash context to the last element in that array.
-		//
-		// Otherwise, it better be a table, since this MUST be a key group (by
-		// virtue of it not being the last element in a key).
-		switch t := hashContext[k].(type) {
-		case []map[string]interface{}:
-			hashContext = t[len(t)-1]
-		case map[string]interface{}:
-			hashContext = t
-		default:
-			p.panicf("Key '%s' was already created as a hash.", keyContext)
-		}
-	}
-
-	p.context = keyContext
-	if array {
-		// If this is the first element for this array, then allocate a new
-		// list of tables for it.
-		k := key[len(key)-1]
-		if _, ok := hashContext[k]; !ok {
-			hashContext[k] = make([]map[string]interface{}, 0, 5)
-		}
-
-		// Add a new table. But make sure the key hasn't already been used
-		// for something else.
-		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
-			hashContext[k] = append(hash, make(map[string]interface{}))
-		} else {
-			p.panicf("Key '%s' was already created and cannot be used as "+
-				"an array.", keyContext)
-		}
-	} else {
-		p.setValue(key[len(key)-1], make(map[string]interface{}))
-	}
-	p.context = append(p.context, key[len(key)-1])
-}
-
-// setValue sets the given key to the given value in the current context.
-// It makes sure that the key hasn't already been defined, and accounts for
-// implicit key groups.
-func (p *parser) setValue(key string, value interface{}) {
-	var tmpHash interface{}
-	var ok bool
-
-	hash := p.mapping
-	keyContext := make(Key, 0)
-	for _, k := range p.context {
-		keyContext = append(keyContext, k)
-		if tmpHash, ok = hash[k]; !ok {
-			p.bug("Context for key '%s' has not been established.", keyContext)
-		}
-		switch t := tmpHash.(type) {
-		case []map[string]interface{}:
-			// The context is a table of hashes. Pick the most recent table
-			// defined as the current hash.
-			hash = t[len(t)-1]
-		case map[string]interface{}:
-			hash = t
-		default:
-			p.bug("Expected hash to have type 'map[string]interface{}', but "+
-				"it has '%T' instead.", tmpHash)
-		}
-	}
-	keyContext = append(keyContext, key)
-
-	if _, ok := hash[key]; ok {
-		// Typically, if the given key has already been set, then we have
-		// to raise an error since duplicate keys are disallowed. However,
-		// it's possible that a key was previously defined implicitly. In this
-		// case, it is allowed to be redefined concretely. (See the
-		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
-		//
-		// But we have to make sure to stop marking it as an implicit. (So that
-		// another redefinition provokes an error.)
-		//
-		// Note that since it has already been defined (as a hash), we don't
-		// want to overwrite it. So our business is done.
-		if p.isImplicit(keyContext) {
-			p.removeImplicit(keyContext)
-			return
-		}
-
-		// Otherwise, we have a concrete key trying to override a previous
-		// key, which is *always* wrong.
-		p.panicf("Key '%s' has already been defined.", keyContext)
-	}
-	hash[key] = value
-}
-
-// setType sets the type of a particular value at a given key.
-// It should be called immediately AFTER setValue.
-//
-// Note that if `key` is empty, then the type given will be applied to the
-// current context (which is either a table or an array of tables).
-func (p *parser) setType(key string, typ tomlType) {
-	keyContext := make(Key, 0, len(p.context)+1)
-	for _, k := range p.context {
-		keyContext = append(keyContext, k)
-	}
-	if len(key) > 0 { // allow type setting for hashes
-		keyContext = append(keyContext, key)
-	}
-	p.types[keyContext.String()] = typ
-}
-
-// addImplicit sets the given Key as having been created implicitly.
-func (p *parser) addImplicit(key Key) {
-	p.implicits[key.String()] = true
-}
-
-// removeImplicit stops tagging the given key as having been implicitly
-// created.
-func (p *parser) removeImplicit(key Key) {
-	p.implicits[key.String()] = false
-}
-
-// isImplicit returns true if the key group pointed to by the key was created
-// implicitly.
-func (p *parser) isImplicit(key Key) bool {
-	return p.implicits[key.String()]
-}
-
-// current returns the full key name of the current context.
-func (p *parser) current() string {
-	if len(p.currentKey) == 0 {
-		return p.context.String()
-	}
-	if len(p.context) == 0 {
-		return p.currentKey
-	}
-	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
-}
-
-func stripFirstNewline(s string) string {
-	if len(s) == 0 || s[0] != '\n' {
-		return s
-	}
-	return s[1:]
-}
-
-func stripEscapedWhitespace(s string) string {
-	esc := strings.Split(s, "\\\n")
-	if len(esc) > 1 {
-		for i := 1; i < len(esc); i++ {
-			esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
-		}
-	}
-	return strings.Join(esc, "")
-}
-
-func (p *parser) replaceEscapes(str string) string {
-	var replaced []rune
-	s := []byte(str)
-	r := 0
-	for r < len(s) {
-		if s[r] != '\\' {
-			c, size := utf8.DecodeRune(s[r:])
-			r += size
-			replaced = append(replaced, c)
-			continue
-		}
-		r += 1
-		if r >= len(s) {
-			p.bug("Escape sequence at end of string.")
-			return ""
-		}
-		switch s[r] {
-		default:
-			p.bug("Expected valid escape code after \\, but got %q.", s[r])
-			return ""
-		case 'b':
-			replaced = append(replaced, rune(0x0008))
-			r += 1
-		case 't':
-			replaced = append(replaced, rune(0x0009))
-			r += 1
-		case 'n':
-			replaced = append(replaced, rune(0x000A))
-			r += 1
-		case 'f':
-			replaced = append(replaced, rune(0x000C))
-			r += 1
-		case 'r':
-			replaced = append(replaced, rune(0x000D))
-			r += 1
-		case '"':
-			replaced = append(replaced, rune(0x0022))
-			r += 1
-		case '\\':
-			replaced = append(replaced, rune(0x005C))
-			r += 1
-		case 'u':
-			// At this point, we know we have a Unicode escape of the form
-			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
-			// for us.)
-			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
-			replaced = append(replaced, escaped)
-			r += 5
-		case 'U':
-			// At this point, we know we have a Unicode escape of the form
-			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
-			// for us.)
-			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
-			replaced = append(replaced, escaped)
-			r += 9
-		}
-	}
-	return string(replaced)
-}
-
-func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
-	s := string(bs)
-	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
-	if err != nil {
-		p.bug("Could not parse '%s' as a hexadecimal number, but the "+
-			"lexer claims it's OK: %s", s, err)
-	}
-	if !utf8.ValidRune(rune(hex)) {
-		p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
-	}
-	return rune(hex)
-}
-
-func isStringType(ty itemType) bool {
-	return ty == itemString || ty == itemMultilineString ||
-		ty == itemRawString || ty == itemRawMultilineString
-}
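
The parser above enforces a few rules worth calling out: underscores in numbers must be surrounded by digits (numUnderscoresOK), a concrete key may not be redefined (setValue), but a table created implicitly by a dotted header may later be defined explicitly. A short sketch of accepted and rejected inputs through the package's Decode function (the keys are made up):

	package main

	import (
		"fmt"

		"github.com/BurntSushi/toml"
	)

	func main() {
		var v map[string]interface{}

		// "[a.b]" creates table "a" implicitly; defining "[a]" afterwards is allowed.
		_, err := toml.Decode("[a.b]\nx = 1\n[a]\ny = 2\n", &v)
		fmt.Println("implicit then explicit:", err) // <nil>

		// Underscores must be surrounded by digits, so "1__0" is rejected.
		_, err = toml.Decode("n = 1__0\n", &v)
		fmt.Println("bad underscores:", err != nil) // true

		// Redefining a concrete key is always an error.
		_, err = toml.Decode("x = 1\nx = 2\n", &v)
		fmt.Println("duplicate key:", err != nil) // true
	}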

+ 0 - 1
vendor/github.com/BurntSushi/toml/session.vim

@@ -1 +0,0 @@
-au BufWritePost *.go silent!make tags > /dev/null 2>&1

+ 0 - 91
vendor/github.com/BurntSushi/toml/type_check.go

@@ -1,91 +0,0 @@
-package toml
-
-// tomlType represents any Go type that corresponds to a TOML type.
-// While the first draft of the TOML spec has a simplistic type system that
-// probably doesn't need this level of sophistication, we seem to be militating
-// toward adding real composite types.
-type tomlType interface {
-	typeString() string
-}
-
-// typeEqual accepts any two types and returns true if they are equal.
-func typeEqual(t1, t2 tomlType) bool {
-	if t1 == nil || t2 == nil {
-		return false
-	}
-	return t1.typeString() == t2.typeString()
-}
-
-func typeIsHash(t tomlType) bool {
-	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
-}
-
-type tomlBaseType string
-
-func (btype tomlBaseType) typeString() string {
-	return string(btype)
-}
-
-func (btype tomlBaseType) String() string {
-	return btype.typeString()
-}
-
-var (
-	tomlInteger   tomlBaseType = "Integer"
-	tomlFloat     tomlBaseType = "Float"
-	tomlDatetime  tomlBaseType = "Datetime"
-	tomlString    tomlBaseType = "String"
-	tomlBool      tomlBaseType = "Bool"
-	tomlArray     tomlBaseType = "Array"
-	tomlHash      tomlBaseType = "Hash"
-	tomlArrayHash tomlBaseType = "ArrayHash"
-)
-
-// typeOfPrimitive returns a tomlType of any primitive value in TOML.
-// Primitive values are: Integer, Float, Datetime, String and Bool.
-//
-// Passing a lexer item other than the following will cause a BUG message
-// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
-func (p *parser) typeOfPrimitive(lexItem item) tomlType {
-	switch lexItem.typ {
-	case itemInteger:
-		return tomlInteger
-	case itemFloat:
-		return tomlFloat
-	case itemDatetime:
-		return tomlDatetime
-	case itemString:
-		return tomlString
-	case itemMultilineString:
-		return tomlString
-	case itemRawString:
-		return tomlString
-	case itemRawMultilineString:
-		return tomlString
-	case itemBool:
-		return tomlBool
-	}
-	p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
-	panic("unreachable")
-}
-
-// typeOfArray returns a tomlType for an array given a list of types of its
-// values.
-//
-// In the current spec, if an array is homogeneous, then its type is always
-// "Array". If the array is not homogeneous, an error is generated.
-func (p *parser) typeOfArray(types []tomlType) tomlType {
-	// Empty arrays are cool.
-	if len(types) == 0 {
-		return tomlArray
-	}
-
-	theType := types[0]
-	for _, t := range types[1:] {
-		if !typeEqual(theType, t) {
-			p.panicf("Array contains values of type '%s' and '%s', but "+
-				"arrays must be homogeneous.", theType, t)
-		}
-	}
-	return tomlArray
-}
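
typeOfArray above only accepts homogeneous arrays (this vendored version predates TOML 1.0, which relaxed that rule), so mixing element types is a parse error. A hedged sketch (the key and struct are illustrative):

	package main

	import (
		"fmt"

		"github.com/BurntSushi/toml"
	)

	func main() {
		var v struct {
			Xs []interface{} `toml:"xs"`
		}

		// A homogeneous array decodes fine.
		if _, err := toml.Decode(`xs = [1, 2, 3]`, &v); err != nil {
			panic(err)
		}

		// Mixed element types are rejected by typeOfArray while parsing.
		_, err := toml.Decode(`xs = [1, "two"]`, &v)
		fmt.Println(err != nil) // true: arrays must be homogeneous
	}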

+ 0 - 242
vendor/github.com/BurntSushi/toml/type_fields.go

@@ -1,242 +0,0 @@
-package toml
-
-// Struct field handling is adapted from code in encoding/json:
-//
-// Copyright 2010 The Go Authors.  All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the Go distribution.
-
-import (
-	"reflect"
-	"sort"
-	"sync"
-)
-
-// A field represents a single field found in a struct.
-type field struct {
-	name  string       // the name of the field (`toml` tag included)
-	tag   bool         // whether field has a `toml` tag
-	index []int        // represents the depth of an anonymous field
-	typ   reflect.Type // the type of the field
-}
-
-// byName sorts field by name, breaking ties with depth,
-// then breaking ties with "name came from toml tag", then
-// breaking ties with index sequence.
-type byName []field
-
-func (x byName) Len() int { return len(x) }
-
-func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byName) Less(i, j int) bool {
-	if x[i].name != x[j].name {
-		return x[i].name < x[j].name
-	}
-	if len(x[i].index) != len(x[j].index) {
-		return len(x[i].index) < len(x[j].index)
-	}
-	if x[i].tag != x[j].tag {
-		return x[i].tag
-	}
-	return byIndex(x).Less(i, j)
-}
-
-// byIndex sorts field by index sequence.
-type byIndex []field
-
-func (x byIndex) Len() int { return len(x) }
-
-func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byIndex) Less(i, j int) bool {
-	for k, xik := range x[i].index {
-		if k >= len(x[j].index) {
-			return false
-		}
-		if xik != x[j].index[k] {
-			return xik < x[j].index[k]
-		}
-	}
-	return len(x[i].index) < len(x[j].index)
-}
-
-// typeFields returns a list of fields that TOML should recognize for the given
-// type. The algorithm is breadth-first search over the set of structs to
-// include - the top struct and then any reachable anonymous structs.
-func typeFields(t reflect.Type) []field {
-	// Anonymous fields to explore at the current level and the next.
-	current := []field{}
-	next := []field{{typ: t}}
-
-	// Count of queued names for current level and the next.
-	count := map[reflect.Type]int{}
-	nextCount := map[reflect.Type]int{}
-
-	// Types already visited at an earlier level.
-	visited := map[reflect.Type]bool{}
-
-	// Fields found.
-	var fields []field
-
-	for len(next) > 0 {
-		current, next = next, current[:0]
-		count, nextCount = nextCount, map[reflect.Type]int{}
-
-		for _, f := range current {
-			if visited[f.typ] {
-				continue
-			}
-			visited[f.typ] = true
-
-			// Scan f.typ for fields to include.
-			for i := 0; i < f.typ.NumField(); i++ {
-				sf := f.typ.Field(i)
-				if sf.PkgPath != "" && !sf.Anonymous { // unexported
-					continue
-				}
-				opts := getOptions(sf.Tag)
-				if opts.skip {
-					continue
-				}
-				index := make([]int, len(f.index)+1)
-				copy(index, f.index)
-				index[len(f.index)] = i
-
-				ft := sf.Type
-				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
-					// Follow pointer.
-					ft = ft.Elem()
-				}
-
-				// Record found field and index sequence.
-				if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
-					tagged := opts.name != ""
-					name := opts.name
-					if name == "" {
-						name = sf.Name
-					}
-					fields = append(fields, field{name, tagged, index, ft})
-					if count[f.typ] > 1 {
-						// If there were multiple instances, add a second,
-						// so that the annihilation code will see a duplicate.
-						// It only cares about the distinction between 1 or 2,
-						// so don't bother generating any more copies.
-						fields = append(fields, fields[len(fields)-1])
-					}
-					continue
-				}
-
-				// Record new anonymous struct to explore in next round.
-				nextCount[ft]++
-				if nextCount[ft] == 1 {
-					f := field{name: ft.Name(), index: index, typ: ft}
-					next = append(next, f)
-				}
-			}
-		}
-	}
-
-	sort.Sort(byName(fields))
-
-	// Delete all fields that are hidden by the Go rules for embedded fields,
-	// except that fields with TOML tags are promoted.
-
-	// The fields are sorted in primary order of name, secondary order
-	// of field index length. Loop over names; for each name, delete
-	// hidden fields by choosing the one dominant field that survives.
-	out := fields[:0]
-	for advance, i := 0, 0; i < len(fields); i += advance {
-		// One iteration per name.
-		// Find the sequence of fields with the name of this first field.
-		fi := fields[i]
-		name := fi.name
-		for advance = 1; i+advance < len(fields); advance++ {
-			fj := fields[i+advance]
-			if fj.name != name {
-				break
-			}
-		}
-		if advance == 1 { // Only one field with this name
-			out = append(out, fi)
-			continue
-		}
-		dominant, ok := dominantField(fields[i : i+advance])
-		if ok {
-			out = append(out, dominant)
-		}
-	}
-
-	fields = out
-	sort.Sort(byIndex(fields))
-
-	return fields
-}
-
-// dominantField looks through the fields, all of which are known to
-// have the same name, to find the single field that dominates the
-// others using Go's embedding rules, modified by the presence of
-// TOML tags. If there are multiple top-level fields, the boolean
-// will be false: This condition is an error in Go and we skip all
-// the fields.
-func dominantField(fields []field) (field, bool) {
-	// The fields are sorted in increasing index-length order. The winner
-	// must therefore be one with the shortest index length. Drop all
-	// longer entries, which is easy: just truncate the slice.
-	length := len(fields[0].index)
-	tagged := -1 // Index of first tagged field.
-	for i, f := range fields {
-		if len(f.index) > length {
-			fields = fields[:i]
-			break
-		}
-		if f.tag {
-			if tagged >= 0 {
-				// Multiple tagged fields at the same level: conflict.
-				// Return no field.
-				return field{}, false
-			}
-			tagged = i
-		}
-	}
-	if tagged >= 0 {
-		return fields[tagged], true
-	}
-	// All remaining fields have the same length. If there's more than one,
-	// we have a conflict (two fields named "X" at the same level) and we
-	// return no field.
-	if len(fields) > 1 {
-		return field{}, false
-	}
-	return fields[0], true
-}
-
-var fieldCache struct {
-	sync.RWMutex
-	m map[reflect.Type][]field
-}
-
-// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
-func cachedTypeFields(t reflect.Type) []field {
-	fieldCache.RLock()
-	f := fieldCache.m[t]
-	fieldCache.RUnlock()
-	if f != nil {
-		return f
-	}
-
-	// Compute fields without lock.
-	// Might duplicate effort but won't hold other computations back.
-	f = typeFields(t)
-	if f == nil {
-		f = []field{}
-	}
-
-	fieldCache.Lock()
-	if fieldCache.m == nil {
-		fieldCache.m = map[reflect.Type][]field{}
-	}
-	fieldCache.m[t] = f
-	fieldCache.Unlock()
-	return f
-}
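
typeFields and dominantField above mirror encoding/json's embedding rules: a shallower field hides a deeper one with the same name, and a "toml"-tagged field would similarly win over an untagged one at the same depth. A sketch of the first rule as it affects decoding (type names are illustrative):

	package main

	import (
		"fmt"

		"github.com/BurntSushi/toml"
	)

	type base struct {
		Name string // would be reachable at depth 1 through embedding
	}

	type wrapper struct {
		base
		Name string // the shallower field hides base.Name (see dominantField)
	}

	func main() {
		var w wrapper
		if _, err := toml.Decode(`name = "outer"`, &w); err != nil {
			panic(err)
		}
		// Only the dominant (outer) field receives the value.
		fmt.Printf("outer=%q embedded=%q\n", w.Name, w.base.Name) // outer="outer" embedded=""
	}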

+ 0 - 3
vendor/golang.org/x/exp/AUTHORS

@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.

+ 0 - 3
vendor/golang.org/x/exp/CONTRIBUTORS

@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.

+ 0 - 27
vendor/golang.org/x/exp/LICENSE

@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 0 - 22
vendor/golang.org/x/exp/PATENTS

@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go.  This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation.  If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.

+ 0 - 624
vendor/golang.org/x/exp/apidiff/README.md

@@ -1,624 +0,0 @@
-# Checking Go Package API Compatibility
-
-The `apidiff` tool in this directory determines whether two versions of the same
-package are compatible. The goal is to help the developer make an informed
-choice of semantic version after they have changed the code of their module.
-
-`apidiff` reports two kinds of changes: incompatible ones, which require
-incrementing the major part of the semantic version, and compatible ones, which
-require a minor version increment. If no API changes are reported but there are
-code changes that could affect client code, then the patch version should
-be incremented.
-
-Because `apidiff` ignores package import paths, it may be used to display API
-differences between any two packages, not just different versions of the same
-package.
-
-The current version of `apidiff` compares only packages, not modules.
-
-
-## Compatibility Desiderata
-
-Any tool that checks compatibility can offer only an approximation. No tool can
-detect behavioral changes; and even if it could, whether a behavioral change is
-a breaking change or not depends on many factors, such as whether it closes a
-security hole or fixes a bug. Even a change that causes some code to fail to
-compile may not be considered a breaking change by the developers or their
-users. It may only affect code marked as experimental or unstable, for
-example, or the break may only manifest in unlikely cases.
-
-For a tool to be useful, its notion of compatibility must be relaxed enough to
-allow reasonable changes, like adding a field to a struct, but strict enough to
-catch significant breaking changes. A tool that is too lax will miss important
-incompatibilities, and users will stop trusting it; one that is too strict may
-generate so much noise that users will ignore it.
-
-To a first approximation, this tool reports a change as incompatible if it could
-cause client code to stop compiling. But `apidiff` ignores five ways in which
-code may fail to compile after a change. Three of them are mentioned in the
-[Go 1 Compatibility Guarantee](https://golang.org/doc/go1compat).
-
-### Unkeyed Struct Literals
-
-Code that uses an unkeyed struct literal would fail to compile if a field was
-added to the struct, making any such addition an incompatible change. An example:
-
-```
-// old
-type Point struct { X, Y int }
-
-// new
-type Point struct { X, Y, Z int }
-
-// client
-p := pkg.Point{1, 2} // fails in new because there are more fields than expressions
-```
-Here and below, we provide three snippets: the code in the old version of the
-package, the code in the new version, and the code written in a client of the package,
-which refers to it by the name `pkg`. The client code compiles against the old
-code but not the new.
-
-### Embedding and Shadowing
-
-Adding an exported field to a struct can break code that embeds that struct,
-because the newly added field may conflict with an identically named field
-at the same struct depth. A selector referring to the latter would become
-ambiguous and thus erroneous.
-
-
-```
-// old
-type Point struct { X, Y int }
-
-// new
-type Point struct { X, Y, Z int }
-
-// client
-type z struct { Z int }
-
-var v struct {
-    pkg.Point
-    z
-}
-
-_ = v.Z // fails in new
-```
-In the new version, the last line fails to compile because there are two embedded `Z`
-fields at the same depth, one from `z` and one from `pkg.Point`.
-
-
-### Using an Identical Type Externally
-
-If it is possible for client code to write a type expression representing the
-underlying type of a defined type in a package, then external code can use it in
-assignments involving the package type, making any change to that type incompatible.
-```
-// old
-type Point struct { X, Y int }
-
-// new
-type Point struct { X, Y, Z int }
-
-// client
-var p struct { X, Y int } = pkg.Point{} // fails in new because of Point's extra field
-```
-Here, the external code could have used the provided name `Point`, but chose not
-to. I'll have more to say about this and related examples later.
-
-### unsafe.Sizeof and Friends
-
-Since `unsafe.Sizeof`, `unsafe.Offsetof` and `unsafe.Alignof` are constant
-expressions, they can be used in an array type literal:
-
-```
-// old
-type S struct{ X int }
-
-// new
-type S struct{ X, y int }
-
-// client
-var a [unsafe.Sizeof(pkg.S{})]int = [8]int{} // fails in new because S's size is not 8
-```
-Use of these operations could make many changes to a type potentially incompatible.
-
-
-### Type Switches
-
-A package change that merges two different types (with same underlying type)
-into a single new type may break type switches in clients that refer to both
-original types:
-
-```
-// old
-type T1 int
-type T2 int
-
-// new
-type T1 int
-type T2 = T1
-
-// client
-switch x.(type) {
-case T1:
-case T2:
-} // fails with new because two cases have the same type
-```
-This sort of incompatibility is sufficiently esoteric to ignore; the tool allows
-merging types.
-
-## First Attempt at a Definition
-
-Our first attempt at defining compatibility captures the idea that all the
-exported names in the old package must have compatible equivalents in the new
-package.
-
-A new package is compatible with an old one if and only if:
-- For every exported package-level name in the old package, the same name is
-  declared in the new at package level, and
-- the names denote the same kind of object (e.g. both are variables), and
-- the types of the objects are compatible.
-
-We will work out the details (and make some corrections) below, but it is clear
-already that we will need to determine what makes two types compatible. And
-whatever the definition of type compatibility, it's certainly true that if two
-types are the same, they are compatible. So we will need to decide what makes an
-old and new type the same. We will call this sameness relation _correspondence_.
-
-## Type Correspondence
-
-Go already has a definition of when two types are the same:
-[type identity](https://golang.org/ref/spec#Type_identity).
-But identity isn't adequate for our purpose: it says that two defined
-types are identical if they arise from the same definition, but it's unclear
-what "same" means when talking about two different packages (or two versions of
-a single package).
-
-The obvious change to the definition of identity is to require that old and new
-[defined types](https://golang.org/ref/spec#Type_definitions)
-have the same name instead. But that doesn't work either, for two
-reasons. First, type aliases can equate two defined types with different names:
-
-```
-// old
-type E int
-
-// new
-type t int
-type E = t
-```
-Second, an unexported type can be renamed:
-
-```
-// old
-type u1 int
-var V u1
-
-// new
-type u2 int
-var V u2
-```
-Here, even though `u1` and `u2` are unexported, their exported fields and
-methods are visible to clients, so they are part of the API. But since the name
-`u1` is not visible to clients, it can be changed compatibly. We say that `u1`
-and `u2` are _exposed_: a type is exposed if a client package can declare variables of that type.
-
-We will say that an old defined type _corresponds_ to a new one if they have the
-same name, or one can be renamed to the other without otherwise changing the
-API. In the first example above, old `E` and new `t` correspond. In the second,
-old `u1` and new `u2` correspond.
-
-Two or more old defined types can correspond to a single new type: we consider
-"merging" two types into one to be a compatible change. As mentioned above,
-code that uses both names in a type switch will fail, but we deliberately ignore
-this case. However, a single old type can correspond to only one new type.
-
-So far, we've explained what correspondence means for defined types. To extend
-the definition to all types, we parallel the language's definition of type
-identity. So, for instance, an old and a new slice type correspond if their
-element types correspond.
-
-## Definition of Compatibility
-
-We can now present the definition of compatibility used by `apidiff`.
-
-### Package Compatibility
-
-> A new package is compatible with an old one if:
->1. Each exported name in the old package's scope also appears in the new
->package's scope, and the object (constant, variable, function or type) denoted
->by that name in the old package is compatible with the object denoted by the
->name in the new package, and
->2. For every exposed type that implements an exposed interface in the old package,
-> its corresponding type should implement the corresponding interface in the new package.
->
->Otherwise the packages are incompatible.
-
-As an aside, the tool also finds exported names in the new package that are not
-exported in the old, and marks them as compatible changes.
-
-Clause 2 is discussed further in "Whole-Package Compatibility."
-
-### Object Compatibility
-
-This section provides compatibility rules for constants, variables, functions
-and types.
-
-#### Constants
-
->A new exported constant is compatible with an old one of the same name if and only if
->1. Their types correspond, and
->2. Their values are identical.
-
-It is tempting to allow changing a typed constant to an untyped one. That may
-seem harmless, but it can break code like this:
-
-```
-// old
-const C int64 = 1
-
-// new
-const C = 1
-
-// client
-var x = C          // old type is int64, new is int
-var y int64 = x // fails with new: different types in assignment
-```
-
-A change to the value of a constant can break compatibility if the value is used
-in an array type:
-
-```
-// old
-const C = 1
-
-// new
-const C = 2
-
-// client
-var a [C]int = [1]int{} // fails with new because [2]int and [1]int are different types
-```
-Changes to constant values are rare, and determining whether they are compatible
-or not is better left to the user, so the tool reports them.
-
-#### Variables
-
->A new exported variable is compatible with an old one of the same name if and
->only if their types correspond.
-
-Correspondence doesn't look past names, so this rule does not prevent adding a
-field to `MyStruct` if the package declares `var V MyStruct`. It does, however, mean that
-
-```
-var V struct { X int }
-```
-is incompatible with
-```
-var V struct { X, Y int }
-```
-I discuss this at length below in the section "Compatibility, Types and Names."
-
-#### Functions
-
->A new exported function or variable is compatible with an old function of the
->same name if and only if their types (signatures) correspond.
-
-This rule captures the fact that, although many signature changes are compatible
-for all call sites, none are compatible for assignment:
-
-```
-var v func(int) = pkg.F
-```
-Here, `F` must be of type `func(int)` and not, for instance, `func(...int)` or `func(interface{})`.
-
-Note that the rule permits changing a function to a variable. This is a common
-practice, usually done for test stubbing, and cannot break any code at compile
-time.
-
-#### Exported Types
-
-> A new exported type is compatible with an old one if and only if their
-> names are the same and their types correspond.
-
-This rule seems far too strict. But, ignoring aliases for the moment, it demands only
-that the old and new _defined_ types correspond. Consider:
-```
-// old
-type T struct { X int }
-
-// new
-type T struct { X, Y int }
-```
-The addition of `Y` is a compatible change, because this rule does not require
-that the struct literals have to correspond, only that the defined types
-denoted by `T` must correspond. (Remember that correspondence stops at type
-names.)
-
-If one type is an alias that refers to the corresponding defined type, the
-situation is the same:
-
-```
-// old
-type T struct { X int }
-
-// new
-type u struct { X, Y int }
-type T = u
-```
-Here, the only requirement is that old `T` corresponds to new `u`, not that the
-struct types correspond. (We can't tell from this snippet that the old `T` and
-the new `u` do correspond; that depends on whether `u` replaces `T` throughout
-the API.)
-
-However, the following change is incompatible, because the names do not
-denote corresponding types:
-
-```
-// old
-type T = struct { X int }
-
-// new
-type T = struct { X, Y int }
-```
-### Type Literal Compatibility
-
-Only five kinds of types can differ compatibly: defined types, structs,
-interfaces, channels and numeric types. We only consider the compatibility of
-the last four when they are the underlying type of a defined type. See
-"Compatibility, Types and Names" for a rationale.
-
-We justify the compatibility rules by enumerating all the ways a type
-can be used, and by showing that the allowed changes cannot break any code that
-uses values of the type in those ways.
-
-Values of all types can be used in assignments (including argument passing and
-function return), but we do not require that old and new types are assignment
-compatible. That is because we assume that the old and new packages are never
-used together: any given binary will link in either the old package or the new.
-So in describing how a type can be used in the sections below, we omit
-assignment.
-
-Any type can also be used in a type assertion or conversion. The changes we allow
-below may affect the run-time behavior of these operations, but they cannot affect
-whether they compile. The only such breaking change would be to change
-the type `T` in an assertion `x.T` so that it no longer implements the interface
-type of `x`; but the rules for interfaces below disallow that.
-
-> A new type is compatible with an old one if and only if they correspond, or
-> one of the cases below applies.
-
-#### Defined Types
-
-Other than assignment, the only ways to use a defined type are to access its
-methods, or to make use of the properties of its underlying type. Rule 2 below
-covers the latter, and rules 3 and 4 cover the former.
-
-> A new defined type is compatible with an old one if and only if all of the
-> following hold:
->1. They correspond.
->2. Their underlying types are compatible.
->3. The new exported value method set is a superset of the old.
->4. The new exported pointer method set is a superset of the old.
-
-An exported method set is a method set with all unexported methods removed.
-When comparing methods of a method set, we require identical names and
-corresponding signatures.
-
-Removing an exported method is clearly a breaking change. But removing an
-unexported one (or changing its signature) can be breaking as well, if it
-results in the type no longer implementing an interface. See "Whole-Package
-Compatibility," below.
-
-#### Channels
-
-> A new channel type is compatible with an old one if
->  1. The element types correspond, and
->  2. Either the directions are the same, or the new type has no direction.
-
-Other than assignment, the only ways to use values of a channel type are to send
-and receive on them, to close them, and to use them as map keys. Changes to a
-channel type cannot cause code that closes a channel or uses it as a map key to
-fail to compile, so we need not consider those operations.
-
-Rule 1 ensures that any operations on the values sent or received will compile.
-Rule 2 captures the fact that any program that compiles with a directed channel
-must use either only sends, or only receives, so allowing the other operation
-by removing the channel direction cannot break any code.
-
-
-#### Interfaces
-
-> A new interface is compatible with an old one if and only if:
-> 1. The old interface does not have an unexported method, and it corresponds
->    to the new interface (i.e. they have the same method set), or
-> 2. The old interface has an unexported method and the new exported method set is a
->    superset of the old.
-
-Other than assignment, the only ways to use an interface are to implement it,
-embed it, or call one of its methods. (Interface values can also be used as map
-keys, but that cannot cause a compile-time error.)
-
-Certainly, removing an exported method from an interface could break a client
-call, so neither rule allows it.
-
-Rule 1 also disallows adding a method to an interface without an existing unexported
-method. Such an interface can be implemented in client code. If adding a method
-were allowed, a type that implements the old interface could fail to implement
-the new one:
-
-```
-type I interface { M1() }         // old
-type I interface { M1(); M2() }   // new
-
-// client
-type t struct{}
-func (t) M1() {}
-var i pkg.I = t{} // fails with new, because t lacks M2
-```
-
-Rule 2 is based on the observation that if an interface has an unexported
-method, the only way a client can implement it is to embed it.
-Adding a method is compatible in this case, because the embedding struct will
-continue to implement the interface. Adding a method also cannot break any call
-sites, since no program that compiles could have any such call sites.
-
-#### Structs
-
-> A new struct is compatible with an old one if all of the following hold:
-> 1. The new set of top-level exported fields is a superset of the old.
-> 2. The new set of _selectable_ exported fields is a superset of the old.
-> 3. If the old struct is comparable, so is the new one.
-
-The set of selectable exported fields is the set of exported fields `F`
-such that `x.F` is a valid selector expression for a value `x` of the struct
-type. `F` may be at the top level of the struct, or it may be a field of an
-embedded struct.
-
-Two fields are the same if they have the same name and corresponding types.
-
-Other than assignment, there are only four ways to use a struct: write a struct
-literal, select a field, use a value of the struct as a map key, or compare two
-values for equality. The first clause ensures that struct literals compile; the
-second, that selections compile; and the third, that equality expressions and
-map index expressions compile.
-
-#### Numeric Types
-
-> A new numeric type is compatible with an old one if and only if they are
-> both unsigned integers, both signed integers, both floats or both complex
-> types, and the new one is at least as large as the old on both 32-bit and
-> 64-bit architectures.
-
-Other than in assignments, numeric types appear in arithmetic and comparison
-expressions. Since all arithmetic operations but shifts (see below) require that
-operand types be identical, and by assumption the old and new types underlie
-defined types (see "Compatibility, Types and Names," below), there is no way for
-client code to write an arithmetic expression that compiles with operands of the
-old type but not the new.
-
-Numeric types can also appear in type switches and type assertions. Again, since
-the old and new types underlie defined types, type switches and type assertions
-that compiled using the old defined type will continue to compile with the new
-defined type.
-
-Going from an unsigned to a signed integer type is an incompatible change for
-the sole reason that only an unsigned type can appear as the right operand of a
-shift. If this rule is relaxed, then changes from an unsigned type to a larger
-signed type would be compatible. See [this
-issue](https://github.com/golang/go/issues/19113).
-
-Only integer types can be used in bitwise and shift operations, and for indexing
-slices and arrays. That is why switching from an integer to a floating-point
-type--even one that can represent all values of the integer type--is an
-incompatible change.
-
-
-Conversions from floating-point to complex types or vice versa are not permitted
-(the predeclared functions real, imag, and complex must be used instead). To
-prevent valid floating-point or complex conversions from becoming invalid,
-changing a floating-point type to a complex type or vice versa is considered an
-incompatible change.
-
-Although conversions between any two integer types are valid, assigning a
-constant value to a variable of integer type that is too small to represent the
-constant is not permitted. That is why the only compatible changes are to
-a new type whose values are a superset of the old. The requirement that the new
-set of values must include the old on both 32-bit and 64-bit machines allows
-conversions from `int32` to `int` and from `int` to `int64`, but not the other
-direction; and similarly for `uint`.
-
-Changing a type to or from `uintptr` is considered an incompatible change. Since
-its size is not specified, there is no way to know whether the new type's values
-are a superset of the old type's.
-
-## Whole-Package Compatibility
-
-Some changes that are compatible for a single type are not compatible when the
-package is considered as a whole. For example, if you remove an unexported
-method on a defined type, it may no longer implement an interface of the
-package. This can break client code:
-
-```
-// old
-type T int
-func (T) m() {}
-type I interface { m() }
-
-// new
-type T int // no method m anymore
-
-// client
-var i pkg.I = pkg.T{} // fails with new because T lacks m
-```
-
-Similarly, adding a method to an interface can cause defined types
-in the package to stop implementing it.
-
-The second clause in the definition for package compatibility handles these
-cases. To repeat:
-> 2. For every exposed type that implements an exposed interface in the old package,
-> its corresponding type should implement the corresponding interface in the new package.
-Recall that a type is exposed if it is part of the package's API, even if it is
-unexported.
-
-Other incompatibilities that involve more than one type in the package can arise
-whenever two types with identical underlying types exist in the old or new
-package. Here, a change "splits" an identical underlying type into two, breaking
-conversions:
-
-```
-// old
-type B struct { X int }
-type C struct { X int }
-
-// new
-type B struct { X int }
-type C struct { X, Y int }
-
-// client
-var b B
-_ = C(b) // fails with new: cannot convert B to C
-```
-Finally, changes that are compatible for the package in which they occur can
-break downstream packages. That can happen even if they involve unexported
-methods, thanks to embedding.
-
-The definitions given here don't account for these sorts of problems.
-
-
-## Compatibility, Types and Names 
-
-The above definitions state that the only types that can differ compatibly are
-defined types and the types that underly them. Changes to other type literals
-are considered incompatible. For instance, it is considered an incompatible
-change to add a field to the struct in this variable declaration:
-
-```
-var V struct { X int }
-```
-or this alias definition:
-```
-type T = struct { X int }
-```
-
-We make this choice to keep the definition of compatibility (relatively) simple.
-A more precise definition could, for instance, distinguish between
-
-```
-func F(struct { X int })
-```
-where any changes to the struct are incompatible, and
-
-```
-func F(struct { X, u int })
-```
-where adding a field is compatible (since clients cannot write the signature,
-and thus cannot assign `F` to a variable of the signature type). The definition
-should then also allow other function signature changes that only require
-call-site compatibility, like
-
-```
-func F(struct { X, u int }, ...int)
-```
-The result would be a much more complex definition with little benefit, since
-the examples in this section rarely arise in practice.

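As a concrete illustration of the "Functions" rule above, here is a small, hypothetical before/after in Go; the function `F` and its signatures are invented for the example, only the rule itself comes from the README:

```go
package main

import "fmt"

// Suppose (hypothetically) the old API exported func F(int) and the new API
// changes it to func F(...int). Every call site keeps compiling, but an
// assignment to a variable of the old signature type does not, which is why
// the README compares signatures by correspondence rather than by call-site
// compatibility.
func F(xs ...int) { fmt.Println(xs) }

func main() {
	F(1) // call sites are unaffected by the widened signature

	// var v func(int) = F // would no longer compile: F has type func(...int)
	var v func(...int) = F
	v(2, 3)
}
```
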
+ 0 - 220
vendor/golang.org/x/exp/apidiff/apidiff.go

@@ -1,220 +0,0 @@
-// TODO: test swap corresponding types (e.g. u1 <-> u2 and u2 <-> u1)
-// TODO: test exported alias refers to something in another package -- does correspondence work then?
-// TODO: CODE COVERAGE
-// TODO: note that we may miss correspondences because we bail early when we compare a signature (e.g. when lengths differ; we could do up to the shorter)
-// TODO: if you add an unexported method to an exposed interface, you have to check that
-//		every exposed type that previously implemented the interface still does. Otherwise
-//		an external assignment of the exposed type to the interface type could fail.
-// TODO: check constant values: large values aren't representable by some types.
-// TODO: Document all the incompatibilities we don't check for.
-
-package apidiff
-
-import (
-	"fmt"
-	"go/constant"
-	"go/token"
-	"go/types"
-)
-
-// Changes reports on the differences between the APIs of the old and new packages.
-// It classifies each difference as either compatible or incompatible (breaking). For
-// a detailed discussion of what constitutes an incompatible change, see the package
-// documentation.
-func Changes(old, new *types.Package) Report {
-	d := newDiffer(old, new)
-	d.checkPackage()
-	r := Report{}
-	for _, m := range d.incompatibles.collect() {
-		r.Changes = append(r.Changes, Change{Message: m, Compatible: false})
-	}
-	for _, m := range d.compatibles.collect() {
-		r.Changes = append(r.Changes, Change{Message: m, Compatible: true})
-	}
-	return r
-}
-
-type differ struct {
-	old, new *types.Package
-	// Correspondences between named types.
-	// Even though it is the named types (*types.Named) that correspond, we use
-	// *types.TypeName as a map key because they are canonical.
-	// The values can be either named types or basic types.
-	correspondMap map[*types.TypeName]types.Type
-
-	// Messages.
-	incompatibles messageSet
-	compatibles   messageSet
-}
-
-func newDiffer(old, new *types.Package) *differ {
-	return &differ{
-		old:           old,
-		new:           new,
-		correspondMap: map[*types.TypeName]types.Type{},
-		incompatibles: messageSet{},
-		compatibles:   messageSet{},
-	}
-}
-
-func (d *differ) incompatible(obj types.Object, part, format string, args ...interface{}) {
-	addMessage(d.incompatibles, obj, part, format, args)
-}
-
-func (d *differ) compatible(obj types.Object, part, format string, args ...interface{}) {
-	addMessage(d.compatibles, obj, part, format, args)
-}
-
-func addMessage(ms messageSet, obj types.Object, part, format string, args []interface{}) {
-	ms.add(obj, part, fmt.Sprintf(format, args...))
-}
-
-func (d *differ) checkPackage() {
-	// Old changes.
-	for _, name := range d.old.Scope().Names() {
-		oldobj := d.old.Scope().Lookup(name)
-		if !oldobj.Exported() {
-			continue
-		}
-		newobj := d.new.Scope().Lookup(name)
-		if newobj == nil {
-			d.incompatible(oldobj, "", "removed")
-			continue
-		}
-		d.checkObjects(oldobj, newobj)
-	}
-	// New additions.
-	for _, name := range d.new.Scope().Names() {
-		newobj := d.new.Scope().Lookup(name)
-		if newobj.Exported() && d.old.Scope().Lookup(name) == nil {
-			d.compatible(newobj, "", "added")
-		}
-	}
-
-	// Whole-package satisfaction.
-	// For every old exposed interface oIface and its corresponding new interface nIface...
-	for otn1, nt1 := range d.correspondMap {
-		oIface, ok := otn1.Type().Underlying().(*types.Interface)
-		if !ok {
-			continue
-		}
-		nIface, ok := nt1.Underlying().(*types.Interface)
-		if !ok {
-			// If nt1 isn't an interface but otn1 is, then that's an incompatibility that
-			// we've already noticed, so there's no need to do anything here.
-			continue
-		}
-		// For every old type that implements oIface, its corresponding new type must implement
-		// nIface.
-		for otn2, nt2 := range d.correspondMap {
-			if otn1 == otn2 {
-				continue
-			}
-			if types.Implements(otn2.Type(), oIface) && !types.Implements(nt2, nIface) {
-				d.incompatible(otn2, "", "no longer implements %s", objectString(otn1))
-			}
-		}
-	}
-}
-
-func (d *differ) checkObjects(old, new types.Object) {
-	switch old := old.(type) {
-	case *types.Const:
-		if new, ok := new.(*types.Const); ok {
-			d.constChanges(old, new)
-			return
-		}
-	case *types.Var:
-		if new, ok := new.(*types.Var); ok {
-			d.checkCorrespondence(old, "", old.Type(), new.Type())
-			return
-		}
-	case *types.Func:
-		switch new := new.(type) {
-		case *types.Func:
-			d.checkCorrespondence(old, "", old.Type(), new.Type())
-			return
-		case *types.Var:
-			d.compatible(old, "", "changed from func to var")
-			d.checkCorrespondence(old, "", old.Type(), new.Type())
-			return
-
-		}
-	case *types.TypeName:
-		if new, ok := new.(*types.TypeName); ok {
-			d.checkCorrespondence(old, "", old.Type(), new.Type())
-			return
-		}
-	default:
-		panic("unexpected obj type")
-	}
-	// Here if kind of type changed.
-	d.incompatible(old, "", "changed from %s to %s",
-		objectKindString(old), objectKindString(new))
-}
-
-// Compare two constants.
-func (d *differ) constChanges(old, new *types.Const) {
-	ot := old.Type()
-	nt := new.Type()
-	// Check for change of type.
-	if !d.correspond(ot, nt) {
-		d.typeChanged(old, "", ot, nt)
-		return
-	}
-	// Check for change of value.
-	// We know the types are the same, so constant.Compare shouldn't panic.
-	if !constant.Compare(old.Val(), token.EQL, new.Val()) {
-		d.incompatible(old, "", "value changed from %s to %s", old.Val(), new.Val())
-	}
-}
-
-func objectKindString(obj types.Object) string {
-	switch obj.(type) {
-	case *types.Const:
-		return "const"
-	case *types.Var:
-		return "var"
-	case *types.Func:
-		return "func"
-	case *types.TypeName:
-		return "type"
-	default:
-		return "???"
-	}
-}
-
-func (d *differ) checkCorrespondence(obj types.Object, part string, old, new types.Type) {
-	if !d.correspond(old, new) {
-		d.typeChanged(obj, part, old, new)
-	}
-}
-
-func (d *differ) typeChanged(obj types.Object, part string, old, new types.Type) {
-	old = removeNamesFromSignature(old)
-	new = removeNamesFromSignature(new)
-	olds := types.TypeString(old, types.RelativeTo(d.old))
-	news := types.TypeString(new, types.RelativeTo(d.new))
-	d.incompatible(obj, part, "changed from %s to %s", olds, news)
-}
-
-// go/types always includes the argument and result names when formatting a signature.
-// Since these can change without affecting compatibility, we don't want users to
-// be distracted by them, so we remove them.
-func removeNamesFromSignature(t types.Type) types.Type {
-	sig, ok := t.(*types.Signature)
-	if !ok {
-		return t
-	}
-
-	dename := func(p *types.Tuple) *types.Tuple {
-		var vars []*types.Var
-		for i := 0; i < p.Len(); i++ {
-			v := p.At(i)
-			vars = append(vars, types.NewVar(v.Pos(), v.Pkg(), "", v.Type()))
-		}
-		return types.NewTuple(vars...)
-	}
-
-	return types.NewSignature(sig.Recv(), dename(sig.Params()), dename(sig.Results()), sig.Variadic())
-}

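The exported surface removed here is small: `Changes(old, new *types.Package) Report`. A minimal sketch of driving it programmatically, using the same `packages.LoadTypes` setup as the removed `cmd/apidiff`; the import paths `example.com/pkg` and `example.com/pkg/v2` are placeholders, not real modules:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/exp/apidiff"
	"golang.org/x/tools/go/packages"
)

// load type-checks a single package, as cmd/apidiff does.
func load(path string) *packages.Package {
	cfg := &packages.Config{Mode: packages.LoadTypes}
	pkgs, err := packages.Load(cfg, path)
	if err != nil || len(pkgs) == 0 {
		log.Fatalf("loading %s: %v", path, err)
	}
	return pkgs[0]
}

func main() {
	// Placeholder import paths for the old and new versions.
	oldPkg := load("example.com/pkg")
	newPkg := load("example.com/pkg/v2")

	// Changes classifies every API difference as compatible or incompatible.
	report := apidiff.Changes(oldPkg.Types, newPkg.Types)

	// Print only the breaking changes, as `apidiff -incompatible` would.
	if err := report.TextIncompatible(os.Stdout, true); err != nil {
		log.Fatal(err)
	}

	// Or walk the individual changes.
	for _, c := range report.Changes {
		fmt.Printf("compatible=%v  %s\n", c.Compatible, c.Message)
	}
}
```
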
+ 0 - 361
vendor/golang.org/x/exp/apidiff/compatibility.go

@@ -1,361 +0,0 @@
-package apidiff
-
-import (
-	"fmt"
-	"go/types"
-	"reflect"
-)
-
-func (d *differ) checkCompatible(otn *types.TypeName, old, new types.Type) {
-	switch old := old.(type) {
-	case *types.Interface:
-		if new, ok := new.(*types.Interface); ok {
-			d.checkCompatibleInterface(otn, old, new)
-			return
-		}
-
-	case *types.Struct:
-		if new, ok := new.(*types.Struct); ok {
-			d.checkCompatibleStruct(otn, old, new)
-			return
-		}
-
-	case *types.Chan:
-		if new, ok := new.(*types.Chan); ok {
-			d.checkCompatibleChan(otn, old, new)
-			return
-		}
-
-	case *types.Basic:
-		if new, ok := new.(*types.Basic); ok {
-			d.checkCompatibleBasic(otn, old, new)
-			return
-		}
-
-	case *types.Named:
-		panic("unreachable")
-
-	default:
-		d.checkCorrespondence(otn, "", old, new)
-		return
-
-	}
-	// Here if old and new are different kinds of types.
-	d.typeChanged(otn, "", old, new)
-}
-
-func (d *differ) checkCompatibleChan(otn *types.TypeName, old, new *types.Chan) {
-	d.checkCorrespondence(otn, ", element type", old.Elem(), new.Elem())
-	if old.Dir() != new.Dir() {
-		if new.Dir() == types.SendRecv {
-			d.compatible(otn, "", "removed direction")
-		} else {
-			d.incompatible(otn, "", "changed direction")
-		}
-	}
-}
-
-func (d *differ) checkCompatibleBasic(otn *types.TypeName, old, new *types.Basic) {
-	// Certain changes to numeric types are compatible. Approximately, the info must
-	// be the same, and the new values must be a superset of the old.
-	if old.Kind() == new.Kind() {
-		// old and new are identical
-		return
-	}
-	if compatibleBasics[[2]types.BasicKind{old.Kind(), new.Kind()}] {
-		d.compatible(otn, "", "changed from %s to %s", old, new)
-	} else {
-		d.typeChanged(otn, "", old, new)
-	}
-}
-
-// All pairs (old, new) of compatible basic types.
-var compatibleBasics = map[[2]types.BasicKind]bool{
-	{types.Uint8, types.Uint16}:         true,
-	{types.Uint8, types.Uint32}:         true,
-	{types.Uint8, types.Uint}:           true,
-	{types.Uint8, types.Uint64}:         true,
-	{types.Uint16, types.Uint32}:        true,
-	{types.Uint16, types.Uint}:          true,
-	{types.Uint16, types.Uint64}:        true,
-	{types.Uint32, types.Uint}:          true,
-	{types.Uint32, types.Uint64}:        true,
-	{types.Uint, types.Uint64}:          true,
-	{types.Int8, types.Int16}:           true,
-	{types.Int8, types.Int32}:           true,
-	{types.Int8, types.Int}:             true,
-	{types.Int8, types.Int64}:           true,
-	{types.Int16, types.Int32}:          true,
-	{types.Int16, types.Int}:            true,
-	{types.Int16, types.Int64}:          true,
-	{types.Int32, types.Int}:            true,
-	{types.Int32, types.Int64}:          true,
-	{types.Int, types.Int64}:            true,
-	{types.Float32, types.Float64}:      true,
-	{types.Complex64, types.Complex128}: true,
-}
-
-// Interface compatibility:
-// If the old interface has an unexported method, the new interface is compatible
-// if its exported method set is a superset of the old. (Users could not implement,
-// only embed.)
-//
-// If the old interface did not have an unexported method, the new interface is
-// compatible if its exported method set is the same as the old, and it has no
-// unexported methods. (Adding an unexported method makes the interface
-// unimplementable outside the package.)
-//
-// TODO: must also check that if any methods were added or removed, every exposed
-// type in the package that implemented the interface in old still implements it in
-// new. Otherwise external assignments could fail.
-func (d *differ) checkCompatibleInterface(otn *types.TypeName, old, new *types.Interface) {
-	// Method sets are checked in checkCompatibleDefined.
-
-	// Does the old interface have an unexported method?
-	if unexportedMethod(old) != nil {
-		d.checkMethodSet(otn, old, new, additionsCompatible)
-	} else {
-		// Perform an equivalence check, but with more information.
-		d.checkMethodSet(otn, old, new, additionsIncompatible)
-		if u := unexportedMethod(new); u != nil {
-			d.incompatible(otn, u.Name(), "added unexported method")
-		}
-	}
-}
-
-// Return an unexported method from the method set of t, or nil if there are none.
-func unexportedMethod(t *types.Interface) *types.Func {
-	for i := 0; i < t.NumMethods(); i++ {
-		if m := t.Method(i); !m.Exported() {
-			return m
-		}
-	}
-	return nil
-}
-
-// We need to check three things for structs:
-// 1. The set of exported fields must be compatible. This ensures that keyed struct
-//    literals continue to compile. (There is no compatibility guarantee for unkeyed
-//    struct literals.)
-// 2. The set of exported *selectable* fields must be compatible. This includes the exported
-//    fields of all embedded structs. This ensures that selections continue to compile.
-// 3. If the old struct is comparable, so must the new one be. This ensures that equality
-//    expressions and uses of struct values as map keys continue to compile.
-//
-// An unexported embedded struct can't appear in a struct literal outside the
-// package, so it doesn't have to be present, or have the same name, in the new
-// struct.
-//
-// Field tags are ignored: they have no compile-time implications.
-func (d *differ) checkCompatibleStruct(obj types.Object, old, new *types.Struct) {
-	d.checkCompatibleObjectSets(obj, exportedFields(old), exportedFields(new))
-	d.checkCompatibleObjectSets(obj, exportedSelectableFields(old), exportedSelectableFields(new))
-	// Removing comparability from a struct is an incompatible change.
-	if types.Comparable(old) && !types.Comparable(new) {
-		d.incompatible(obj, "", "old is comparable, new is not")
-	}
-}
-
-// exportedFields collects all the immediate fields of the struct that are exported.
-// This is also the set of exported keys for keyed struct literals.
-func exportedFields(s *types.Struct) map[string]types.Object {
-	m := map[string]types.Object{}
-	for i := 0; i < s.NumFields(); i++ {
-		f := s.Field(i)
-		if f.Exported() {
-			m[f.Name()] = f
-		}
-	}
-	return m
-}
-
-// exportedSelectableFields collects all the exported fields of the struct, including
-// exported fields of embedded structs.
-//
-// We traverse the struct breadth-first, because of the rule that a lower-depth field
-// shadows one at a higher depth.
-func exportedSelectableFields(s *types.Struct) map[string]types.Object {
-	var (
-		m    = map[string]types.Object{}
-		next []*types.Struct // embedded structs at the next depth
-		seen []*types.Struct // to handle recursive embedding
-	)
-	for cur := []*types.Struct{s}; len(cur) > 0; cur, next = next, nil {
-		seen = append(seen, cur...)
-		// We only want to consider unambiguous fields. Ambiguous fields (where there
-		// is more than one field of the same name at the same level) are legal, but
-		// cannot be selected.
-		for name, f := range unambiguousFields(cur) {
-			// Record an exported field we haven't seen before. If we have seen it,
-			// it occurred at a lower depth, so it shadows this field.
-			if f.Exported() && m[name] == nil {
-				m[name] = f
-			}
-			// Remember embedded structs for processing at the next depth,
-			// but only if we haven't seen the struct at this depth or above.
-			if !f.Anonymous() {
-				continue
-			}
-			t := f.Type().Underlying()
-			if p, ok := t.(*types.Pointer); ok {
-				t = p.Elem().Underlying()
-			}
-			if t, ok := t.(*types.Struct); ok && !contains(seen, t) {
-				next = append(next, t)
-			}
-		}
-	}
-	return m
-}
-
-func contains(ts []*types.Struct, t *types.Struct) bool {
-	for _, s := range ts {
-		if types.Identical(s, t) {
-			return true
-		}
-	}
-	return false
-}
-
-// Given a set of structs at the same depth, the unambiguous fields are the ones whose
-// names appear exactly once.
-func unambiguousFields(structs []*types.Struct) map[string]*types.Var {
-	fields := map[string]*types.Var{}
-	seen := map[string]bool{}
-	for _, s := range structs {
-		for i := 0; i < s.NumFields(); i++ {
-			f := s.Field(i)
-			name := f.Name()
-			if seen[name] {
-				delete(fields, name)
-			} else {
-				seen[name] = true
-				fields[name] = f
-			}
-		}
-	}
-	return fields
-}
-
-// Anything removed or changed from the old set is an incompatible change.
-// Anything added to the new set is a compatible change.
-func (d *differ) checkCompatibleObjectSets(obj types.Object, old, new map[string]types.Object) {
-	for name, oldo := range old {
-		newo := new[name]
-		if newo == nil {
-			d.incompatible(obj, name, "removed")
-		} else {
-			d.checkCorrespondence(obj, name, oldo.Type(), newo.Type())
-		}
-	}
-	for name := range new {
-		if old[name] == nil {
-			d.compatible(obj, name, "added")
-		}
-	}
-}
-
-func (d *differ) checkCompatibleDefined(otn *types.TypeName, old *types.Named, new types.Type) {
-	// We've already checked that old and new correspond.
-	d.checkCompatible(otn, old.Underlying(), new.Underlying())
-	// If there are different kinds of types (e.g. struct and interface), don't bother checking
-	// the method sets.
-	if reflect.TypeOf(old.Underlying()) != reflect.TypeOf(new.Underlying()) {
-		return
-	}
-	// Interface method sets are checked in checkCompatibleInterface.
-	if _, ok := old.Underlying().(*types.Interface); ok {
-		return
-	}
-
-	// A new method set is compatible with an old one if the new exported methods are a superset of the old.
-	d.checkMethodSet(otn, old, new, additionsCompatible)
-	d.checkMethodSet(otn, types.NewPointer(old), types.NewPointer(new), additionsCompatible)
-}
-
-const (
-	additionsCompatible   = true
-	additionsIncompatible = false
-)
-
-func (d *differ) checkMethodSet(otn *types.TypeName, oldt, newt types.Type, addcompat bool) {
-	// TODO: find a way to use checkCompatibleObjectSets for this.
-	oldMethodSet := exportedMethods(oldt)
-	newMethodSet := exportedMethods(newt)
-	msname := otn.Name()
-	if _, ok := oldt.(*types.Pointer); ok {
-		msname = "*" + msname
-	}
-	for name, oldMethod := range oldMethodSet {
-		newMethod := newMethodSet[name]
-		if newMethod == nil {
-			var part string
-			// Due to embedding, it's possible that the method's receiver type is not
-			// the same as the defined type whose method set we're looking at. So for
-			// a type T with removed method M that is embedded in some other type U,
-			// we will generate two "removed" messages for T.M, one for its own type
-			// T and one for the embedded type U. We want both messages to appear,
-			// but the messageSet dedup logic will allow only one message for a given
-			// object. So use the part string to distinguish them.
-			if receiverNamedType(oldMethod).Obj() != otn {
-				part = fmt.Sprintf(", method set of %s", msname)
-			}
-			d.incompatible(oldMethod, part, "removed")
-		} else {
-			obj := oldMethod
-			// If a value method is changed to a pointer method and has a signature
-			// change, then we can get two messages for the same method definition: one
-			// for the value method set that says it's removed, and another for the
-			// pointer method set that says it changed. To keep both messages (since
-			// messageSet dedups), use newMethod for the second. (Slight hack.)
-			if !hasPointerReceiver(oldMethod) && hasPointerReceiver(newMethod) {
-				obj = newMethod
-			}
-			d.checkCorrespondence(obj, "", oldMethod.Type(), newMethod.Type())
-		}
-	}
-
-	// Check for added methods.
-	for name, newMethod := range newMethodSet {
-		if oldMethodSet[name] == nil {
-			if addcompat {
-				d.compatible(newMethod, "", "added")
-			} else {
-				d.incompatible(newMethod, "", "added")
-			}
-		}
-	}
-}
-
-// exportedMethods collects all the exported methods of type's method set.
-func exportedMethods(t types.Type) map[string]types.Object {
-	m := map[string]types.Object{}
-	ms := types.NewMethodSet(t)
-	for i := 0; i < ms.Len(); i++ {
-		obj := ms.At(i).Obj()
-		if obj.Exported() {
-			m[obj.Name()] = obj
-		}
-	}
-	return m
-}
-
-func receiverType(method types.Object) types.Type {
-	return method.Type().(*types.Signature).Recv().Type()
-}
-
-func receiverNamedType(method types.Object) *types.Named {
-	switch t := receiverType(method).(type) {
-	case *types.Pointer:
-		return t.Elem().(*types.Named)
-	case *types.Named:
-		return t
-	default:
-		panic("unreachable")
-	}
-}
-
-func hasPointerReceiver(method types.Object) bool {
-	_, ok := receiverType(method).(*types.Pointer)
-	return ok
-}

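The `compatibleBasics` map is the heart of the numeric-type rule from the README. A self-contained excerpt (renamed `compatibleNumeric` here to make clear it is not the package's full table) shows how the lookup works:

```go
package main

import (
	"fmt"
	"go/types"
)

// A small excerpt of the compatibleBasics table above: a change of numeric
// kind is compatible only if it widens within the same class (unsigned,
// signed, float, complex) on both 32- and 64-bit targets.
var compatibleNumeric = map[[2]types.BasicKind]bool{
	{types.Int32, types.Int}:       true,
	{types.Int32, types.Int64}:     true,
	{types.Int, types.Int64}:       true,
	{types.Float32, types.Float64}: true,
}

func numericChangeCompatible(old, new types.BasicKind) bool {
	return old == new || compatibleNumeric[[2]types.BasicKind{old, new}]
}

func main() {
	fmt.Println(numericChangeCompatible(types.Int32, types.Int64)) // true: widening
	fmt.Println(numericChangeCompatible(types.Int64, types.Int))   // false: may narrow on 32-bit
	fmt.Println(numericChangeCompatible(types.Int, types.Uint))    // false: signedness change
}
```
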
+ 0 - 219
vendor/golang.org/x/exp/apidiff/correspondence.go

@@ -1,219 +0,0 @@
-package apidiff
-
-import (
-	"go/types"
-	"sort"
-)
-
-// Two types correspond if they are identical except for defined types,
-// which must correspond.
-//
-// Two defined types correspond if they can be interchanged in the old and new APIs,
-// possibly after a renaming.
-//
-// This is not a pure function. If we come across named types while traversing,
-// we establish correspondence.
-func (d *differ) correspond(old, new types.Type) bool {
-	return d.corr(old, new, nil)
-}
-
-// corr determines whether old and new correspond. The argument p is a list of
-// known interface identities, to avoid infinite recursion.
-//
-// corr calls itself recursively as much as possible, to establish more
-// correspondences and so check more of the API. E.g. if the new function has more
-// parameters than the old, compare all the old ones before returning false.
-//
-// Compare this to the implementation of go/types.Identical.
-func (d *differ) corr(old, new types.Type, p *ifacePair) bool {
-	// Structure copied from types.Identical.
-	switch old := old.(type) {
-	case *types.Basic:
-		return types.Identical(old, new)
-
-	case *types.Array:
-		if new, ok := new.(*types.Array); ok {
-			return d.corr(old.Elem(), new.Elem(), p) && old.Len() == new.Len()
-		}
-
-	case *types.Slice:
-		if new, ok := new.(*types.Slice); ok {
-			return d.corr(old.Elem(), new.Elem(), p)
-		}
-
-	case *types.Map:
-		if new, ok := new.(*types.Map); ok {
-			return d.corr(old.Key(), new.Key(), p) && d.corr(old.Elem(), new.Elem(), p)
-		}
-
-	case *types.Chan:
-		if new, ok := new.(*types.Chan); ok {
-			return d.corr(old.Elem(), new.Elem(), p) && old.Dir() == new.Dir()
-		}
-
-	case *types.Pointer:
-		if new, ok := new.(*types.Pointer); ok {
-			return d.corr(old.Elem(), new.Elem(), p)
-		}
-
-	case *types.Signature:
-		if new, ok := new.(*types.Signature); ok {
-			pe := d.corr(old.Params(), new.Params(), p)
-			re := d.corr(old.Results(), new.Results(), p)
-			return old.Variadic() == new.Variadic() && pe && re
-		}
-
-	case *types.Tuple:
-		if new, ok := new.(*types.Tuple); ok {
-			for i := 0; i < old.Len(); i++ {
-				if i >= new.Len() || !d.corr(old.At(i).Type(), new.At(i).Type(), p) {
-					return false
-				}
-			}
-			return old.Len() == new.Len()
-		}
-
-	case *types.Struct:
-		if new, ok := new.(*types.Struct); ok {
-			for i := 0; i < old.NumFields(); i++ {
-				if i >= new.NumFields() {
-					return false
-				}
-				of := old.Field(i)
-				nf := new.Field(i)
-				if of.Anonymous() != nf.Anonymous() ||
-					old.Tag(i) != new.Tag(i) ||
-					!d.corr(of.Type(), nf.Type(), p) ||
-					!d.corrFieldNames(of, nf) {
-					return false
-				}
-			}
-			return old.NumFields() == new.NumFields()
-		}
-
-	case *types.Interface:
-		if new, ok := new.(*types.Interface); ok {
-			// Deal with circularity. See the comment in types.Identical.
-			q := &ifacePair{old, new, p}
-			for p != nil {
-				if p.identical(q) {
-					return true // same pair was compared before
-				}
-				p = p.prev
-			}
-			oldms := d.sortedMethods(old)
-			newms := d.sortedMethods(new)
-			for i, om := range oldms {
-				if i >= len(newms) {
-					return false
-				}
-				nm := newms[i]
-				if d.methodID(om) != d.methodID(nm) || !d.corr(om.Type(), nm.Type(), q) {
-					return false
-				}
-			}
-			return old.NumMethods() == new.NumMethods()
-		}
-
-	case *types.Named:
-		if new, ok := new.(*types.Named); ok {
-			return d.establishCorrespondence(old, new)
-		}
-		if new, ok := new.(*types.Basic); ok {
-			// Basic types are defined types, too, so we have to support them.
-
-			return d.establishCorrespondence(old, new)
-		}
-
-	default:
-		panic("unknown type kind")
-	}
-	return false
-}
-
-// Compare old and new field names. We are determining correspondence across packages,
-// so just compare names, not packages. For an unexported, embedded field of named
-// type (non-named embedded fields are possible with aliases), we check that the type
-// names correspond. We check the types for correspondence before this is called, so
-// we've established correspondence.
-func (d *differ) corrFieldNames(of, nf *types.Var) bool {
-	if of.Anonymous() && nf.Anonymous() && !of.Exported() && !nf.Exported() {
-		if on, ok := of.Type().(*types.Named); ok {
-			nn := nf.Type().(*types.Named)
-			return d.establishCorrespondence(on, nn)
-		}
-	}
-	return of.Name() == nf.Name()
-}
-
-// Establish that old corresponds with new if it does not already
-// correspond to something else.
-func (d *differ) establishCorrespondence(old *types.Named, new types.Type) bool {
-	oldname := old.Obj()
-	oldc := d.correspondMap[oldname]
-	if oldc == nil {
-		// For now, assume the types don't correspond unless they are from the old
-		// and new packages, respectively.
-		//
-		// This is too conservative. For instance,
-		//    [old] type A = q.B; [new] type A q.C
-		// could be OK if in package q, B is an alias for C.
-		// Or, using p as the name of the current old/new packages:
-		//    [old] type A = q.B; [new] type A int
-		// could be OK if in q,
-		//    [old] type B int; [new] type B = p.A
-		// In this case, p.A and q.B name the same type in both old and new worlds.
-		// Note that this case doesn't imply circular package imports: it's possible
-		// that in the old world, p imports q, but in the new, q imports p.
-		//
-		// However, if we didn't do something here, then we'd incorrectly allow cases
-		// like the first one above in which q.B is not an alias for q.C
-		//
-		// What we should do is check that the old type, in the new world's package
-		// of the same path, doesn't correspond to something other than the new type.
-		// That is a bit hard, because there is no easy way to find a new package
-		// matching an old one.
-		if newn, ok := new.(*types.Named); ok {
-			if old.Obj().Pkg() != d.old || newn.Obj().Pkg() != d.new {
-				return old.Obj().Id() == newn.Obj().Id()
-			}
-		}
-		// If there is no correspondence, create one.
-		d.correspondMap[oldname] = new
-		// Check that the corresponding types are compatible.
-		d.checkCompatibleDefined(oldname, old, new)
-		return true
-	}
-	return types.Identical(oldc, new)
-}
-
-func (d *differ) sortedMethods(iface *types.Interface) []*types.Func {
-	ms := make([]*types.Func, iface.NumMethods())
-	for i := 0; i < iface.NumMethods(); i++ {
-		ms[i] = iface.Method(i)
-	}
-	sort.Slice(ms, func(i, j int) bool { return d.methodID(ms[i]) < d.methodID(ms[j]) })
-	return ms
-}
-
-func (d *differ) methodID(m *types.Func) string {
-	// If the method belongs to one of the two packages being compared, use
-	// just its name even if it's unexported. That lets us treat unexported names
-	// from the old and new packages as equal.
-	if m.Pkg() == d.old || m.Pkg() == d.new {
-		return m.Name()
-	}
-	return m.Id()
-}
-
-// Copied from the go/types package:
-
-// An ifacePair is a node in a stack of interface type pairs compared for identity.
-type ifacePair struct {
-	x, y *types.Interface
-	prev *ifacePair
-}
-
-func (p *ifacePair) identical(q *ifacePair) bool {
-	return p.x == q.x && p.y == q.y || p.x == q.y && p.y == q.x
-}

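A small sketch of why correspondence exists at all: `types.Identical` keeps two defined types with different names apart, even when their underlying types match, so `establishCorrespondence` above is what lets a rename like the README's old `u1` / new `u2` count as compatible. The constructed types here are synthetic, for illustration only:

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

// defineInt builds a defined type with the given name and underlying type int.
func defineInt(name string) *types.Named {
	tn := types.NewTypeName(token.NoPos, nil, name, nil)
	return types.NewNamed(tn, types.Typ[types.Int], nil)
}

func main() {
	u1 := defineInt("u1")
	u2 := defineInt("u2")

	fmt.Println(types.Identical(u1, u2))                           // false: distinct defined types
	fmt.Println(types.Identical(u1.Underlying(), u2.Underlying())) // true: both are int
}
```
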
+ 0 - 79
vendor/golang.org/x/exp/apidiff/messageset.go

@@ -1,79 +0,0 @@
-// TODO: show that two-non-empty dotjoin can happen, by using an anon struct as a field type
-// TODO: don't report removed/changed methods for both value and pointer method sets?
-
-package apidiff
-
-import (
-	"fmt"
-	"go/types"
-	"sort"
-	"strings"
-)
-
-// There can be at most one message for each object or part thereof.
-// Parts include interface methods and struct fields.
-//
-// The part thing is necessary. Method (Func) objects have sufficient info, but field
-// Vars do not: they just have a field name and a type, without the enclosing struct.
-type messageSet map[types.Object]map[string]string
-
-// Add a message for obj and part, overwriting a previous message
-// (shouldn't happen).
-// obj is required but part can be empty.
-func (m messageSet) add(obj types.Object, part, msg string) {
-	s := m[obj]
-	if s == nil {
-		s = map[string]string{}
-		m[obj] = s
-	}
-	if f, ok := s[part]; ok && f != msg {
-		fmt.Printf("! second, different message for obj %s, part %q\n", obj, part)
-		fmt.Printf("  first:  %s\n", f)
-		fmt.Printf("  second: %s\n", msg)
-	}
-	s[part] = msg
-}
-
-func (m messageSet) collect() []string {
-	var s []string
-	for obj, parts := range m {
-		// Format each object name relative to its own package.
-		objstring := objectString(obj)
-		for part, msg := range parts {
-			var p string
-
-			if strings.HasPrefix(part, ",") {
-				p = objstring + part
-			} else {
-				p = dotjoin(objstring, part)
-			}
-			s = append(s, p+": "+msg)
-		}
-	}
-	sort.Strings(s)
-	return s
-}
-
-func objectString(obj types.Object) string {
-	if f, ok := obj.(*types.Func); ok {
-		sig := f.Type().(*types.Signature)
-		if recv := sig.Recv(); recv != nil {
-			tn := types.TypeString(recv.Type(), types.RelativeTo(obj.Pkg()))
-			if tn[0] == '*' {
-				tn = "(" + tn + ")"
-			}
-			return fmt.Sprintf("%s.%s", tn, obj.Name())
-		}
-	}
-	return obj.Name()
-}
-
-func dotjoin(s1, s2 string) string {
-	if s1 == "" {
-		return s2
-	}
-	if s2 == "" {
-		return s1
-	}
-	return s1 + "." + s2
-}

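For reference, a string-keyed analogue of the `messageSet` idea (the real one keys on `types.Object`): at most one message per (object, part) pair, collected in sorted order. The `messages` type and its methods are invented for this sketch:

```go
package main

import (
	"fmt"
	"sort"
)

// messages stores at most one message per (object, part) pair.
type messages map[string]map[string]string

func (m messages) add(obj, part, msg string) {
	if m[obj] == nil {
		m[obj] = map[string]string{}
	}
	m[obj][part] = msg
}

func (m messages) collect() []string {
	var out []string
	for obj, parts := range m {
		for part, msg := range parts {
			name := obj
			if part != "" {
				name += "." + part
			}
			out = append(out, name+": "+msg)
		}
	}
	sort.Strings(out)
	return out
}

func main() {
	m := messages{}
	m.add("T", "X", "removed")
	m.add("T", "X", "removed") // deduplicated: same (object, part)
	m.add("F", "", "changed from func(int) to func(...int)")
	fmt.Println(m.collect())
}
```
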
+ 0 - 71
vendor/golang.org/x/exp/apidiff/report.go

@@ -1,71 +0,0 @@
-package apidiff
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-)
-
-// Report describes the changes detected by Changes.
-type Report struct {
-	Changes []Change
-}
-
-// A Change describes a single API change.
-type Change struct {
-	Message    string
-	Compatible bool
-}
-
-func (r Report) messages(compatible bool) []string {
-	var msgs []string
-	for _, c := range r.Changes {
-		if c.Compatible == compatible {
-			msgs = append(msgs, c.Message)
-		}
-	}
-	return msgs
-}
-
-func (r Report) String() string {
-	var buf bytes.Buffer
-	if err := r.Text(&buf); err != nil {
-		return fmt.Sprintf("!!%v", err)
-	}
-	return buf.String()
-}
-
-func (r Report) Text(w io.Writer) error {
-	if err := r.TextIncompatible(w, true); err != nil {
-		return err
-	}
-	return r.TextCompatible(w)
-}
-
-func (r Report) TextIncompatible(w io.Writer, withHeader bool) error {
-	if withHeader {
-		return r.writeMessages(w, "Incompatible changes:", r.messages(false))
-	}
-	return r.writeMessages(w, "", r.messages(false))
-}
-
-func (r Report) TextCompatible(w io.Writer) error {
-	return r.writeMessages(w, "Compatible changes:", r.messages(true))
-}
-
-func (r Report) writeMessages(w io.Writer, header string, msgs []string) error {
-	if len(msgs) == 0 {
-		return nil
-	}
-	if header != "" {
-		if _, err := fmt.Fprintf(w, "%s\n", header); err != nil {
-			return err
-		}
-	}
-	for _, m := range msgs {
-		if _, err := fmt.Fprintf(w, "- %s\n", m); err != nil {
-			return err
-		}
-	}
-	return nil
-}

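`Report` is plain data, so callers were free to post-process it. A hedged sketch of turning a report into a version-bump suggestion, following the rule stated at the top of the removed README; the `suggestBump` helper is invented here and is not part of the package:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/apidiff"
)

// suggestBump maps a Report to the semantic version component that should
// change: incompatible changes need a major bump, compatible-only changes a
// minor bump, and no API changes means at most a patch bump.
func suggestBump(r apidiff.Report) string {
	bump := "patch"
	for _, c := range r.Changes {
		if !c.Compatible {
			return "major"
		}
		bump = "minor"
	}
	return bump
}

func main() {
	r := apidiff.Report{Changes: []apidiff.Change{
		{Message: "T.Y: added", Compatible: true},
	}}
	fmt.Println(suggestBump(r)) // minor
}
```
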
+ 0 - 142
vendor/golang.org/x/exp/cmd/apidiff/main.go

@@ -1,142 +0,0 @@
-// Command apidiff determines whether two versions of a package are compatible
-package main
-
-import (
-	"bufio"
-	"flag"
-	"fmt"
-	"go/token"
-	"go/types"
-	"os"
-
-	"golang.org/x/exp/apidiff"
-	"golang.org/x/tools/go/gcexportdata"
-	"golang.org/x/tools/go/packages"
-)
-
-var (
-	exportDataOutfile = flag.String("w", "", "file for export data")
-	incompatibleOnly  = flag.Bool("incompatible", false, "display only incompatible changes")
-)
-
-func main() {
-	flag.Usage = func() {
-		w := flag.CommandLine.Output()
-		fmt.Fprintf(w, "usage:\n")
-		fmt.Fprintf(w, "apidiff OLD NEW\n")
-		fmt.Fprintf(w, "   compares OLD and NEW package APIs\n")
-		fmt.Fprintf(w, "   where OLD and NEW are either import paths or files of export data\n")
-		fmt.Fprintf(w, "apidiff -w FILE IMPORT_PATH\n")
-		fmt.Fprintf(w, "   writes export data of the package at IMPORT_PATH to FILE\n")
-		fmt.Fprintf(w, "   NOTE: In a GOPATH-less environment, this option consults the\n")
-		fmt.Fprintf(w, "   module cache by default, unless used in the directory that\n")
-		fmt.Fprintf(w, "   contains the go.mod module definition that IMPORT_PATH belongs\n")
-		fmt.Fprintf(w, "   to. In most cases users want the latter behavior, so be sure\n")
-		fmt.Fprintf(w, "   to cd to the exact directory which contains the module\n")
-		fmt.Fprintf(w, "   definition of IMPORT_PATH.\n")
-		flag.PrintDefaults()
-	}
-
-	flag.Parse()
-	if *exportDataOutfile != "" {
-		if len(flag.Args()) != 1 {
-			flag.Usage()
-			os.Exit(2)
-		}
-		pkg := mustLoadPackage(flag.Arg(0))
-		if err := writeExportData(pkg, *exportDataOutfile); err != nil {
-			die("writing export data: %v", err)
-		}
-	} else {
-		if len(flag.Args()) != 2 {
-			flag.Usage()
-			os.Exit(2)
-		}
-		oldpkg := mustLoadOrRead(flag.Arg(0))
-		newpkg := mustLoadOrRead(flag.Arg(1))
-
-		report := apidiff.Changes(oldpkg, newpkg)
-		var err error
-		if *incompatibleOnly {
-			err = report.TextIncompatible(os.Stdout, false)
-		} else {
-			err = report.Text(os.Stdout)
-		}
-		if err != nil {
-			die("writing report: %v", err)
-		}
-	}
-}
-
-func mustLoadOrRead(importPathOrFile string) *types.Package {
-	fileInfo, err := os.Stat(importPathOrFile)
-	if err == nil && fileInfo.Mode().IsRegular() {
-		pkg, err := readExportData(importPathOrFile)
-		if err != nil {
-			die("reading export data from %s: %v", importPathOrFile, err)
-		}
-		return pkg
-	} else {
-		return mustLoadPackage(importPathOrFile).Types
-	}
-}
-
-func mustLoadPackage(importPath string) *packages.Package {
-	pkg, err := loadPackage(importPath)
-	if err != nil {
-		die("loading %s: %v", importPath, err)
-	}
-	return pkg
-}
-
-func loadPackage(importPath string) (*packages.Package, error) {
-	cfg := &packages.Config{Mode: packages.LoadTypes}
-	pkgs, err := packages.Load(cfg, importPath)
-	if err != nil {
-		return nil, err
-	}
-	if len(pkgs) == 0 {
-		return nil, fmt.Errorf("found no packages for import %s", importPath)
-	}
-	if len(pkgs[0].Errors) > 0 {
-		return nil, pkgs[0].Errors[0]
-	}
-	return pkgs[0], nil
-}
-
-func readExportData(filename string) (*types.Package, error) {
-	f, err := os.Open(filename)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	r := bufio.NewReader(f)
-	m := map[string]*types.Package{}
-	pkgPath, err := r.ReadString('\n')
-	if err != nil {
-		return nil, err
-	}
-	pkgPath = pkgPath[:len(pkgPath)-1] // remove delimiter
-	return gcexportdata.Read(r, token.NewFileSet(), m, pkgPath)
-}
-
-func writeExportData(pkg *packages.Package, filename string) error {
-	f, err := os.Create(filename)
-	if err != nil {
-		return err
-	}
-	// Include the package path in the file. The exportdata format does
-	// not record the path of the package being written.
-	fmt.Fprintln(f, pkg.PkgPath)
-	err1 := gcexportdata.Write(f, pkg.Fset, pkg.Types)
-	err2 := f.Close()
-	if err1 != nil {
-		return err1
-	}
-	return err2
-}
-
-func die(format string, args ...interface{}) {
-	fmt.Fprintf(os.Stderr, format+"\n", args...)
-	os.Exit(1)
-}

+ 0 - 186
vendor/golang.org/x/tools/go/ast/inspector/inspector.go

@@ -1,186 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package inspector provides helper functions for traversal over the
-// syntax trees of a package, including node filtering by type, and
-// materialization of the traversal stack.
-//
-// During construction, the inspector does a complete traversal and
-// builds a list of push/pop events and their node type. Subsequent
-// method calls that request a traversal scan this list, rather than walk
-// the AST, and perform type filtering using efficient bit sets.
-//
-// Experiments suggest the inspector's traversals are about 2.5x faster
-// than ast.Inspect, but it may take around 5 traversals for this
-// benefit to amortize the inspector's construction cost.
-// If efficiency is the primary concern, do not use Inspector for
-// one-off traversals.
-package inspector
-
-// There are four orthogonal features in a traversal:
-//  1 type filtering
-//  2 pruning
-//  3 postorder calls to f
-//  4 stack
-// Rather than offer all of them in the API,
-// only a few combinations are exposed:
-// - Preorder is the fastest and has fewest features,
-//   but is the most commonly needed traversal.
-// - Nodes and WithStack both provide pruning and postorder calls,
-//   even though few clients need it, because supporting two versions
-//   is not justified.
-// More combinations could be supported by expressing them as
-// wrappers around a more generic traversal, but this was measured
-// and found to degrade performance significantly (30%).
-
-import (
-	"go/ast"
-)
-
-// An Inspector provides methods for inspecting
-// (traversing) the syntax trees of a package.
-type Inspector struct {
-	events []event
-}
-
-// New returns an Inspector for the specified syntax trees.
-func New(files []*ast.File) *Inspector {
-	return &Inspector{traverse(files)}
-}
-
-// An event represents a push or a pop
-// of an ast.Node during a traversal.
-type event struct {
-	node  ast.Node
-	typ   uint64 // typeOf(node)
-	index int    // 1 + index of corresponding pop event, or 0 if this is a pop
-}
-
-// Preorder visits all the nodes of the files supplied to New in
-// depth-first order. It calls f(n) for each node n before it visits
-// n's children.
-//
-// The types argument, if non-empty, enables type-based filtering of
-// events. The function f is called only for nodes whose type
-// matches an element of the types slice.
-func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
-	// Because it avoids postorder calls to f, and the pruning
-	// check, Preorder is almost twice as fast as Nodes. The two
-	// features seem to contribute similar slowdowns (~1.4x each).
-
-	mask := maskOf(types)
-	for i := 0; i < len(in.events); {
-		ev := in.events[i]
-		if ev.typ&mask != 0 {
-			if ev.index > 0 {
-				f(ev.node)
-			}
-		}
-		i++
-	}
-}
-
-// Nodes visits the nodes of the files supplied to New in depth-first
-// order. It calls f(n, true) for each node n before it visits n's
-// children. If f returns true, Nodes invokes f recursively for each
-// of the non-nil children of the node, followed by a call of
-// f(n, false).
-//
-// The types argument, if non-empty, enables type-based filtering of
-// events. The function f is called only for nodes whose type
-// matches an element of the types slice.
-func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
-	mask := maskOf(types)
-	for i := 0; i < len(in.events); {
-		ev := in.events[i]
-		if ev.typ&mask != 0 {
-			if ev.index > 0 {
-				// push
-				if !f(ev.node, true) {
-					i = ev.index // jump to corresponding pop + 1
-					continue
-				}
-			} else {
-				// pop
-				f(ev.node, false)
-			}
-		}
-		i++
-	}
-}
-
-// WithStack visits nodes in a similar manner to Nodes, but it
-// supplies each call to f an additional argument, the current
-// traversal stack. The stack's first element is the outermost node,
-// an *ast.File; its last is the innermost, n.
-func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
-	mask := maskOf(types)
-	var stack []ast.Node
-	for i := 0; i < len(in.events); {
-		ev := in.events[i]
-		if ev.index > 0 {
-			// push
-			stack = append(stack, ev.node)
-			if ev.typ&mask != 0 {
-				if !f(ev.node, true, stack) {
-					i = ev.index
-					stack = stack[:len(stack)-1]
-					continue
-				}
-			}
-		} else {
-			// pop
-			if ev.typ&mask != 0 {
-				f(ev.node, false, stack)
-			}
-			stack = stack[:len(stack)-1]
-		}
-		i++
-	}
-}
-
-// traverse builds the table of events representing a traversal.
-func traverse(files []*ast.File) []event {
-	// Preallocate approximate number of events
-	// based on source file extent.
-	// This makes traverse faster by 4x (!).
-	var extent int
-	for _, f := range files {
-		extent += int(f.End() - f.Pos())
-	}
-	// This estimate is based on the net/http package.
-	capacity := extent * 33 / 100
-	if capacity > 1e6 {
-		capacity = 1e6 // impose some reasonable maximum
-	}
-	events := make([]event, 0, capacity)
-
-	var stack []event
-	for _, f := range files {
-		ast.Inspect(f, func(n ast.Node) bool {
-			if n != nil {
-				// push
-				ev := event{
-					node:  n,
-					typ:   typeOf(n),
-					index: len(events), // push event temporarily holds own index
-				}
-				stack = append(stack, ev)
-				events = append(events, ev)
-			} else {
-				// pop
-				ev := stack[len(stack)-1]
-				stack = stack[:len(stack)-1]
-
-				events[ev.index].index = len(events) + 1 // make push refer to pop
-
-				ev.index = 0 // turn ev into a pop event
-				events = append(events, ev)
-			}
-			return true
-		})
-	}
-
-	return events
-}
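
As a usage sketch of the traversal API deleted above: New and Preorder are taken from this file, while the parsed source snippet and the choice of filtering on *ast.CallExpr are illustrative only.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", `package p
func F() { println("hi") }`, 0)
	if err != nil {
		panic(err)
	}

	// Build the inspector once per package, then reuse it for many traversals.
	in := inspector.New([]*ast.File{f})

	// Preorder with a type filter: the callback only sees call expressions.
	in.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
		call := n.(*ast.CallExpr)
		fmt.Println("call at", fset.Position(call.Pos()))
	})
}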

+ 0 - 220
vendor/golang.org/x/tools/go/ast/inspector/typeof.go

@@ -1,220 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package inspector
-
-// This file defines func typeOf(ast.Node) uint64.
-//
-// The initial map-based implementation was too slow;
-// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196
-
-import "go/ast"
-
-const (
-	nArrayType = iota
-	nAssignStmt
-	nBadDecl
-	nBadExpr
-	nBadStmt
-	nBasicLit
-	nBinaryExpr
-	nBlockStmt
-	nBranchStmt
-	nCallExpr
-	nCaseClause
-	nChanType
-	nCommClause
-	nComment
-	nCommentGroup
-	nCompositeLit
-	nDeclStmt
-	nDeferStmt
-	nEllipsis
-	nEmptyStmt
-	nExprStmt
-	nField
-	nFieldList
-	nFile
-	nForStmt
-	nFuncDecl
-	nFuncLit
-	nFuncType
-	nGenDecl
-	nGoStmt
-	nIdent
-	nIfStmt
-	nImportSpec
-	nIncDecStmt
-	nIndexExpr
-	nInterfaceType
-	nKeyValueExpr
-	nLabeledStmt
-	nMapType
-	nPackage
-	nParenExpr
-	nRangeStmt
-	nReturnStmt
-	nSelectStmt
-	nSelectorExpr
-	nSendStmt
-	nSliceExpr
-	nStarExpr
-	nStructType
-	nSwitchStmt
-	nTypeAssertExpr
-	nTypeSpec
-	nTypeSwitchStmt
-	nUnaryExpr
-	nValueSpec
-)
-
-// typeOf returns a distinct single-bit value that represents the type of n.
-//
-// Various implementations were benchmarked with BenchmarkNewInspector:
-//								GOGC=off
-// - type switch				4.9-5.5ms	2.1ms
-// - binary search over a sorted list of types  5.5-5.9ms	2.5ms
-// - linear scan, frequency-ordered list 	5.9-6.1ms	2.7ms
-// - linear scan, unordered list		6.4ms		2.7ms
-// - hash table					6.5ms		3.1ms
-// A perfect hash seemed like overkill.
-//
-// The compiler's switch statement is the clear winner
-// as it produces a binary tree in code,
-// with constant conditions and good branch prediction.
-// (Sadly it is the most verbose in source code.)
-// Binary search suffered from poor branch prediction.
-//
-func typeOf(n ast.Node) uint64 {
-	// Fast path: nearly half of all nodes are identifiers.
-	if _, ok := n.(*ast.Ident); ok {
-		return 1 << nIdent
-	}
-
-	// These cases include all nodes encountered by ast.Inspect.
-	switch n.(type) {
-	case *ast.ArrayType:
-		return 1 << nArrayType
-	case *ast.AssignStmt:
-		return 1 << nAssignStmt
-	case *ast.BadDecl:
-		return 1 << nBadDecl
-	case *ast.BadExpr:
-		return 1 << nBadExpr
-	case *ast.BadStmt:
-		return 1 << nBadStmt
-	case *ast.BasicLit:
-		return 1 << nBasicLit
-	case *ast.BinaryExpr:
-		return 1 << nBinaryExpr
-	case *ast.BlockStmt:
-		return 1 << nBlockStmt
-	case *ast.BranchStmt:
-		return 1 << nBranchStmt
-	case *ast.CallExpr:
-		return 1 << nCallExpr
-	case *ast.CaseClause:
-		return 1 << nCaseClause
-	case *ast.ChanType:
-		return 1 << nChanType
-	case *ast.CommClause:
-		return 1 << nCommClause
-	case *ast.Comment:
-		return 1 << nComment
-	case *ast.CommentGroup:
-		return 1 << nCommentGroup
-	case *ast.CompositeLit:
-		return 1 << nCompositeLit
-	case *ast.DeclStmt:
-		return 1 << nDeclStmt
-	case *ast.DeferStmt:
-		return 1 << nDeferStmt
-	case *ast.Ellipsis:
-		return 1 << nEllipsis
-	case *ast.EmptyStmt:
-		return 1 << nEmptyStmt
-	case *ast.ExprStmt:
-		return 1 << nExprStmt
-	case *ast.Field:
-		return 1 << nField
-	case *ast.FieldList:
-		return 1 << nFieldList
-	case *ast.File:
-		return 1 << nFile
-	case *ast.ForStmt:
-		return 1 << nForStmt
-	case *ast.FuncDecl:
-		return 1 << nFuncDecl
-	case *ast.FuncLit:
-		return 1 << nFuncLit
-	case *ast.FuncType:
-		return 1 << nFuncType
-	case *ast.GenDecl:
-		return 1 << nGenDecl
-	case *ast.GoStmt:
-		return 1 << nGoStmt
-	case *ast.Ident:
-		return 1 << nIdent
-	case *ast.IfStmt:
-		return 1 << nIfStmt
-	case *ast.ImportSpec:
-		return 1 << nImportSpec
-	case *ast.IncDecStmt:
-		return 1 << nIncDecStmt
-	case *ast.IndexExpr:
-		return 1 << nIndexExpr
-	case *ast.InterfaceType:
-		return 1 << nInterfaceType
-	case *ast.KeyValueExpr:
-		return 1 << nKeyValueExpr
-	case *ast.LabeledStmt:
-		return 1 << nLabeledStmt
-	case *ast.MapType:
-		return 1 << nMapType
-	case *ast.Package:
-		return 1 << nPackage
-	case *ast.ParenExpr:
-		return 1 << nParenExpr
-	case *ast.RangeStmt:
-		return 1 << nRangeStmt
-	case *ast.ReturnStmt:
-		return 1 << nReturnStmt
-	case *ast.SelectStmt:
-		return 1 << nSelectStmt
-	case *ast.SelectorExpr:
-		return 1 << nSelectorExpr
-	case *ast.SendStmt:
-		return 1 << nSendStmt
-	case *ast.SliceExpr:
-		return 1 << nSliceExpr
-	case *ast.StarExpr:
-		return 1 << nStarExpr
-	case *ast.StructType:
-		return 1 << nStructType
-	case *ast.SwitchStmt:
-		return 1 << nSwitchStmt
-	case *ast.TypeAssertExpr:
-		return 1 << nTypeAssertExpr
-	case *ast.TypeSpec:
-		return 1 << nTypeSpec
-	case *ast.TypeSwitchStmt:
-		return 1 << nTypeSwitchStmt
-	case *ast.UnaryExpr:
-		return 1 << nUnaryExpr
-	case *ast.ValueSpec:
-		return 1 << nValueSpec
-	}
-	return 0
-}
-
-func maskOf(nodes []ast.Node) uint64 {
-	if nodes == nil {
-		return 1<<64 - 1 // match all node types
-	}
-	var mask uint64
-	for _, n := range nodes {
-		mask |= typeOf(n)
-	}
-	return mask
-}

+ 0 - 198
vendor/golang.org/x/tools/go/buildutil/allpackages.go

@@ -1,198 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package buildutil provides utilities related to the go/build
-// package in the standard library.
-//
-// All I/O is done via the build.Context file system interface, which must
-// be concurrency-safe.
-package buildutil // import "golang.org/x/tools/go/buildutil"
-
-import (
-	"go/build"
-	"os"
-	"path/filepath"
-	"sort"
-	"strings"
-	"sync"
-)
-
-// AllPackages returns the package path of each Go package in any source
-// directory of the specified build context (e.g. $GOROOT or an element
-// of $GOPATH).  Errors are ignored.  The results are sorted.
-// All package paths are canonical, and thus may contain "/vendor/".
-//
-// The result may include import paths for directories that contain no
-// *.go files, such as "archive" (in $GOROOT/src).
-//
-// All I/O is done via the build.Context file system interface,
-// which must be concurrency-safe.
-//
-func AllPackages(ctxt *build.Context) []string {
-	var list []string
-	ForEachPackage(ctxt, func(pkg string, _ error) {
-		list = append(list, pkg)
-	})
-	sort.Strings(list)
-	return list
-}
-
-// ForEachPackage calls the found function with the package path of
-// each Go package it finds in any source directory of the specified
-// build context (e.g. $GOROOT or an element of $GOPATH).
-// All package paths are canonical, and thus may contain "/vendor/".
-//
-// If the package directory exists but could not be read, the second
-// argument to the found function provides the error.
-//
-// All I/O is done via the build.Context file system interface,
-// which must be concurrency-safe.
-//
-func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) {
-	ch := make(chan item)
-
-	var wg sync.WaitGroup
-	for _, root := range ctxt.SrcDirs() {
-		root := root
-		wg.Add(1)
-		go func() {
-			allPackages(ctxt, root, ch)
-			wg.Done()
-		}()
-	}
-	go func() {
-		wg.Wait()
-		close(ch)
-	}()
-
-	// All calls to found occur in the caller's goroutine.
-	for i := range ch {
-		found(i.importPath, i.err)
-	}
-}
-
-type item struct {
-	importPath string
-	err        error // (optional)
-}
-
-// We use a process-wide counting semaphore to limit
-// the number of parallel calls to ReadDir.
-var ioLimit = make(chan bool, 20)
-
-func allPackages(ctxt *build.Context, root string, ch chan<- item) {
-	root = filepath.Clean(root) + string(os.PathSeparator)
-
-	var wg sync.WaitGroup
-
-	var walkDir func(dir string)
-	walkDir = func(dir string) {
-		// Avoid .foo, _foo, and testdata directory trees.
-		base := filepath.Base(dir)
-		if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" {
-			return
-		}
-
-		pkg := filepath.ToSlash(strings.TrimPrefix(dir, root))
-
-		// Prune search if we encounter any of these import paths.
-		switch pkg {
-		case "builtin":
-			return
-		}
-
-		ioLimit <- true
-		files, err := ReadDir(ctxt, dir)
-		<-ioLimit
-		if pkg != "" || err != nil {
-			ch <- item{pkg, err}
-		}
-		for _, fi := range files {
-			fi := fi
-			if fi.IsDir() {
-				wg.Add(1)
-				go func() {
-					walkDir(filepath.Join(dir, fi.Name()))
-					wg.Done()
-				}()
-			}
-		}
-	}
-
-	walkDir(root)
-	wg.Wait()
-}
-
-// ExpandPatterns returns the set of packages matched by patterns,
-// which may have the following forms:
-//
-//		golang.org/x/tools/cmd/guru     # a single package
-//		golang.org/x/tools/...          # all packages beneath dir
-//		...                             # the entire workspace.
-//
-// Order is significant: a pattern preceded by '-' removes matching
-// packages from the set.  For example, these patterns match all encoding
-// packages except encoding/xml:
-//
-// 	encoding/... -encoding/xml
-//
-// A trailing slash in a pattern is ignored.  (Path components of Go
-// package names are separated by slash, not the platform's path separator.)
-//
-func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool {
-	// TODO(adonovan): support other features of 'go list':
-	// - "std"/"cmd"/"all" meta-packages
-	// - "..." not at the end of a pattern
-	// - relative patterns using "./" or "../" prefix
-
-	pkgs := make(map[string]bool)
-	doPkg := func(pkg string, neg bool) {
-		if neg {
-			delete(pkgs, pkg)
-		} else {
-			pkgs[pkg] = true
-		}
-	}
-
-	// Scan entire workspace if wildcards are present.
-	// TODO(adonovan): opt: scan only the necessary subtrees of the workspace.
-	var all []string
-	for _, arg := range patterns {
-		if strings.HasSuffix(arg, "...") {
-			all = AllPackages(ctxt)
-			break
-		}
-	}
-
-	for _, arg := range patterns {
-		if arg == "" {
-			continue
-		}
-
-		neg := arg[0] == '-'
-		if neg {
-			arg = arg[1:]
-		}
-
-		if arg == "..." {
-			// ... matches all packages
-			for _, pkg := range all {
-				doPkg(pkg, neg)
-			}
-		} else if dir := strings.TrimSuffix(arg, "/..."); dir != arg {
-			// dir/... matches all packages beneath dir
-			for _, pkg := range all {
-				if strings.HasPrefix(pkg, dir) &&
-					(len(pkg) == len(dir) || pkg[len(dir)] == '/') {
-					doPkg(pkg, neg)
-				}
-			}
-		} else {
-			// single package
-			doPkg(strings.TrimSuffix(arg, "/"), neg)
-		}
-	}
-
-	return pkgs
-}
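
A small sketch of the two entry points documented above; AllPackages and ExpandPatterns are from the deleted file, and the pattern strings mirror the example in its doc comment.

package main

import (
	"fmt"
	"go/build"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// Every package visible in the default build context (GOROOT and GOPATH).
	for _, pkg := range buildutil.AllPackages(&build.Default) {
		fmt.Println(pkg)
	}

	// go-list-style patterns; a leading '-' removes matches from the set.
	pkgs := buildutil.ExpandPatterns(&build.Default, []string{"encoding/...", "-encoding/xml"})
	fmt.Println(len(pkgs), "encoding packages, excluding encoding/xml")
}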

+ 0 - 113
vendor/golang.org/x/tools/go/buildutil/fakecontext.go

@@ -1,113 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package buildutil
-
-import (
-	"fmt"
-	"go/build"
-	"io"
-	"io/ioutil"
-	"os"
-	"path"
-	"path/filepath"
-	"sort"
-	"strings"
-	"time"
-)
-
-// FakeContext returns a build.Context for the fake file tree specified
-// by pkgs, which maps package import paths to a mapping from file base
-// names to contents.
-//
-// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides
-// the necessary file access methods to read from memory instead of the
-// real file system.
-//
-// Unlike a real file tree, the fake one has only two levels---packages
-// and files---so ReadDir("/go/src/") returns all packages under
-// /go/src/ including, for instance, "math" and "math/big".
-// ReadDir("/go/src/math/big") would return all the files in the
-// "math/big" package.
-//
-func FakeContext(pkgs map[string]map[string]string) *build.Context {
-	clean := func(filename string) string {
-		f := path.Clean(filepath.ToSlash(filename))
-		// Removing "/go/src" while respecting segment
-		// boundaries has this unfortunate corner case:
-		if f == "/go/src" {
-			return ""
-		}
-		return strings.TrimPrefix(f, "/go/src/")
-	}
-
-	ctxt := build.Default // copy
-	ctxt.GOROOT = "/go"
-	ctxt.GOPATH = ""
-	ctxt.Compiler = "gc"
-	ctxt.IsDir = func(dir string) bool {
-		dir = clean(dir)
-		if dir == "" {
-			return true // needed by (*build.Context).SrcDirs
-		}
-		return pkgs[dir] != nil
-	}
-	ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) {
-		dir = clean(dir)
-		var fis []os.FileInfo
-		if dir == "" {
-			// enumerate packages
-			for importPath := range pkgs {
-				fis = append(fis, fakeDirInfo(importPath))
-			}
-		} else {
-			// enumerate files of package
-			for basename := range pkgs[dir] {
-				fis = append(fis, fakeFileInfo(basename))
-			}
-		}
-		sort.Sort(byName(fis))
-		return fis, nil
-	}
-	ctxt.OpenFile = func(filename string) (io.ReadCloser, error) {
-		filename = clean(filename)
-		dir, base := path.Split(filename)
-		content, ok := pkgs[path.Clean(dir)][base]
-		if !ok {
-			return nil, fmt.Errorf("file not found: %s", filename)
-		}
-		return ioutil.NopCloser(strings.NewReader(content)), nil
-	}
-	ctxt.IsAbsPath = func(path string) bool {
-		path = filepath.ToSlash(path)
-		// Don't rely on the default (filepath.Path) since on
-		// Windows, it reports virtual paths as non-absolute.
-		return strings.HasPrefix(path, "/")
-	}
-	return &ctxt
-}
-
-type byName []os.FileInfo
-
-func (s byName) Len() int           { return len(s) }
-func (s byName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
-
-type fakeFileInfo string
-
-func (fi fakeFileInfo) Name() string    { return string(fi) }
-func (fakeFileInfo) Sys() interface{}   { return nil }
-func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
-func (fakeFileInfo) IsDir() bool        { return false }
-func (fakeFileInfo) Size() int64        { return 0 }
-func (fakeFileInfo) Mode() os.FileMode  { return 0644 }
-
-type fakeDirInfo string
-
-func (fd fakeDirInfo) Name() string    { return string(fd) }
-func (fakeDirInfo) Sys() interface{}   { return nil }
-func (fakeDirInfo) ModTime() time.Time { return time.Time{} }
-func (fakeDirInfo) IsDir() bool        { return true }
-func (fakeDirInfo) Size() int64        { return 0 }
-func (fakeDirInfo) Mode() os.FileMode  { return 0755 }
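
A sketch of the in-memory context constructed by the file removed above; FakeContext, IsDir, and ReadDir are all buildutil functions from this diff, while the example package tree is invented.

package main

import (
	"fmt"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// Two-level fake tree: import path -> file base name -> contents.
	ctxt := buildutil.FakeContext(map[string]map[string]string{
		"example/hello": {"hello.go": `package hello; const Greeting = "hi"`},
	})

	// The fake context answers go/build's file-system questions from memory.
	fmt.Println(buildutil.IsDir(ctxt, "/go/src/example/hello")) // true
	fis, _ := buildutil.ReadDir(ctxt, "/go/src/example/hello")
	for _, fi := range fis {
		fmt.Println(fi.Name()) // hello.go
	}
}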

+ 0 - 103
vendor/golang.org/x/tools/go/buildutil/overlay.go

@@ -1,103 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package buildutil
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"go/build"
-	"io"
-	"io/ioutil"
-	"path/filepath"
-	"strconv"
-	"strings"
-)
-
-// OverlayContext overlays a build.Context with additional files from
-// a map. Files in the map take precedence over other files.
-//
-// In addition to plain string comparison, two file names are
-// considered equal if their base names match and their directory
-// components point at the same directory on the file system. That is,
-// symbolic links are followed for directories, but not files.
-//
-// A common use case for OverlayContext is to allow editors to pass in
-// a set of unsaved, modified files.
-//
-// Currently, only the Context.OpenFile function will respect the
-// overlay. This may change in the future.
-func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context {
-	// TODO(dominikh): Implement IsDir, HasSubdir and ReadDir
-
-	rc := func(data []byte) (io.ReadCloser, error) {
-		return ioutil.NopCloser(bytes.NewBuffer(data)), nil
-	}
-
-	copy := *orig // make a copy
-	ctxt := &copy
-	ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
-		// Fast path: names match exactly.
-		if content, ok := overlay[path]; ok {
-			return rc(content)
-		}
-
-		// Slow path: check for same file under a different
-		// alias, perhaps due to a symbolic link.
-		for filename, content := range overlay {
-			if sameFile(path, filename) {
-				return rc(content)
-			}
-		}
-
-		return OpenFile(orig, path)
-	}
-	return ctxt
-}
-
-// ParseOverlayArchive parses an archive containing Go files and their
-// contents. The result is intended to be used with OverlayContext.
-//
-//
-// Archive format
-//
-// The archive consists of a series of files. Each file consists of a
-// name, a decimal file size and the file contents, separated by
-// newlines. No newline follows after the file contents.
-func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) {
-	overlay := make(map[string][]byte)
-	r := bufio.NewReader(archive)
-	for {
-		// Read file name.
-		filename, err := r.ReadString('\n')
-		if err != nil {
-			if err == io.EOF {
-				break // OK
-			}
-			return nil, fmt.Errorf("reading archive file name: %v", err)
-		}
-		filename = filepath.Clean(strings.TrimSpace(filename))
-
-		// Read file size.
-		sz, err := r.ReadString('\n')
-		if err != nil {
-			return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err)
-		}
-		sz = strings.TrimSpace(sz)
-		size, err := strconv.ParseUint(sz, 10, 32)
-		if err != nil {
-			return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err)
-		}
-
-		// Read file content.
-		content := make([]byte, size)
-		if _, err := io.ReadFull(r, content); err != nil {
-			return nil, fmt.Errorf("reading archive file %s: %v", filename, err)
-		}
-		overlay[filename] = content
-	}
-
-	return overlay, nil
-}
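
A sketch of the overlay mechanism described above, in the unsaved-editor-buffer case its doc comment mentions; OverlayContext is from the deleted file, and the overlaid path and contents are invented.

package main

import (
	"fmt"
	"go/build"
	"io/ioutil"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// Unsaved editor buffer layered over the real file system.
	overlay := map[string][]byte{
		"/work/src/foo/foo.go": []byte("package foo // modified, not yet saved\n"),
	}
	ctxt := buildutil.OverlayContext(&build.Default, overlay)

	// Only OpenFile consults the overlay, as the removed doc comment notes.
	rc, err := ctxt.OpenFile("/work/src/foo/foo.go")
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	b, _ := ioutil.ReadAll(rc)
	fmt.Print(string(b))
}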

+ 0 - 79
vendor/golang.org/x/tools/go/buildutil/tags.go

@@ -1,79 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package buildutil
-
-// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go.
-
-import "fmt"
-
-const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " +
-	"For more information about build tags, see the description of " +
-	"build constraints in the documentation for the go/build package"
-
-// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses
-// a flag value in the same manner as go build's -tags flag and
-// populates a []string slice.
-//
-// See $GOROOT/src/go/build/doc.go for description of build tags.
-// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag.
-//
-// Example:
-// 	flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
-type TagsFlag []string
-
-func (v *TagsFlag) Set(s string) error {
-	var err error
-	*v, err = splitQuotedFields(s)
-	if *v == nil {
-		*v = []string{}
-	}
-	return err
-}
-
-func (v *TagsFlag) Get() interface{} { return *v }
-
-func splitQuotedFields(s string) ([]string, error) {
-	// Split fields allowing '' or "" around elements.
-	// Quotes further inside the string do not count.
-	var f []string
-	for len(s) > 0 {
-		for len(s) > 0 && isSpaceByte(s[0]) {
-			s = s[1:]
-		}
-		if len(s) == 0 {
-			break
-		}
-		// Accept a quoted string. No unescaping inside.
-		if s[0] == '"' || s[0] == '\'' {
-			quote := s[0]
-			s = s[1:]
-			i := 0
-			for i < len(s) && s[i] != quote {
-				i++
-			}
-			if i >= len(s) {
-				return nil, fmt.Errorf("unterminated %c string", quote)
-			}
-			f = append(f, s[:i])
-			s = s[i+1:]
-			continue
-		}
-		i := 0
-		for i < len(s) && !isSpaceByte(s[i]) {
-			i++
-		}
-		f = append(f, s[:i])
-		s = s[i:]
-	}
-	return f, nil
-}
-
-func (v *TagsFlag) String() string {
-	return "<tagsFlag>"
-}
-
-func isSpaceByte(c byte) bool {
-	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
-}
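
The doc comment above already shows the intended registration; a runnable version of that same one-liner, assuming nothing beyond the deleted file and the standard library:

package main

import (
	"flag"
	"fmt"
	"go/build"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	// Parse -tags exactly the way "go build" does, into build.Default.
	flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
	flag.Parse()
	fmt.Println("build tags:", build.Default.BuildTags)
}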

+ 0 - 212
vendor/golang.org/x/tools/go/buildutil/util.go

@@ -1,212 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package buildutil
-
-import (
-	"fmt"
-	"go/ast"
-	"go/build"
-	"go/parser"
-	"go/token"
-	"io"
-	"io/ioutil"
-	"os"
-	"path"
-	"path/filepath"
-	"strings"
-)
-
-// ParseFile behaves like parser.ParseFile,
-// but uses the build context's file system interface, if any.
-//
-// If file is not absolute (as defined by IsAbsPath), the (dir, file)
-// components are joined using JoinPath; dir must be absolute.
-//
-// The displayPath function, if provided, is used to transform the
-// filename that will be attached to the ASTs.
-//
-// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws.
-//
-func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) {
-	if !IsAbsPath(ctxt, file) {
-		file = JoinPath(ctxt, dir, file)
-	}
-	rd, err := OpenFile(ctxt, file)
-	if err != nil {
-		return nil, err
-	}
-	defer rd.Close() // ignore error
-	if displayPath != nil {
-		file = displayPath(file)
-	}
-	return parser.ParseFile(fset, file, rd, mode)
-}
-
-// ContainingPackage returns the package containing filename.
-//
-// If filename is not absolute, it is interpreted relative to working directory dir.
-// All I/O is via the build context's file system interface, if any.
-//
-// The '...Files []string' fields of the resulting build.Package are not
-// populated (build.FindOnly mode).
-//
-func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) {
-	if !IsAbsPath(ctxt, filename) {
-		filename = JoinPath(ctxt, dir, filename)
-	}
-
-	// We must not assume the file tree uses
-	// "/" always,
-	// `\` always,
-	// or os.PathSeparator (which varies by platform),
-	// but to make any progress, we are forced to assume that
-	// paths will not use `\` unless the PathSeparator
-	// is also `\`, thus we can rely on filepath.ToSlash for some sanity.
-
-	dirSlash := path.Dir(filepath.ToSlash(filename)) + "/"
-
-	// We assume that no source root (GOPATH[i] or GOROOT) contains any other.
-	for _, srcdir := range ctxt.SrcDirs() {
-		srcdirSlash := filepath.ToSlash(srcdir) + "/"
-		if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok {
-			return ctxt.Import(importPath, dir, build.FindOnly)
-		}
-	}
-
-	return nil, fmt.Errorf("can't find package containing %s", filename)
-}
-
-// -- Effective methods of file system interface -------------------------
-
-// (go/build.Context defines these as methods, but does not export them.)
-
-// HasSubdir calls ctxt.HasSubdir (if not nil) or else uses
-// the local file system to answer the question.
-func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) {
-	if f := ctxt.HasSubdir; f != nil {
-		return f(root, dir)
-	}
-
-	// Try using paths we received.
-	if rel, ok = hasSubdir(root, dir); ok {
-		return
-	}
-
-	// Try expanding symlinks and comparing
-	// expanded against unexpanded and
-	// expanded against expanded.
-	rootSym, _ := filepath.EvalSymlinks(root)
-	dirSym, _ := filepath.EvalSymlinks(dir)
-
-	if rel, ok = hasSubdir(rootSym, dir); ok {
-		return
-	}
-	if rel, ok = hasSubdir(root, dirSym); ok {
-		return
-	}
-	return hasSubdir(rootSym, dirSym)
-}
-
-func hasSubdir(root, dir string) (rel string, ok bool) {
-	const sep = string(filepath.Separator)
-	root = filepath.Clean(root)
-	if !strings.HasSuffix(root, sep) {
-		root += sep
-	}
-
-	dir = filepath.Clean(dir)
-	if !strings.HasPrefix(dir, root) {
-		return "", false
-	}
-
-	return filepath.ToSlash(dir[len(root):]), true
-}
-
-// FileExists returns true if the specified file exists,
-// using the build context's file system interface.
-func FileExists(ctxt *build.Context, path string) bool {
-	if ctxt.OpenFile != nil {
-		r, err := ctxt.OpenFile(path)
-		if err != nil {
-			return false
-		}
-		r.Close() // ignore error
-		return true
-	}
-	_, err := os.Stat(path)
-	return err == nil
-}
-
-// OpenFile behaves like os.Open,
-// but uses the build context's file system interface, if any.
-func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) {
-	if ctxt.OpenFile != nil {
-		return ctxt.OpenFile(path)
-	}
-	return os.Open(path)
-}
-
-// IsAbsPath behaves like filepath.IsAbs,
-// but uses the build context's file system interface, if any.
-func IsAbsPath(ctxt *build.Context, path string) bool {
-	if ctxt.IsAbsPath != nil {
-		return ctxt.IsAbsPath(path)
-	}
-	return filepath.IsAbs(path)
-}
-
-// JoinPath behaves like filepath.Join,
-// but uses the build context's file system interface, if any.
-func JoinPath(ctxt *build.Context, path ...string) string {
-	if ctxt.JoinPath != nil {
-		return ctxt.JoinPath(path...)
-	}
-	return filepath.Join(path...)
-}
-
-// IsDir behaves like os.Stat plus IsDir,
-// but uses the build context's file system interface, if any.
-func IsDir(ctxt *build.Context, path string) bool {
-	if ctxt.IsDir != nil {
-		return ctxt.IsDir(path)
-	}
-	fi, err := os.Stat(path)
-	return err == nil && fi.IsDir()
-}
-
-// ReadDir behaves like ioutil.ReadDir,
-// but uses the build context's file system interface, if any.
-func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) {
-	if ctxt.ReadDir != nil {
-		return ctxt.ReadDir(path)
-	}
-	return ioutil.ReadDir(path)
-}
-
-// SplitPathList behaves like filepath.SplitList,
-// but uses the build context's file system interface, if any.
-func SplitPathList(ctxt *build.Context, s string) []string {
-	if ctxt.SplitPathList != nil {
-		return ctxt.SplitPathList(s)
-	}
-	return filepath.SplitList(s)
-}
-
-// sameFile returns true if x and y have the same basename and denote
-// the same file.
-//
-func sameFile(x, y string) bool {
-	if path.Clean(x) == path.Clean(y) {
-		return true
-	}
-	if filepath.Base(x) == filepath.Base(y) { // (optimisation)
-		if xi, err := os.Stat(x); err == nil {
-			if yi, err := os.Stat(y); err == nil {
-				return os.SameFile(xi, yi)
-			}
-		}
-	}
-	return false
-}
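
A sketch of the fallback behaviour these helpers implement: each one prefers the context's hook and otherwise uses the real file system, so the same code works unchanged against FakeContext or OverlayContext. The helper names are from the deleted file; the file paths are illustrative.

package main

import (
	"fmt"
	"go/build"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	ctxt := &build.Default

	fmt.Println(buildutil.JoinPath(ctxt, "a", "b"))   // filepath.Join fallback
	fmt.Println(buildutil.FileExists(ctxt, "go.mod")) // os.Stat fallback

	// ParseFile reads through OpenFile, so overlays and fake trees are honoured.
	fset := token.NewFileSet()
	if f, err := buildutil.ParseFile(fset, ctxt, nil, ".", "main.go", parser.PackageClauseOnly); err == nil {
		fmt.Println("package", f.Name.Name)
	}
}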

+ 0 - 220
vendor/golang.org/x/tools/go/internal/cgo/cgo.go

@@ -1,220 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cgo handles cgo preprocessing of files containing `import "C"`.
-//
-// DESIGN
-//
-// The approach taken is to run the cgo processor on the package's
-// CgoFiles and parse the output, faking the filenames of the
-// resulting ASTs so that the synthetic file containing the C types is
-// called "C" (e.g. "~/go/src/net/C") and the preprocessed files
-// have their original names (e.g. "~/go/src/net/cgo_unix.go"),
-// not the names of the actual temporary files.
-//
-// The advantage of this approach is its fidelity to 'go build'.  The
-// downside is that the token.Position.Offset for each AST node is
-// incorrect, being an offset within the temporary file.  Line numbers
-// should still be correct because of the //line comments.
-//
-// The logic of this file is mostly plundered from the 'go build'
-// tool, which also invokes the cgo preprocessor.
-//
-//
-// REJECTED ALTERNATIVE
-//
-// An alternative approach that we explored is to extend go/types'
-// Importer mechanism to provide the identity of the importing package
-// so that each time `import "C"` appears it resolves to a different
-// synthetic package containing just the objects needed in that case.
-// The loader would invoke cgo but parse only the cgo_types.go file
-// defining the package-level objects, discarding the other files
-// resulting from preprocessing.
-//
-// The benefit of this approach would have been that source-level
-// syntax information would correspond exactly to the original cgo
-// file, with no preprocessing involved, making source tools like
-// godoc, guru, and eg happy.  However, the approach was rejected
-// due to the additional complexity it would impose on go/types.  (It
-// made for a beautiful demo, though.)
-//
-// cgo files, despite their *.go extension, are not legal Go source
-// files per the specification since they may refer to unexported
-// members of package "C" such as C.int.  Also, a function such as
-// C.getpwent has in effect two types, one matching its C type and one
-// which additionally returns (errno C.int).  The cgo preprocessor
-// uses name mangling to distinguish these two functions in the
-// processed code, but go/types would need to duplicate this logic in
-// its handling of function calls, analogous to the treatment of map
-// lookups in which y=m[k] and y,ok=m[k] are both legal.
-
-package cgo
-
-import (
-	"fmt"
-	"go/ast"
-	"go/build"
-	"go/parser"
-	"go/token"
-	exec "golang.org/x/sys/execabs"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-	"regexp"
-	"strings"
-)
-
-// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses
-// the output and returns the resulting ASTs.
-//
-func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
-	tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C")
-	if err != nil {
-		return nil, err
-	}
-	defer os.RemoveAll(tmpdir)
-
-	pkgdir := bp.Dir
-	if DisplayPath != nil {
-		pkgdir = DisplayPath(pkgdir)
-	}
-
-	cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false)
-	if err != nil {
-		return nil, err
-	}
-	var files []*ast.File
-	for i := range cgoFiles {
-		rd, err := os.Open(cgoFiles[i])
-		if err != nil {
-			return nil, err
-		}
-		display := filepath.Join(bp.Dir, cgoDisplayFiles[i])
-		f, err := parser.ParseFile(fset, display, rd, mode)
-		rd.Close()
-		if err != nil {
-			return nil, err
-		}
-		files = append(files, f)
-	}
-	return files, nil
-}
-
-var cgoRe = regexp.MustCompile(`[/\\:]`)
-
-// Run invokes the cgo preprocessor on bp.CgoFiles and returns two
-// lists of files: the resulting processed files (in temporary
-// directory tmpdir) and the corresponding names of the unprocessed files.
-//
-// Run is adapted from (*builder).cgo in
-// $GOROOT/src/cmd/go/build.go, but these features are unsupported:
-// Objective C, CGOPKGPATH, CGO_FLAGS.
-//
-// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in
-// to the cgo preprocessor. This in turn will set the // line comments
-// referring to those files to use absolute paths. This is needed for
-// go/packages using the legacy go list support so it is able to find
-// the original files.
-func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) {
-	cgoCPPFLAGS, _, _, _ := cflags(bp, true)
-	_, cgoexeCFLAGS, _, _ := cflags(bp, false)
-
-	if len(bp.CgoPkgConfig) > 0 {
-		pcCFLAGS, err := pkgConfigFlags(bp)
-		if err != nil {
-			return nil, nil, err
-		}
-		cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
-	}
-
-	// Allows including _cgo_export.h from .[ch] files in the package.
-	cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir)
-
-	// _cgo_gotypes.go (displayed "C") contains the type definitions.
-	files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go"))
-	displayFiles = append(displayFiles, "C")
-	for _, fn := range bp.CgoFiles {
-		// "foo.cgo1.go" (displayed "foo.go") is the processed Go source.
-		f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_")
-		files = append(files, filepath.Join(tmpdir, f+"cgo1.go"))
-		displayFiles = append(displayFiles, fn)
-	}
-
-	var cgoflags []string
-	if bp.Goroot && bp.ImportPath == "runtime/cgo" {
-		cgoflags = append(cgoflags, "-import_runtime_cgo=false")
-	}
-	if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" {
-		cgoflags = append(cgoflags, "-import_syscall=false")
-	}
-
-	var cgoFiles []string = bp.CgoFiles
-	if useabs {
-		cgoFiles = make([]string, len(bp.CgoFiles))
-		for i := range cgoFiles {
-			cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i])
-		}
-	}
-
-	args := stringList(
-		"go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--",
-		cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles,
-	)
-	if false {
-		log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir)
-	}
-	cmd := exec.Command(args[0], args[1:]...)
-	cmd.Dir = pkgdir
-	cmd.Stdout = os.Stderr
-	cmd.Stderr = os.Stderr
-	if err := cmd.Run(); err != nil {
-		return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err)
-	}
-
-	return files, displayFiles, nil
-}
-
-// -- unmodified from 'go build' ---------------------------------------
-
-// Return the flags to use when invoking the C or C++ compilers, or cgo.
-func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
-	var defaults string
-	if def {
-		defaults = "-g -O2"
-	}
-
-	cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
-	cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
-	cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
-	ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
-	return
-}
-
-// envList returns the value of the given environment variable broken
-// into fields, using the default value when the variable is empty.
-func envList(key, def string) []string {
-	v := os.Getenv(key)
-	if v == "" {
-		v = def
-	}
-	return strings.Fields(v)
-}
-
-// stringList's arguments should be a sequence of string or []string values.
-// stringList flattens them into a single []string.
-func stringList(args ...interface{}) []string {
-	var x []string
-	for _, arg := range args {
-		switch arg := arg.(type) {
-		case []string:
-			x = append(x, arg...)
-		case string:
-			x = append(x, arg)
-		default:
-			panic("stringList: invalid argument")
-		}
-	}
-	return x
-}

+ 0 - 39
vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go

@@ -1,39 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cgo
-
-import (
-	"errors"
-	"fmt"
-	"go/build"
-	exec "golang.org/x/sys/execabs"
-	"strings"
-)
-
-// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints.
-func pkgConfig(mode string, pkgs []string) (flags []string, err error) {
-	cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...)
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err)
-		if len(out) > 0 {
-			s = fmt.Sprintf("%s: %s", s, out)
-		}
-		return nil, errors.New(s)
-	}
-	if len(out) > 0 {
-		flags = strings.Fields(string(out))
-	}
-	return
-}
-
-// pkgConfigFlags calls pkg-config if needed and returns the cflags
-// needed to build the package.
-func pkgConfigFlags(p *build.Package) (cflags []string, err error) {
-	if len(p.CgoPkgConfig) == 0 {
-		return nil, nil
-	}
-	return pkgConfig("--cflags", p.CgoPkgConfig)
-}

+ 0 - 49
vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go

@@ -1,49 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package packagesdriver fetches type sizes for go/packages and go/analysis.
-package packagesdriver
-
-import (
-	"context"
-	"fmt"
-	"go/types"
-	"strings"
-
-	"golang.org/x/tools/internal/gocommand"
-)
-
-var debug = false
-
-func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) {
-	inv.Verb = "list"
-	inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}
-	stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
-	var goarch, compiler string
-	if rawErr != nil {
-		if strings.Contains(rawErr.Error(), "cannot find main module") {
-			// User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc.
-			// TODO(matloob): Is this a problem in practice?
-			inv.Verb = "env"
-			inv.Args = []string{"GOARCH"}
-			envout, enverr := gocmdRunner.Run(ctx, inv)
-			if enverr != nil {
-				return nil, enverr
-			}
-			goarch = strings.TrimSpace(envout.String())
-			compiler = "gc"
-		} else {
-			return nil, friendlyErr
-		}
-	} else {
-		fields := strings.Fields(stdout.String())
-		if len(fields) < 2 {
-			return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
-				stdout.String(), stderr.String())
-		}
-		goarch = fields[0]
-		compiler = fields[1]
-	}
-	return types.SizesFor(compiler, goarch), nil
-}

+ 0 - 204
vendor/golang.org/x/tools/go/loader/doc.go

@@ -1,204 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package loader loads a complete Go program from source code, parsing
-// and type-checking the initial packages plus their transitive closure
-// of dependencies.  The ASTs and the derived facts are retained for
-// later use.
-//
-// Deprecated: This is an older API and does not have support
-// for modules. Use golang.org/x/tools/go/packages instead.
-//
-// The package defines two primary types: Config, which specifies a
-// set of initial packages to load and various other options; and
-// Program, which is the result of successfully loading the packages
-// specified by a configuration.
-//
-// The configuration can be set directly, but *Config provides various
-// convenience methods to simplify the common cases, each of which can
-// be called any number of times.  Finally, these are followed by a
-// call to Load() to actually load and type-check the program.
-//
-//      var conf loader.Config
-//
-//      // Use the command-line arguments to specify
-//      // a set of initial packages to load from source.
-//      // See FromArgsUsage for help.
-//      rest, err := conf.FromArgs(os.Args[1:], wantTests)
-//
-//      // Parse the specified files and create an ad hoc package with path "foo".
-//      // All files must have the same 'package' declaration.
-//      conf.CreateFromFilenames("foo", "foo.go", "bar.go")
-//
-//      // Create an ad hoc package with path "foo" from
-//      // the specified already-parsed files.
-//      // All ASTs must have the same 'package' declaration.
-//      conf.CreateFromFiles("foo", parsedFiles)
-//
-//      // Add "runtime" to the set of packages to be loaded.
-//      conf.Import("runtime")
-//
-//      // Adds "fmt" and "fmt_test" to the set of packages
-//      // to be loaded.  "fmt" will include *_test.go files.
-//      conf.ImportWithTests("fmt")
-//
-//      // Finally, load all the packages specified by the configuration.
-//      prog, err := conf.Load()
-//
-// See examples_test.go for examples of API usage.
-//
-//
-// CONCEPTS AND TERMINOLOGY
-//
-// The WORKSPACE is the set of packages accessible to the loader.  The
-// workspace is defined by Config.Build, a *build.Context.  The
-// default context treats subdirectories of $GOROOT and $GOPATH as
-// packages, but this behavior may be overridden.
-//
-// An AD HOC package is one specified as a set of source files on the
-// command line.  In the simplest case, it may consist of a single file
-// such as $GOROOT/src/net/http/triv.go.
-//
-// EXTERNAL TEST packages are those comprised of a set of *_test.go
-// files all with the same 'package foo_test' declaration, all in the
-// same directory.  (go/build.Package calls these files XTestFiles.)
-//
-// An IMPORTABLE package is one that can be referred to by some import
-// spec.  Every importable package is uniquely identified by its
-// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json",
-// or "cmd/vendor/golang.org/x/arch/x86/x86asm".  A package path
-// typically denotes a subdirectory of the workspace.
-//
-// An import declaration uses an IMPORT PATH to refer to a package.
-// Most import declarations use the package path as the import path.
-//
-// Due to VENDORING (https://golang.org/s/go15vendor), the
-// interpretation of an import path may depend on the directory in which
-// it appears.  To resolve an import path to a package path, go/build
-// must search the enclosing directories for a subdirectory named
-// "vendor".
-//
-// ad hoc packages and external test packages are NON-IMPORTABLE.  The
-// path of an ad hoc package is inferred from the package
-// declarations of its files and is therefore not a unique package key.
-// For example, Config.CreatePkgs may specify two initial ad hoc
-// packages, both with path "main".
-//
-// An AUGMENTED package is an importable package P plus all the
-// *_test.go files with same 'package foo' declaration as P.
-// (go/build.Package calls these files TestFiles.)
-//
-// The INITIAL packages are those specified in the configuration.  A
-// DEPENDENCY is a package loaded to satisfy an import in an initial
-// package or another dependency.
-//
-package loader
-
-// IMPLEMENTATION NOTES
-//
-// 'go test', in-package test files, and import cycles
-// ---------------------------------------------------
-//
-// An external test package may depend upon members of the augmented
-// package that are not in the unaugmented package, such as functions
-// that expose internals.  (See bufio/export_test.go for an example.)
-// So, the loader must ensure that for each external test package
-// it loads, it also augments the corresponding non-test package.
-//
-// The import graph over n unaugmented packages must be acyclic; the
-// import graph over n-1 unaugmented packages plus one augmented
-// package must also be acyclic.  ('go test' relies on this.)  But the
-// import graph over n augmented packages may contain cycles.
-//
-// First, all the (unaugmented) non-test packages and their
-// dependencies are imported in the usual way; the loader reports an
-// error if it detects an import cycle.
-//
-// Then, each package P for which testing is desired is augmented by
-// the list P' of its in-package test files, by calling
-// (*types.Checker).Files.  This arrangement ensures that P' may
-// reference definitions within P, but P may not reference definitions
-// within P'.  Furthermore, P' may import any other package, including
-// ones that depend upon P, without an import cycle error.
-//
-// Consider two packages A and B, both of which have lists of
-// in-package test files we'll call A' and B', and which have the
-// following import graph edges:
-//    B  imports A
-//    B' imports A
-//    A' imports B
-// This last edge would be expected to create an error were it not
-// for the special type-checking discipline above.
-// Cycles of size greater than two are possible.  For example:
-//   compress/bzip2/bzip2_test.go (package bzip2)  imports "io/ioutil"
-//   io/ioutil/tempfile_test.go   (package ioutil) imports "regexp"
-//   regexp/exec_test.go          (package regexp) imports "compress/bzip2"
-//
-//
-// Concurrency
-// -----------
-//
-// Let us define the import dependency graph as follows.  Each node is a
-// list of files passed to (Checker).Files at once.  Many of these lists
-// are the production code of an importable Go package, so those nodes
-// are labelled by the package's path.  The remaining nodes are
-// ad hoc packages and lists of in-package *_test.go files that augment
-// an importable package; those nodes have no label.
-//
-// The edges of the graph represent import statements appearing within a
-// file.  An edge connects a node (a list of files) to the node it
-// imports, which is importable and thus always labelled.
-//
-// Loading is controlled by this dependency graph.
-//
-// To reduce I/O latency, we start loading a package's dependencies
-// asynchronously as soon as we've parsed its files and enumerated its
-// imports (scanImports).  This performs a preorder traversal of the
-// import dependency graph.
-//
-// To exploit hardware parallelism, we type-check unrelated packages in
-// parallel, where "unrelated" means not ordered by the partial order of
-// the import dependency graph.
-//
-// We use a concurrency-safe non-blocking cache (importer.imported) to
-// record the results of type-checking, whether success or failure.  An
-// entry is created in this cache by startLoad the first time the
-// package is imported.  The first goroutine to request an entry becomes
-// responsible for completing the task and broadcasting completion to
-// subsequent requestors, which block until then.
-//
-// Type checking occurs in (parallel) postorder: we cannot type-check a
-// set of files until we have loaded and type-checked all of their
-// immediate dependencies (and thus all of their transitive
-// dependencies). If the input were guaranteed free of import cycles,
-// this would be trivial: we could simply wait for completion of the
-// dependencies and then invoke the typechecker.
-//
-// But as we saw in the 'go test' section above, some cycles in the
-// import graph over packages are actually legal, so long as the
-// cycle-forming edge originates in the in-package test files that
-// augment the package.  This explains why the nodes of the import
-// dependency graph are not packages, but lists of files: the unlabelled
-// nodes avoid the cycles.  Consider packages A and B where B imports A
-// and A's in-package tests AT import B.  The naively constructed import
-// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but
-// the graph over lists of files is AT --> B --> A, where AT is an
-// unlabelled node.
-//
-// Awaiting completion of the dependencies in a cyclic graph would
-// deadlock, so we must materialize the import dependency graph (as
-// importer.graph) and check whether each import edge forms a cycle.  If
-// x imports y, and the graph already contains a path from y to x, then
-// there is an import cycle, in which case the processing of x must not
-// wait for the completion of processing of y.
-//
-// When the type-checker makes a callback (doImport) to the loader for a
-// given import edge, there are two possible cases.  In the normal case,
-// the dependency has already been completely type-checked; doImport
-// does a cache lookup and returns it.  In the cyclic case, the entry in
-// the cache is still necessarily incomplete, indicating a cycle.  We
-// perform the cycle check again to obtain the error message, and return
-// the error.
-//
-// The result of using concurrency is about a 2.5x speedup for stdlib_test.
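
A compact, runnable version of the usage sketched in the doc comment above. Config, ImportWithTests, and Load appear in that comment; Program.InitialPackages and PackageInfo.Pkg come from the rest of the deleted package rather than this hunk.

package main

import (
	"fmt"

	"golang.org/x/tools/go/loader"
)

func main() {
	var conf loader.Config
	// Load "fmt" together with its in-package tests.
	conf.ImportWithTests("fmt")

	prog, err := conf.Load()
	if err != nil {
		panic(err)
	}
	for _, info := range prog.InitialPackages() {
		fmt.Println(info.Pkg.Path())
	}
}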

+ 0 - 1086
vendor/golang.org/x/tools/go/loader/loader.go

@@ -1,1086 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package loader
-
-// See doc.go for package documentation and implementation notes.
-
-import (
-	"errors"
-	"fmt"
-	"go/ast"
-	"go/build"
-	"go/parser"
-	"go/token"
-	"go/types"
-	"os"
-	"path/filepath"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-
-	"golang.org/x/tools/go/ast/astutil"
-	"golang.org/x/tools/go/internal/cgo"
-)
-
-var ignoreVendor build.ImportMode
-
-const trace = false // show timing info for type-checking
-
-// Config specifies the configuration for loading a whole program from
-// Go source code.
-// The zero value for Config is a ready-to-use default configuration.
-type Config struct {
-	// Fset is the file set for the parser to use when loading the
-	// program.  If nil, it may be lazily initialized by any
-	// method of Config.
-	Fset *token.FileSet
-
-	// ParserMode specifies the mode to be used by the parser when
-	// loading source packages.
-	ParserMode parser.Mode
-
-	// TypeChecker contains options relating to the type checker.
-	//
-	// The supplied IgnoreFuncBodies is not used; the effective
-	// value comes from the TypeCheckFuncBodies func below.
-	// The supplied Import function is not used either.
-	TypeChecker types.Config
-
-	// TypeCheckFuncBodies is a predicate over package paths.
-	// A package for which the predicate is false will
-	// have its package-level declarations type checked, but not
-	// its function bodies; this can be used to quickly load
-	// dependencies from source.  If nil, all func bodies are type
-	// checked.
-	TypeCheckFuncBodies func(path string) bool
-
-	// If Build is non-nil, it is used to locate source packages.
-	// Otherwise &build.Default is used.
-	//
-	// By default, cgo is invoked to preprocess Go files that
-	// import the fake package "C".  This behaviour can be
-	// disabled by setting CGO_ENABLED=0 in the environment prior
-	// to startup, or by setting Build.CgoEnabled=false.
-	Build *build.Context
-
-	// The current directory, used for resolving relative package
-	// references such as "./go/loader".  If empty, os.Getwd will be
-	// used instead.
-	Cwd string
-
-	// If DisplayPath is non-nil, it is used to transform each
-	// file name obtained from Build.Import().  This can be used
-	// to prevent a virtualized build.Config's file names from
-	// leaking into the user interface.
-	DisplayPath func(path string) string
-
-	// If AllowErrors is true, Load will return a Program even
-	// if some of the its packages contained I/O, parser or type
-	// errors; such errors are accessible via PackageInfo.Errors.  If
-	// false, Load will fail if any package had an error.
-	AllowErrors bool
-
-	// CreatePkgs specifies a list of non-importable initial
-	// packages to create.  The resulting packages will appear in
-	// the corresponding elements of the Program.Created slice.
-	CreatePkgs []PkgSpec
-
-	// ImportPkgs specifies a set of initial packages to load.
-	// The map keys are package paths.
-	//
-	// The map value indicates whether to load tests.  If true, Load
-	// will add and type-check two lists of files to the package:
-	// non-test files followed by in-package *_test.go files.  In
-	// addition, it will append the external test package (if any)
-	// to Program.Created.
-	ImportPkgs map[string]bool
-
-	// FindPackage is called during Load to create the build.Package
-	// for a given import path from a given directory.
-	// If FindPackage is nil, (*build.Context).Import is used.
-	// A client may use this hook to adapt to a proprietary build
-	// system that does not follow the "go build" layout
-	// conventions, for example.
-	//
-	// It must be safe to call concurrently from multiple goroutines.
-	FindPackage func(ctxt *build.Context, importPath, fromDir string, mode build.ImportMode) (*build.Package, error)
-
-	// AfterTypeCheck is called immediately after a list of files
-	// has been type-checked and appended to info.Files.
-	//
-	// This optional hook function is the earliest opportunity for
-	// the client to observe the output of the type checker,
-	// which may be useful to reduce analysis latency when loading
-	// a large program.
-	//
-	// The function is permitted to modify info.Info, for instance
-	// to clear data structures that are no longer needed, which can
-	// dramatically reduce peak memory consumption.
-	//
-	// The function may be called twice for the same PackageInfo:
-	// once for the files of the package and again for the
-	// in-package test files.
-	//
-	// It must be safe to call concurrently from multiple goroutines.
-	AfterTypeCheck func(info *PackageInfo, files []*ast.File)
-}
-
-// A PkgSpec specifies a non-importable package to be created by Load.
-// Files are processed first, but typically only one of Files and
-// Filenames is provided.  The path needn't be globally unique.
-//
-// For vendoring purposes, the package's directory is the one that
-// contains the first file.
-type PkgSpec struct {
-	Path      string      // package path ("" => use package declaration)
-	Files     []*ast.File // ASTs of already-parsed files
-	Filenames []string    // names of files to be parsed
-}
-
-// A Program is a Go program loaded from source as specified by a Config.
-type Program struct {
-	Fset *token.FileSet // the file set for this program
-
-	// Created[i] contains the initial package whose ASTs or
-	// filenames were supplied by Config.CreatePkgs[i], followed by
-	// the external test package, if any, of each package in
-	// Config.ImportPkgs ordered by ImportPath.
-	//
-	// NOTE: these files must not import "C".  Cgo preprocessing is
-	// only performed on imported packages, not ad hoc packages.
-	//
-	// TODO(adonovan): we need to copy and adapt the logic of
-	// goFilesPackage (from $GOROOT/src/cmd/go/build.go) and make
-	// Config.Import and Config.Create methods return the same kind
-	// of entity, essentially a build.Package.
-	// Perhaps we can even reuse that type directly.
-	Created []*PackageInfo
-
-	// Imported contains the initially imported packages,
-	// as specified by Config.ImportPkgs.
-	Imported map[string]*PackageInfo
-
-	// AllPackages contains the PackageInfo of every package
-	// encountered by Load: all initial packages and all
-	// dependencies, including incomplete ones.
-	AllPackages map[*types.Package]*PackageInfo
-
-	// importMap is the canonical mapping of package paths to
-	// packages.  It contains all Imported initial packages, but not
-	// Created ones, and all imported dependencies.
-	importMap map[string]*types.Package
-}
-
-// PackageInfo holds the ASTs and facts derived by the type-checker
-// for a single package.
-//
-// Not mutated once exposed via the API.
-//
-type PackageInfo struct {
-	Pkg                   *types.Package
-	Importable            bool        // true if 'import "Pkg.Path()"' would resolve to this
-	TransitivelyErrorFree bool        // true if Pkg and all its dependencies are free of errors
-	Files                 []*ast.File // syntax trees for the package's files
-	Errors                []error     // non-nil if the package had errors
-	types.Info                        // type-checker deductions.
-	dir                   string      // package directory
-
-	checker   *types.Checker // transient type-checker state
-	errorFunc func(error)
-}
-
-func (info *PackageInfo) String() string { return info.Pkg.Path() }
-
-func (info *PackageInfo) appendError(err error) {
-	if info.errorFunc != nil {
-		info.errorFunc(err)
-	} else {
-		fmt.Fprintln(os.Stderr, err)
-	}
-	info.Errors = append(info.Errors, err)
-}
-
-func (conf *Config) fset() *token.FileSet {
-	if conf.Fset == nil {
-		conf.Fset = token.NewFileSet()
-	}
-	return conf.Fset
-}
-
-// ParseFile is a convenience function (intended for testing) that invokes
-// the parser using the Config's FileSet, which is initialized if nil.
-//
-// src specifies the parser input as a string, []byte, or io.Reader, and
-// filename is its apparent name.  If src is nil, the contents of
-// filename are read from the file system.
-//
-func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) {
-	// TODO(adonovan): use conf.build() etc like parseFiles does.
-	return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode)
-}
-
-// FromArgsUsage is a partial usage message that applications calling
-// FromArgs may wish to include in their -help output.
-const FromArgsUsage = `
-<args> is a list of arguments denoting a set of initial packages.
-It may take one of two forms:
-
-1. A list of *.go source files.
-
-   All of the specified files are loaded, parsed and type-checked
-   as a single package.  All the files must belong to the same directory.
-
-2. A list of import paths, each denoting a package.
-
-   The package's directory is found relative to the $GOROOT and
-   $GOPATH using similar logic to 'go build', and the *.go files in
-   that directory are loaded, parsed and type-checked as a single
-   package.
-
-   In addition, all *_test.go files in the directory are then loaded
-   and parsed.  Those files whose package declaration equals that of
-   the non-*_test.go files are included in the primary package.  Test
-   files whose package declaration ends with "_test" are type-checked
-   as another package, the 'external' test package, so that a single
-   import path may denote two packages.  (Whether this behaviour is
-   enabled is tool-specific, and may depend on additional flags.)
-
-A '--' argument terminates the list of packages.
-`
-
-// FromArgs interprets args as a set of initial packages to load from
-// source and updates the configuration.  It returns the list of
-// unconsumed arguments.
-//
-// It is intended for use in command-line interfaces that require a
-// set of initial packages to be specified; see FromArgsUsage message
-// for details.
-//
-// Only superficial errors are reported at this stage; errors dependent
-// on I/O are detected during Load.
-//
-func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) {
-	var rest []string
-	for i, arg := range args {
-		if arg == "--" {
-			rest = args[i+1:]
-			args = args[:i]
-			break // consume "--" and return the remaining args
-		}
-	}
-
-	if len(args) > 0 && strings.HasSuffix(args[0], ".go") {
-		// Assume args is a list of *.go files
-		// denoting a single ad hoc package.
-		for _, arg := range args {
-			if !strings.HasSuffix(arg, ".go") {
-				return nil, fmt.Errorf("named files must be .go files: %s", arg)
-			}
-		}
-		conf.CreateFromFilenames("", args...)
-	} else {
-		// Assume args are directories each denoting a
-		// package and (perhaps) an external test, iff xtest.
-		for _, arg := range args {
-			if xtest {
-				conf.ImportWithTests(arg)
-			} else {
-				conf.Import(arg)
-			}
-		}
-	}
-
-	return rest, nil
-}
-
-// CreateFromFilenames is a convenience function that adds
-// a conf.CreatePkgs entry to create a package of the specified *.go
-// files.
-//
-func (conf *Config) CreateFromFilenames(path string, filenames ...string) {
-	conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Filenames: filenames})
-}
-
-// CreateFromFiles is a convenience function that adds a conf.CreatePkgs
-// entry to create a package of the specified path and parsed files.
-//
-func (conf *Config) CreateFromFiles(path string, files ...*ast.File) {
-	conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Files: files})
-}
-
-// ImportWithTests is a convenience function that adds path to
-// ImportPkgs, the set of initial source packages located relative to
-// $GOPATH.  The package will be augmented by any *_test.go files in
-// its directory that contain a "package x" (not "package x_test")
-// declaration.
-//
-// In addition, if any *_test.go files contain a "package x_test"
-// declaration, an additional package comprising just those files will
-// be added to CreatePkgs.
-//
-func (conf *Config) ImportWithTests(path string) { conf.addImport(path, true) }
-
-// Import is a convenience function that adds path to ImportPkgs, the
-// set of initial packages that will be imported from source.
-//
-func (conf *Config) Import(path string) { conf.addImport(path, false) }
-
-func (conf *Config) addImport(path string, tests bool) {
-	if path == "C" {
-		return // ignore; not a real package
-	}
-	if conf.ImportPkgs == nil {
-		conf.ImportPkgs = make(map[string]bool)
-	}
-	conf.ImportPkgs[path] = conf.ImportPkgs[path] || tests
-}
-
-// PathEnclosingInterval returns the PackageInfo and ast.Node that
-// contain source interval [start, end), and all the node's ancestors
-// up to the AST root.  It searches all ast.Files of all packages in prog.
-// exact is defined as for astutil.PathEnclosingInterval.
-//
-// The zero value is returned if not found.
-//
-func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) {
-	for _, info := range prog.AllPackages {
-		for _, f := range info.Files {
-			if f.Pos() == token.NoPos {
-				// This can happen if the parser saw
-				// too many errors and bailed out.
-				// (Use parser.AllErrors to prevent that.)
-				continue
-			}
-			if !tokenFileContainsPos(prog.Fset.File(f.Pos()), start) {
-				continue
-			}
-			if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil {
-				return info, path, exact
-			}
-		}
-	}
-	return nil, nil, false
-}
-
-// InitialPackages returns a new slice containing the set of initial
-// packages (Created + Imported) in unspecified order.
-//
-func (prog *Program) InitialPackages() []*PackageInfo {
-	infos := make([]*PackageInfo, 0, len(prog.Created)+len(prog.Imported))
-	infos = append(infos, prog.Created...)
-	for _, info := range prog.Imported {
-		infos = append(infos, info)
-	}
-	return infos
-}
-
-// Package returns the ASTs and results of type checking for the
-// specified package.
-func (prog *Program) Package(path string) *PackageInfo {
-	if info, ok := prog.AllPackages[prog.importMap[path]]; ok {
-		return info
-	}
-	for _, info := range prog.Created {
-		if path == info.Pkg.Path() {
-			return info
-		}
-	}
-	return nil
-}
-
-// ---------- Implementation ----------
-
-// importer holds the working state of the algorithm.
-type importer struct {
-	conf  *Config   // the client configuration
-	start time.Time // for logging
-
-	progMu sync.Mutex // guards prog
-	prog   *Program   // the resulting program
-
-	// findpkg is a memoization of FindPackage.
-	findpkgMu sync.Mutex // guards findpkg
-	findpkg   map[findpkgKey]*findpkgValue
-
-	importedMu sync.Mutex             // guards imported
-	imported   map[string]*importInfo // all imported packages (incl. failures) by import path
-
-	// import dependency graph: graph[x][y] => x imports y
-	//
-	// Since non-importable packages cannot be cyclic, we ignore
-	// their imports, thus we only need the subgraph over importable
-	// packages.  Nodes are identified by their import paths.
-	graphMu sync.Mutex
-	graph   map[string]map[string]bool
-}
-
-type findpkgKey struct {
-	importPath string
-	fromDir    string
-	mode       build.ImportMode
-}
-
-type findpkgValue struct {
-	ready chan struct{} // closed to broadcast readiness
-	bp    *build.Package
-	err   error
-}
-
-// importInfo tracks the success or failure of a single import.
-//
-// Upon completion, exactly one of info and err is non-nil:
-// info on successful creation of a package, err otherwise.
-// A successful package may still contain type errors.
-//
-type importInfo struct {
-	path     string        // import path
-	info     *PackageInfo  // results of typechecking (including errors)
-	complete chan struct{} // closed to broadcast that info is set.
-}
-
-// awaitCompletion blocks until ii is complete,
-// i.e. the info field is safe to inspect.
-func (ii *importInfo) awaitCompletion() {
-	<-ii.complete // wait for close
-}
-
-// Complete marks ii as complete.
-// Its info and err fields will not be subsequently updated.
-func (ii *importInfo) Complete(info *PackageInfo) {
-	if info == nil {
-		panic("info == nil")
-	}
-	ii.info = info
-	close(ii.complete)
-}
-
-type importError struct {
-	path string // import path
-	err  error  // reason for failure to create a package
-}
-
-// Load creates the initial packages specified by conf.{Create,Import}Pkgs,
-// loading their dependency packages as needed.
-//
-// On success, Load returns a Program containing a PackageInfo for
-// each package.  On failure, it returns an error.
-//
-// If AllowErrors is true, Load will return a Program even if some
-// packages contained I/O, parser or type errors, or if dependencies
-// were missing.  (Such errors are accessible via PackageInfo.Errors.)
-// If false, Load will fail if any package had an error.
-//
-// It is an error if no packages were loaded.
-//
-func (conf *Config) Load() (*Program, error) {
-	// Create a simple default error handler for parse/type errors.
-	if conf.TypeChecker.Error == nil {
-		conf.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) }
-	}
-
-	// Set default working directory for relative package references.
-	if conf.Cwd == "" {
-		var err error
-		conf.Cwd, err = os.Getwd()
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	// Install default FindPackage hook using go/build logic.
-	if conf.FindPackage == nil {
-		conf.FindPackage = (*build.Context).Import
-	}
-
-	prog := &Program{
-		Fset:        conf.fset(),
-		Imported:    make(map[string]*PackageInfo),
-		importMap:   make(map[string]*types.Package),
-		AllPackages: make(map[*types.Package]*PackageInfo),
-	}
-
-	imp := importer{
-		conf:     conf,
-		prog:     prog,
-		findpkg:  make(map[findpkgKey]*findpkgValue),
-		imported: make(map[string]*importInfo),
-		start:    time.Now(),
-		graph:    make(map[string]map[string]bool),
-	}
-
-	// -- loading proper (concurrent phase) --------------------------------
-
-	var errpkgs []string // packages that contained errors
-
-	// Load the initially imported packages and their dependencies,
-	// in parallel.
-	// No vendor check on packages imported from the command line.
-	infos, importErrors := imp.importAll("", conf.Cwd, conf.ImportPkgs, ignoreVendor)
-	for _, ie := range importErrors {
-		conf.TypeChecker.Error(ie.err) // failed to create package
-		errpkgs = append(errpkgs, ie.path)
-	}
-	for _, info := range infos {
-		prog.Imported[info.Pkg.Path()] = info
-	}
-
-	// Augment the designated initial packages by their tests.
-	// Dependencies are loaded in parallel.
-	var xtestPkgs []*build.Package
-	for importPath, augment := range conf.ImportPkgs {
-		if !augment {
-			continue
-		}
-
-		// No vendor check on packages imported from command line.
-		bp, err := imp.findPackage(importPath, conf.Cwd, ignoreVendor)
-		if err != nil {
-			// Package not found, or can't even parse package declaration.
-			// Already reported by previous loop; ignore it.
-			continue
-		}
-
-		// Needs external test package?
-		if len(bp.XTestGoFiles) > 0 {
-			xtestPkgs = append(xtestPkgs, bp)
-		}
-
-		// Consult the cache using the canonical package path.
-		path := bp.ImportPath
-		imp.importedMu.Lock() // (unnecessary, we're sequential here)
-		ii, ok := imp.imported[path]
-		// Paranoid checks added due to issue #11012.
-		if !ok {
-			// Unreachable.
-			// The previous loop called importAll and thus
-			// startLoad for each path in ImportPkgs, which
-			// populates imp.imported[path] with a non-zero value.
-			panic(fmt.Sprintf("imported[%q] not found", path))
-		}
-		if ii == nil {
-			// Unreachable.
-			// The ii values in this loop are the same as in
-			// the previous loop, which enforced the invariant
-			// that at least one of ii.err and ii.info is non-nil.
-			panic(fmt.Sprintf("imported[%q] == nil", path))
-		}
-		if ii.info == nil {
-			// Unreachable.
-			// awaitCompletion has the postcondition
-			// ii.info != nil.
-			panic(fmt.Sprintf("imported[%q].info = nil", path))
-		}
-		info := ii.info
-		imp.importedMu.Unlock()
-
-		// Parse the in-package test files.
-		files, errs := imp.conf.parsePackageFiles(bp, 't')
-		for _, err := range errs {
-			info.appendError(err)
-		}
-
-		// The test files augmenting package P cannot be imported,
-		// but may import packages that import P,
-		// so we must disable the cycle check.
-		imp.addFiles(info, files, false)
-	}
-
-	createPkg := func(path, dir string, files []*ast.File, errs []error) {
-		info := imp.newPackageInfo(path, dir)
-		for _, err := range errs {
-			info.appendError(err)
-		}
-
-		// Ad hoc packages are non-importable,
-		// so no cycle check is needed.
-		// addFiles loads dependencies in parallel.
-		imp.addFiles(info, files, false)
-		prog.Created = append(prog.Created, info)
-	}
-
-	// Create packages specified by conf.CreatePkgs.
-	for _, cp := range conf.CreatePkgs {
-		files, errs := parseFiles(conf.fset(), conf.build(), nil, conf.Cwd, cp.Filenames, conf.ParserMode)
-		files = append(files, cp.Files...)
-
-		path := cp.Path
-		if path == "" {
-			if len(files) > 0 {
-				path = files[0].Name.Name
-			} else {
-				path = "(unnamed)"
-			}
-		}
-
-		dir := conf.Cwd
-		if len(files) > 0 && files[0].Pos().IsValid() {
-			dir = filepath.Dir(conf.fset().File(files[0].Pos()).Name())
-		}
-		createPkg(path, dir, files, errs)
-	}
-
-	// Create external test packages.
-	sort.Sort(byImportPath(xtestPkgs))
-	for _, bp := range xtestPkgs {
-		files, errs := imp.conf.parsePackageFiles(bp, 'x')
-		createPkg(bp.ImportPath+"_test", bp.Dir, files, errs)
-	}
-
-	// -- finishing up (sequential) ----------------------------------------
-
-	if len(prog.Imported)+len(prog.Created) == 0 {
-		return nil, errors.New("no initial packages were loaded")
-	}
-
-	// Create infos for indirectly imported packages.
-	// e.g. incomplete packages without syntax, loaded from export data.
-	for _, obj := range prog.importMap {
-		info := prog.AllPackages[obj]
-		if info == nil {
-			prog.AllPackages[obj] = &PackageInfo{Pkg: obj, Importable: true}
-		} else {
-			// finished
-			info.checker = nil
-			info.errorFunc = nil
-		}
-	}
-
-	if !conf.AllowErrors {
-		// Report errors in indirectly imported packages.
-		for _, info := range prog.AllPackages {
-			if len(info.Errors) > 0 {
-				errpkgs = append(errpkgs, info.Pkg.Path())
-			}
-		}
-		if errpkgs != nil {
-			var more string
-			if len(errpkgs) > 3 {
-				more = fmt.Sprintf(" and %d more", len(errpkgs)-3)
-				errpkgs = errpkgs[:3]
-			}
-			return nil, fmt.Errorf("couldn't load packages due to errors: %s%s",
-				strings.Join(errpkgs, ", "), more)
-		}
-	}
-
-	markErrorFreePackages(prog.AllPackages)
-
-	return prog, nil
-}
-
-type byImportPath []*build.Package
-
-func (b byImportPath) Len() int           { return len(b) }
-func (b byImportPath) Less(i, j int) bool { return b[i].ImportPath < b[j].ImportPath }
-func (b byImportPath) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
-
-// markErrorFreePackages sets the TransitivelyErrorFree flag on all
-// applicable packages.
-func markErrorFreePackages(allPackages map[*types.Package]*PackageInfo) {
-	// Build the transpose of the import graph.
-	importedBy := make(map[*types.Package]map[*types.Package]bool)
-	for P := range allPackages {
-		for _, Q := range P.Imports() {
-			clients, ok := importedBy[Q]
-			if !ok {
-				clients = make(map[*types.Package]bool)
-				importedBy[Q] = clients
-			}
-			clients[P] = true
-		}
-	}
-
-	// Find all packages reachable from some error package.
-	reachable := make(map[*types.Package]bool)
-	var visit func(*types.Package)
-	visit = func(p *types.Package) {
-		if !reachable[p] {
-			reachable[p] = true
-			for q := range importedBy[p] {
-				visit(q)
-			}
-		}
-	}
-	for _, info := range allPackages {
-		if len(info.Errors) > 0 {
-			visit(info.Pkg)
-		}
-	}
-
-	// Mark the others as "transitively error-free".
-	for _, info := range allPackages {
-		if !reachable[info.Pkg] {
-			info.TransitivelyErrorFree = true
-		}
-	}
-}
-
-// build returns the effective build context.
-func (conf *Config) build() *build.Context {
-	if conf.Build != nil {
-		return conf.Build
-	}
-	return &build.Default
-}
-
-// parsePackageFiles enumerates the files belonging to package path,
-// then loads, parses and returns them, plus a list of I/O or parse
-// errors that were encountered.
-//
-// 'which' indicates which files to include:
-//    'g': include non-test *.go source files (GoFiles + processed CgoFiles)
-//    't': include in-package *_test.go source files (TestGoFiles)
-//    'x': include external *_test.go source files. (XTestGoFiles)
-//
-func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.File, []error) {
-	if bp.ImportPath == "unsafe" {
-		return nil, nil
-	}
-	var filenames []string
-	switch which {
-	case 'g':
-		filenames = bp.GoFiles
-	case 't':
-		filenames = bp.TestGoFiles
-	case 'x':
-		filenames = bp.XTestGoFiles
-	default:
-		panic(which)
-	}
-
-	files, errs := parseFiles(conf.fset(), conf.build(), conf.DisplayPath, bp.Dir, filenames, conf.ParserMode)
-
-	// Preprocess CgoFiles and parse the outputs (sequentially).
-	if which == 'g' && bp.CgoFiles != nil {
-		cgofiles, err := cgo.ProcessFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode)
-		if err != nil {
-			errs = append(errs, err)
-		} else {
-			files = append(files, cgofiles...)
-		}
-	}
-
-	return files, errs
-}
-
-// doImport imports the package denoted by path.
-// It implements the types.Importer signature.
-//
-// It returns an error if a package could not be created
-// (e.g. go/build or parse error), but type errors are reported via
-// the types.Config.Error callback (the first of which is also saved
-// in the package's PackageInfo).
-//
-// Idempotent.
-//
-func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, error) {
-	if to == "C" {
-		// This should be unreachable, but ad hoc packages are
-		// not currently subject to cgo preprocessing.
-		// See https://golang.org/issue/11627.
-		return nil, fmt.Errorf(`the loader doesn't cgo-process ad hoc packages like %q; see Go issue 11627`,
-			from.Pkg.Path())
-	}
-
-	bp, err := imp.findPackage(to, from.dir, 0)
-	if err != nil {
-		return nil, err
-	}
-
-	// The standard unsafe package is handled specially,
-	// and has no PackageInfo.
-	if bp.ImportPath == "unsafe" {
-		return types.Unsafe, nil
-	}
-
-	// Look for the package in the cache using its canonical path.
-	path := bp.ImportPath
-	imp.importedMu.Lock()
-	ii := imp.imported[path]
-	imp.importedMu.Unlock()
-	if ii == nil {
-		panic("internal error: unexpected import: " + path)
-	}
-	if ii.info != nil {
-		return ii.info.Pkg, nil
-	}
-
-	// Import of incomplete package: this indicates a cycle.
-	fromPath := from.Pkg.Path()
-	if cycle := imp.findPath(path, fromPath); cycle != nil {
-		// Normalize cycle: start from alphabetically largest node.
-		pos, start := -1, ""
-		for i, s := range cycle {
-			if pos < 0 || s > start {
-				pos, start = i, s
-			}
-		}
-		cycle = append(cycle, cycle[:pos]...)[pos:] // rotate cycle to start from largest
-		cycle = append(cycle, cycle[0])             // add start node to end to show cycliness
-		return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> "))
-	}
-
-	panic("internal error: import of incomplete (yet acyclic) package: " + fromPath)
-}
-
-// findPackage locates the package denoted by the importPath in the
-// specified directory.
-func (imp *importer) findPackage(importPath, fromDir string, mode build.ImportMode) (*build.Package, error) {
-	// We use a non-blocking duplicate-suppressing cache (gopl.io §9.7)
-	// to avoid holding the lock around FindPackage.
-	key := findpkgKey{importPath, fromDir, mode}
-	imp.findpkgMu.Lock()
-	v, ok := imp.findpkg[key]
-	if ok {
-		// cache hit
-		imp.findpkgMu.Unlock()
-
-		<-v.ready // wait for entry to become ready
-	} else {
-		// Cache miss: this goroutine becomes responsible for
-		// populating the map entry and broadcasting its readiness.
-		v = &findpkgValue{ready: make(chan struct{})}
-		imp.findpkg[key] = v
-		imp.findpkgMu.Unlock()
-
-		ioLimit <- true
-		v.bp, v.err = imp.conf.FindPackage(imp.conf.build(), importPath, fromDir, mode)
-		<-ioLimit
-
-		if _, ok := v.err.(*build.NoGoError); ok {
-			v.err = nil // empty directory is not an error
-		}
-
-		close(v.ready) // broadcast ready condition
-	}
-	return v.bp, v.err
-}
-
-// importAll loads, parses, and type-checks the specified packages in
-// parallel and returns their completed importInfos in unspecified order.
-//
-// fromPath is the package path of the importing package, if it is
-// importable, "" otherwise.  It is used for cycle detection.
-//
-// fromDir is the directory containing the import declaration that
-// caused these imports.
-//
-func (imp *importer) importAll(fromPath, fromDir string, imports map[string]bool, mode build.ImportMode) (infos []*PackageInfo, errors []importError) {
-	// TODO(adonovan): opt: do the loop in parallel once
-	// findPackage is non-blocking.
-	var pending []*importInfo
-	for importPath := range imports {
-		bp, err := imp.findPackage(importPath, fromDir, mode)
-		if err != nil {
-			errors = append(errors, importError{
-				path: importPath,
-				err:  err,
-			})
-			continue
-		}
-		pending = append(pending, imp.startLoad(bp))
-	}
-
-	if fromPath != "" {
-		// We're loading a set of imports.
-		//
-		// We must record graph edges from the importing package
-		// to its dependencies, and check for cycles.
-		imp.graphMu.Lock()
-		deps, ok := imp.graph[fromPath]
-		if !ok {
-			deps = make(map[string]bool)
-			imp.graph[fromPath] = deps
-		}
-		for _, ii := range pending {
-			deps[ii.path] = true
-		}
-		imp.graphMu.Unlock()
-	}
-
-	for _, ii := range pending {
-		if fromPath != "" {
-			if cycle := imp.findPath(ii.path, fromPath); cycle != nil {
-				// Cycle-forming import: we must not await its
-				// completion since it would deadlock.
-				//
-				// We don't record the error in ii since
-				// the error is really associated with the
-				// cycle-forming edge, not the package itself.
-				// (Also it would complicate the
-				// invariants of importPath completion.)
-				if trace {
-					fmt.Fprintf(os.Stderr, "import cycle: %q\n", cycle)
-				}
-				continue
-			}
-		}
-		ii.awaitCompletion()
-		infos = append(infos, ii.info)
-	}
-
-	return infos, errors
-}
-
-// findPath returns an arbitrary path from 'from' to 'to' in the import
-// graph, or nil if there was none.
-func (imp *importer) findPath(from, to string) []string {
-	imp.graphMu.Lock()
-	defer imp.graphMu.Unlock()
-
-	seen := make(map[string]bool)
-	var search func(stack []string, importPath string) []string
-	search = func(stack []string, importPath string) []string {
-		if !seen[importPath] {
-			seen[importPath] = true
-			stack = append(stack, importPath)
-			if importPath == to {
-				return stack
-			}
-			for x := range imp.graph[importPath] {
-				if p := search(stack, x); p != nil {
-					return p
-				}
-			}
-		}
-		return nil
-	}
-	return search(make([]string, 0, 20), from)
-}
-
-// startLoad initiates the loading, parsing and type-checking of the
-// specified package and its dependencies, if it has not already begun.
-//
-// It returns an importInfo, not necessarily in a completed state.  The
-// caller must call awaitCompletion() before accessing its info field.
-//
-// startLoad is concurrency-safe and idempotent.
-//
-func (imp *importer) startLoad(bp *build.Package) *importInfo {
-	path := bp.ImportPath
-	imp.importedMu.Lock()
-	ii, ok := imp.imported[path]
-	if !ok {
-		ii = &importInfo{path: path, complete: make(chan struct{})}
-		imp.imported[path] = ii
-		go func() {
-			info := imp.load(bp)
-			ii.Complete(info)
-		}()
-	}
-	imp.importedMu.Unlock()
-
-	return ii
-}
-
-// load implements package loading by parsing Go source files
-// located by go/build.
-func (imp *importer) load(bp *build.Package) *PackageInfo {
-	info := imp.newPackageInfo(bp.ImportPath, bp.Dir)
-	info.Importable = true
-	files, errs := imp.conf.parsePackageFiles(bp, 'g')
-	for _, err := range errs {
-		info.appendError(err)
-	}
-
-	imp.addFiles(info, files, true)
-
-	imp.progMu.Lock()
-	imp.prog.importMap[bp.ImportPath] = info.Pkg
-	imp.progMu.Unlock()
-
-	return info
-}
-
-// addFiles adds and type-checks the specified files to info, loading
-// their dependencies if needed.  The order of files determines the
-// package initialization order.  It may be called multiple times on the
-// same package.  Errors are appended to the info.Errors field.
-//
-// cycleCheck determines whether the imports within files create
-// dependency edges that should be checked for potential cycles.
-//
-func (imp *importer) addFiles(info *PackageInfo, files []*ast.File, cycleCheck bool) {
-	// Ensure the dependencies are loaded, in parallel.
-	var fromPath string
-	if cycleCheck {
-		fromPath = info.Pkg.Path()
-	}
-	// TODO(adonovan): opt: make the caller do scanImports.
-	// Callers with a build.Package can skip it.
-	imp.importAll(fromPath, info.dir, scanImports(files), 0)
-
-	if trace {
-		fmt.Fprintf(os.Stderr, "%s: start %q (%d)\n",
-			time.Since(imp.start), info.Pkg.Path(), len(files))
-	}
-
-	// Don't call checker.Files on Unsafe, even with zero files,
-	// because it would mutate the package, which is a global.
-	if info.Pkg == types.Unsafe {
-		if len(files) > 0 {
-			panic(`"unsafe" package contains unexpected files`)
-		}
-	} else {
-		// Ignore the returned (first) error since we
-		// already collect them all in the PackageInfo.
-		info.checker.Files(files)
-		info.Files = append(info.Files, files...)
-	}
-
-	if imp.conf.AfterTypeCheck != nil {
-		imp.conf.AfterTypeCheck(info, files)
-	}
-
-	if trace {
-		fmt.Fprintf(os.Stderr, "%s: stop %q\n",
-			time.Since(imp.start), info.Pkg.Path())
-	}
-}
-
-func (imp *importer) newPackageInfo(path, dir string) *PackageInfo {
-	var pkg *types.Package
-	if path == "unsafe" {
-		pkg = types.Unsafe
-	} else {
-		pkg = types.NewPackage(path, "")
-	}
-	info := &PackageInfo{
-		Pkg: pkg,
-		Info: types.Info{
-			Types:      make(map[ast.Expr]types.TypeAndValue),
-			Defs:       make(map[*ast.Ident]types.Object),
-			Uses:       make(map[*ast.Ident]types.Object),
-			Implicits:  make(map[ast.Node]types.Object),
-			Scopes:     make(map[ast.Node]*types.Scope),
-			Selections: make(map[*ast.SelectorExpr]*types.Selection),
-		},
-		errorFunc: imp.conf.TypeChecker.Error,
-		dir:       dir,
-	}
-
-	// Copy the types.Config so we can vary it across PackageInfos.
-	tc := imp.conf.TypeChecker
-	tc.IgnoreFuncBodies = false
-	if f := imp.conf.TypeCheckFuncBodies; f != nil {
-		tc.IgnoreFuncBodies = !f(path)
-	}
-	tc.Importer = closure{imp, info}
-	tc.Error = info.appendError // appendError wraps the user's Error function
-
-	info.checker = types.NewChecker(&tc, imp.conf.fset(), pkg, &info.Info)
-	imp.progMu.Lock()
-	imp.prog.AllPackages[pkg] = info
-	imp.progMu.Unlock()
-	return info
-}
-
-type closure struct {
-	imp  *importer
-	info *PackageInfo
-}
-
-func (c closure) Import(to string) (*types.Package, error) { return c.imp.doImport(c.info, to) }

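For orientation, the removed loader API was typically driven along the following lines. This is only a sketch: the package path "fmt" is an arbitrary example and error handling is reduced to a `log.Fatal`; all of the calls used (`Config.ImportWithTests`, `Config.Load`, `Program.InitialPackages`) appear in the deleted file above.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/loader"
)

func main() {
	var conf loader.Config

	// Load "fmt" from source, including its in-package tests,
	// plus all transitive dependencies.
	conf.ImportWithTests("fmt")

	prog, err := conf.Load()
	if err != nil {
		log.Fatal(err) // e.g. no initial packages were loaded
	}

	for _, info := range prog.InitialPackages() {
		fmt.Printf("%s: %d files, errors=%d\n",
			info.Pkg.Path(), len(info.Files), len(info.Errors))
	}
}
```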
+ 0 - 124
vendor/golang.org/x/tools/go/loader/util.go

@@ -1,124 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package loader
-
-import (
-	"go/ast"
-	"go/build"
-	"go/parser"
-	"go/token"
-	"io"
-	"os"
-	"strconv"
-	"sync"
-
-	"golang.org/x/tools/go/buildutil"
-)
-
-// We use a counting semaphore to limit
-// the number of parallel I/O calls per process.
-var ioLimit = make(chan bool, 10)
-
-// parseFiles parses the Go source files within directory dir and
-// returns the ASTs of the ones that could be at least partially parsed,
-// along with a list of I/O and parse errors encountered.
-//
-// I/O is done via ctxt, which may specify a virtual file system.
-// displayPath is used to transform the filenames attached to the ASTs.
-//
-func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) {
-	if displayPath == nil {
-		displayPath = func(path string) string { return path }
-	}
-	var wg sync.WaitGroup
-	n := len(files)
-	parsed := make([]*ast.File, n)
-	errors := make([]error, n)
-	for i, file := range files {
-		if !buildutil.IsAbsPath(ctxt, file) {
-			file = buildutil.JoinPath(ctxt, dir, file)
-		}
-		wg.Add(1)
-		go func(i int, file string) {
-			ioLimit <- true // wait
-			defer func() {
-				wg.Done()
-				<-ioLimit // signal
-			}()
-			var rd io.ReadCloser
-			var err error
-			if ctxt.OpenFile != nil {
-				rd, err = ctxt.OpenFile(file)
-			} else {
-				rd, err = os.Open(file)
-			}
-			if err != nil {
-				errors[i] = err // open failed
-				return
-			}
-
-			// ParseFile may return both an AST and an error.
-			parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode)
-			rd.Close()
-		}(i, file)
-	}
-	wg.Wait()
-
-	// Eliminate nils, preserving order.
-	var o int
-	for _, f := range parsed {
-		if f != nil {
-			parsed[o] = f
-			o++
-		}
-	}
-	parsed = parsed[:o]
-
-	o = 0
-	for _, err := range errors {
-		if err != nil {
-			errors[o] = err
-			o++
-		}
-	}
-	errors = errors[:o]
-
-	return parsed, errors
-}
-
-// scanImports returns the set of all import paths from all
-// import specs in the specified files.
-func scanImports(files []*ast.File) map[string]bool {
-	imports := make(map[string]bool)
-	for _, f := range files {
-		for _, decl := range f.Decls {
-			if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT {
-				for _, spec := range decl.Specs {
-					spec := spec.(*ast.ImportSpec)
-
-					// NB: do not assume the program is well-formed!
-					path, err := strconv.Unquote(spec.Path.Value)
-					if err != nil {
-						continue // quietly ignore the error
-					}
-					if path == "C" {
-						continue // skip pseudopackage
-					}
-					imports[path] = true
-				}
-			}
-		}
-	}
-	return imports
-}
-
-// ---------- Internal helpers ----------
-
-// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
-func tokenFileContainsPos(f *token.File, pos token.Pos) bool {
-	p := int(pos)
-	base := f.Base()
-	return base <= p && p < base+f.Size()
-}

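The `ioLimit` channel above is a counting semaphore: a send acquires one of the ten slots, a receive releases it. Reduced to a standalone sketch (the file names and the sleep are stand-ins for the real open/parse work), the pattern looks like this:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// sem is a counting semaphore limiting concurrent "I/O" to 10 goroutines,
// mirroring the ioLimit channel in the deleted util.go.
var sem = make(chan bool, 10)

func parseOne(name string) {
	sem <- true              // acquire a slot
	defer func() { <-sem }() // release it when done

	time.Sleep(10 * time.Millisecond) // stand-in for opening and parsing the file
	fmt.Println("parsed", name)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 50; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			parseOne(fmt.Sprintf("file%d.go", i))
		}(i)
	}
	wg.Wait()
}
```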
+ 0 - 221
vendor/golang.org/x/tools/go/packages/doc.go

@@ -1,221 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package packages loads Go packages for inspection and analysis.
-
-The Load function takes as input a list of patterns and returns a list of Package
-structs describing individual packages matched by those patterns.
-The LoadMode controls the amount of detail in the loaded packages.
-
-Load passes most patterns directly to the underlying build tool,
-but all patterns with the prefix "query=", where query is a
-non-empty string of letters from [a-z], are reserved and may be
-interpreted as query operators.
-
-Two query operators are currently supported: "file" and "pattern".
-
-The query "file=path/to/file.go" matches the package or packages enclosing
-the Go source file path/to/file.go.  For example "file=~/go/src/fmt/print.go"
-might return the packages "fmt" and "fmt [fmt.test]".
-
-The query "pattern=string" causes "string" to be passed directly to
-the underlying build tool. In most cases this is unnecessary,
-but an application can use Load("pattern=" + x) as an escaping mechanism
-to ensure that x is not interpreted as a query operator if it contains '='.
-
-All other query operators are reserved for future use and currently
-cause Load to report an error.
-
-The Package struct provides basic information about the package, including
-
-  - ID, a unique identifier for the package in the returned set;
-  - GoFiles, the names of the package's Go source files;
-  - Imports, a map from source import strings to the Packages they name;
-  - Types, the type information for the package's exported symbols;
-  - Syntax, the parsed syntax trees for the package's source code; and
-  - TypeInfo, the result of a complete type-check of the package syntax trees.
-
-(See the documentation for type Package for the complete list of fields
-and more detailed descriptions.)
-
-For example,
-
-	Load(nil, "bytes", "unicode...")
-
-returns four Package structs describing the standard library packages
-bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
-can match multiple packages and that a package might be matched by
-multiple patterns: in general it is not possible to determine which
-packages correspond to which patterns.
-
-Note that the list returned by Load contains only the packages matched
-by the patterns. Their dependencies can be found by walking the import
-graph using the Imports fields.
-
-The Load function can be configured by passing a pointer to a Config as
-the first argument. A nil Config is equivalent to the zero Config, which
-causes Load to run in LoadFiles mode, collecting minimal information.
-See the documentation for type Config for details.
-
-As noted earlier, the Config.Mode controls the amount of detail
-reported about the loaded packages. See the documentation for type LoadMode
-for details.
-
-Most tools should pass their command-line arguments (after any flags)
-uninterpreted to the loader, so that the loader can interpret them
-according to the conventions of the underlying build system.
-See the Example function for typical usage.
-
-*/
-package packages // import "golang.org/x/tools/go/packages"
-
-/*
-
-Motivation and design considerations
-
-The new package's design solves problems addressed by two existing
-packages: go/build, which locates and describes packages, and
-golang.org/x/tools/go/loader, which loads, parses and type-checks them.
-The go/build.Package structure encodes too much of the 'go build' way
-of organizing projects, leaving us in need of a data type that describes a
-package of Go source code independent of the underlying build system.
-We wanted something that works equally well with go build and vgo, and
-also other build systems such as Bazel and Blaze, making it possible to
-construct analysis tools that work in all these environments.
-Tools such as errcheck and staticcheck were essentially unavailable to
-the Go community at Google, and some of Google's internal tools for Go
-are unavailable externally.
-This new package provides a uniform way to obtain package metadata by
-querying each of these build systems, optionally supporting their
-preferred command-line notations for packages, so that tools integrate
-neatly with users' build environments. The Metadata query function
-executes an external query tool appropriate to the current workspace.
-
-Loading packages always returns the complete import graph "all the way down",
-even if all you want is information about a single package, because the query
-mechanisms of all the build systems we currently support ({go,vgo} list, and
-blaze/bazel aspect-based query) cannot provide detailed information
-about one package without visiting all its dependencies too, so there is
-no additional asymptotic cost to providing transitive information.
-(This property might not be true of a hypothetical 5th build system.)
-
-In calls to TypeCheck, all initial packages, and any package that
-transitively depends on one of them, must be loaded from source.
-Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
-source; D may be loaded from export data, and E may not be loaded at all
-(though it's possible that D's export data mentions it, so a
-types.Package may be created for it and exposed.)
-
-The old loader had a feature to suppress type-checking of function
-bodies on a per-package basis, primarily intended to reduce the work of
-obtaining type information for imported packages. Now that imports are
-satisfied by export data, the optimization no longer seems necessary.
-
-Despite some early attempts, the old loader did not exploit export data,
-instead always using the equivalent of WholeProgram mode. This was due
-to the complexity of mixing source and export data packages (now
-resolved by the upward traversal mentioned above), and because export data
-files were nearly always missing or stale. Now that 'go build' supports
-caching, all the underlying build systems can guarantee to produce
-export data in a reasonable (amortized) time.
-
-Test "main" packages synthesized by the build system are now reported as
-first-class packages, avoiding the need for clients (such as go/ssa) to
-reinvent this generation logic.
-
-One way in which go/packages is simpler than the old loader is in its
-treatment of in-package tests. In-package tests are packages that
-consist of all the files of the library under test, plus the test files.
-The old loader constructed in-package tests by a two-phase process of
-mutation called "augmentation": first it would construct and type check
-all the ordinary library packages and type-check the packages that
-depend on them; then it would add more (test) files to the package and
-type-check again. This two-phase approach had four major problems:
-1) in processing the tests, the loader modified the library package,
-   leaving no way for a client application to see both the test
-   package and the library package; one would mutate into the other.
-2) because test files can declare additional methods on types defined in
-   the library portion of the package, the dispatch of method calls in
-   the library portion was affected by the presence of the test files.
-   This should have been a clue that the packages were logically
-   different.
-3) this model of "augmentation" assumed at most one in-package test
-   per library package, which is true of projects using 'go build',
-   but not other build systems.
-4) because of the two-phase nature of test processing, all packages that
-   import the library package had to be processed before augmentation,
-   forcing a "one-shot" API and preventing the client from calling Load
-   several times in sequence, as is now possible in WholeProgram mode.
-   (TypeCheck mode has a similar one-shot restriction for a different reason.)
-
-Early drafts of this package supported "multi-shot" operation.
-Although it allowed clients to make a sequence of calls (or concurrent
-calls) to Load, building up the graph of Packages incrementally,
-it was of marginal value: it complicated the API
-(since it allowed some options to vary across calls but not others),
-it complicated the implementation,
-it cannot be made to work in Types mode, as explained above,
-and it was less efficient than making one combined call (when this is possible).
-Among the clients we have inspected, none made multiple calls to load
-but could not be easily and satisfactorily modified to make only a single call.
-However, application changes may be required.
-For example, the ssadump command loads the user-specified packages
-and in addition the runtime package.  It is tempting to simply append
-"runtime" to the user-provided list, but that does not work if the user
-specified an ad-hoc package such as [a.go b.go].
-Instead, ssadump no longer requests the runtime package,
-but seeks it among the dependencies of the user-specified packages,
-and emits an error if it is not found.
-
-Overlays: The Overlay field in the Config allows providing alternate contents
-for Go source files, by providing a mapping from file path to contents.
-go/packages will pull in new imports added in overlay files when go/packages
-is run in LoadImports mode or greater.
-Overlay support for the go list driver isn't complete yet: if the file doesn't
-exist on disk, it will only be recognized in an overlay if it is a non-test file
-and the package would be reported even without the overlay.
-
-Questions & Tasks
-
-- Add GOARCH/GOOS?
-  They are not portable concepts, but could be made portable.
-  Our goal has been to allow users to express themselves using the conventions
-  of the underlying build system: if the build system honors GOARCH
-  during a build and during a metadata query, then so should
-  applications built atop that query mechanism.
-  Conversely, if the target architecture of the build is determined by
-  command-line flags, the application can pass the relevant
-  flags through to the build system using a command such as:
-    myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
-  However, this approach is low-level, unwieldy, and non-portable.
-  GOOS and GOARCH seem important enough to warrant a dedicated option.
-
-- How should we handle partial failures such as a mixture of good and
-  malformed patterns, existing and non-existent packages, successful and
-  failed builds, import failures, import cycles, and so on, in a call to
-  Load?
-
-- Support bazel, blaze, and go1.10 list, not just go1.11 list.
-
-- Handle (and test) various partial success cases, e.g.
-  a mixture of good packages and:
-  invalid patterns
-  nonexistent packages
-  empty packages
-  packages with malformed package or import declarations
-  unreadable files
-  import cycles
-  other parse errors
-  type errors
-  Make sure we record errors at the correct place in the graph.
-
-- Missing packages among initial arguments are not reported.
-  Return bogus packages for them, like golist does.
-
-- "undeclared name" errors (for example) are reported out of source file
-  order. I suspect this is due to the breadth-first resolution now used
-  by go/types. Is that a bug? Discuss with gri.
-
-*/

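A minimal use of the API documented above might look like the sketch below. The mode bits and patterns shown are just one plausible combination, not the package's defaults; `NeedName | NeedFiles` is roughly the old LoadFiles behaviour, and `NeedSyntax`/`NeedTypes` can be added when ASTs and type information are required.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// Request only names, file lists and the import map.
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports,
	}

	// Patterns are passed to the underlying build tool; "file=..." and
	// "pattern=..." are the two reserved query operators described above.
	pkgs, err := packages.Load(cfg, "bytes", "unicode...")
	if err != nil {
		log.Fatal(err)
	}

	for _, pkg := range pkgs {
		fmt.Println(pkg.PkgPath, len(pkg.GoFiles), "files")
	}
}
```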
+ 0 - 101
vendor/golang.org/x/tools/go/packages/external.go

@@ -1,101 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file enables an external tool to intercept package requests.
-// If the tool is present then its results are used in preference to
-// the go list command.
-
-package packages
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	exec "golang.org/x/sys/execabs"
-	"os"
-	"strings"
-)
-
-// The Driver Protocol
-//
-// The driver, given the inputs to a call to Load, returns metadata about the packages specified.
-// This allows for different build systems to support go/packages by telling go/packages how the
-// packages' source is organized.
-// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in
-// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package
-// documentation in doc.go for the full description of the patterns that need to be supported.
-// A driver receives a JSON-serialized driverRequest struct on standard input and will
-// produce a JSON-serialized driverResponse (see definition in packages.go) on its standard output.
-
-// driverRequest is used to provide the portion of Load's Config that is needed by a driver.
-type driverRequest struct {
-	Mode LoadMode `json:"mode"`
-	// Env specifies the environment the underlying build system should be run in.
-	Env []string `json:"env"`
-	// BuildFlags are flags that should be passed to the underlying build system.
-	BuildFlags []string `json:"build_flags"`
-	// Tests specifies whether the patterns should also return test packages.
-	Tests bool `json:"tests"`
-	// Overlay maps file paths (relative to the driver's working directory) to the byte contents
-	// of overlay files.
-	Overlay map[string][]byte `json:"overlay"`
-}
-
-// findExternalDriver returns the file path of a tool that supplies
-// the build system package structure, or "" if not found."
-// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
-// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
-func findExternalDriver(cfg *Config) driver {
-	const toolPrefix = "GOPACKAGESDRIVER="
-	tool := ""
-	for _, env := range cfg.Env {
-		if val := strings.TrimPrefix(env, toolPrefix); val != env {
-			tool = val
-		}
-	}
-	if tool != "" && tool == "off" {
-		return nil
-	}
-	if tool == "" {
-		var err error
-		tool, err = exec.LookPath("gopackagesdriver")
-		if err != nil {
-			return nil
-		}
-	}
-	return func(cfg *Config, words ...string) (*driverResponse, error) {
-		req, err := json.Marshal(driverRequest{
-			Mode:       cfg.Mode,
-			Env:        cfg.Env,
-			BuildFlags: cfg.BuildFlags,
-			Tests:      cfg.Tests,
-			Overlay:    cfg.Overlay,
-		})
-		if err != nil {
-			return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
-		}
-
-		buf := new(bytes.Buffer)
-		stderr := new(bytes.Buffer)
-		cmd := exec.CommandContext(cfg.Context, tool, words...)
-		cmd.Dir = cfg.Dir
-		cmd.Env = cfg.Env
-		cmd.Stdin = bytes.NewReader(req)
-		cmd.Stdout = buf
-		cmd.Stderr = stderr
-
-		if err := cmd.Run(); err != nil {
-			return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
-		}
-		if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
-			fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr)
-		}
-
-		var response driverResponse
-		if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
-			return nil, err
-		}
-		return &response, nil
-	}
-}

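The handshake implemented by the closure above is ordinary JSON over a child process's stdin and stdout. Stripped down to standard-library calls, with placeholder request/response structs standing in for the real driverRequest and driverResponse (whose field sets live in external.go and packages.go), the exchange amounts to:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

// request and response are placeholders for driverRequest/driverResponse;
// they carry only a couple of illustrative fields.
type request struct {
	Mode  int  `json:"mode"`
	Tests bool `json:"tests"`
}

type response struct {
	Roots []string `json:"Roots"`
}

func main() {
	req, err := json.Marshal(request{Mode: 1, Tests: false})
	if err != nil {
		log.Fatal(err)
	}

	out := new(bytes.Buffer)
	cmd := exec.Command("gopackagesdriver", "./...") // driver binary found on PATH; patterns go in argv
	cmd.Stdin = bytes.NewReader(req)                 // request on stdin
	cmd.Stdout = out                                 // response on stdout
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}

	var resp response
	if err := json.Unmarshal(out.Bytes(), &resp); err != nil {
		log.Fatal(err)
	}
	fmt.Println("roots:", resp.Roots)
}
```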
+ 0 - 1096
vendor/golang.org/x/tools/go/packages/golist.go

@@ -1,1096 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packages
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"fmt"
-	"go/types"
-	exec "golang.org/x/sys/execabs"
-	"io/ioutil"
-	"log"
-	"os"
-	"path"
-	"path/filepath"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"unicode"
-
-	"golang.org/x/tools/go/internal/packagesdriver"
-	"golang.org/x/tools/internal/gocommand"
-	"golang.org/x/xerrors"
-)
-
-// debug controls verbose logging.
-var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))
-
-// A goTooOldError reports that the go command
-// found by exec.LookPath is too old to use the new go list behavior.
-type goTooOldError struct {
-	error
-}
-
-// responseDeduper wraps a driverResponse, deduplicating its contents.
-type responseDeduper struct {
-	seenRoots    map[string]bool
-	seenPackages map[string]*Package
-	dr           *driverResponse
-}
-
-func newDeduper() *responseDeduper {
-	return &responseDeduper{
-		dr:           &driverResponse{},
-		seenRoots:    map[string]bool{},
-		seenPackages: map[string]*Package{},
-	}
-}
-
-// addAll fills in r with a driverResponse.
-func (r *responseDeduper) addAll(dr *driverResponse) {
-	for _, pkg := range dr.Packages {
-		r.addPackage(pkg)
-	}
-	for _, root := range dr.Roots {
-		r.addRoot(root)
-	}
-}
-
-func (r *responseDeduper) addPackage(p *Package) {
-	if r.seenPackages[p.ID] != nil {
-		return
-	}
-	r.seenPackages[p.ID] = p
-	r.dr.Packages = append(r.dr.Packages, p)
-}
-
-func (r *responseDeduper) addRoot(id string) {
-	if r.seenRoots[id] {
-		return
-	}
-	r.seenRoots[id] = true
-	r.dr.Roots = append(r.dr.Roots, id)
-}
-
-type golistState struct {
-	cfg *Config
-	ctx context.Context
-
-	envOnce    sync.Once
-	goEnvError error
-	goEnv      map[string]string
-
-	rootsOnce     sync.Once
-	rootDirsError error
-	rootDirs      map[string]string
-
-	goVersionOnce  sync.Once
-	goVersionError error
-	goVersion      int // The X in Go 1.X.
-
-	// vendorDirs caches the (non)existence of vendor directories.
-	vendorDirs map[string]bool
-}
-
-// getEnv returns Go environment variables. Only specific variables are
-// populated -- computing all of them is slow.
-func (state *golistState) getEnv() (map[string]string, error) {
-	state.envOnce.Do(func() {
-		var b *bytes.Buffer
-		b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH")
-		if state.goEnvError != nil {
-			return
-		}
-
-		state.goEnv = make(map[string]string)
-		decoder := json.NewDecoder(b)
-		if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil {
-			return
-		}
-	})
-	return state.goEnv, state.goEnvError
-}
-
-// mustGetEnv is a convenience function that can be used if getEnv has already succeeded.
-func (state *golistState) mustGetEnv() map[string]string {
-	env, err := state.getEnv()
-	if err != nil {
-		panic(fmt.Sprintf("mustGetEnv: %v", err))
-	}
-	return env
-}
-
-// goListDriver uses the go list command to interpret the patterns and produce
-// the build system package structure.
-// See driver for more details.
-func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
-	// Make sure that any asynchronous go commands are killed when we return.
-	parentCtx := cfg.Context
-	if parentCtx == nil {
-		parentCtx = context.Background()
-	}
-	ctx, cancel := context.WithCancel(parentCtx)
-	defer cancel()
-
-	response := newDeduper()
-
-	state := &golistState{
-		cfg:        cfg,
-		ctx:        ctx,
-		vendorDirs: map[string]bool{},
-	}
-
-	// Fill in response.Sizes asynchronously if necessary.
-	var sizeserr error
-	var sizeswg sync.WaitGroup
-	if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
-		sizeswg.Add(1)
-		go func() {
-			var sizes types.Sizes
-			sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner)
-			// types.SizesFor always returns nil or a *types.StdSizes.
-			response.dr.Sizes, _ = sizes.(*types.StdSizes)
-			sizeswg.Done()
-		}()
-	}
-
-	// Determine files requested in contains patterns
-	var containFiles []string
-	restPatterns := make([]string, 0, len(patterns))
-	// Extract file= and other [querytype]= patterns. Report an error if querytype
-	// doesn't exist.
-extractQueries:
-	for _, pattern := range patterns {
-		eqidx := strings.Index(pattern, "=")
-		if eqidx < 0 {
-			restPatterns = append(restPatterns, pattern)
-		} else {
-			query, value := pattern[:eqidx], pattern[eqidx+len("="):]
-			switch query {
-			case "file":
-				containFiles = append(containFiles, value)
-			case "pattern":
-				restPatterns = append(restPatterns, value)
-			case "": // not a reserved query
-				restPatterns = append(restPatterns, pattern)
-			default:
-				for _, rune := range query {
-					if rune < 'a' || rune > 'z' { // not a reserved query
-						restPatterns = append(restPatterns, pattern)
-						continue extractQueries
-					}
-				}
-				// Reject all other patterns containing "="
-				return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern)
-			}
-		}
-	}
-
-	// See if we have any patterns to pass through to go list. Zero initial
-	// patterns also require a go list call, since that is the equivalent of
-	// ".".
-	if len(restPatterns) > 0 || len(patterns) == 0 {
-		dr, err := state.createDriverResponse(restPatterns...)
-		if err != nil {
-			return nil, err
-		}
-		response.addAll(dr)
-	}
-
-	if len(containFiles) != 0 {
-		if err := state.runContainsQueries(response, containFiles); err != nil {
-			return nil, err
-		}
-	}
-
-	// Only use go/packages' overlay processing if we're using a Go version
-	// below 1.16. Otherwise, go list handles it.
-	if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 {
-		modifiedPkgs, needPkgs, err := state.processGolistOverlay(response)
-		if err != nil {
-			return nil, err
-		}
-
-		var containsCandidates []string
-		if len(containFiles) > 0 {
-			containsCandidates = append(containsCandidates, modifiedPkgs...)
-			containsCandidates = append(containsCandidates, needPkgs...)
-		}
-		if err := state.addNeededOverlayPackages(response, needPkgs); err != nil {
-			return nil, err
-		}
-		// Check candidate packages for containFiles.
-		if len(containFiles) > 0 {
-			for _, id := range containsCandidates {
-				pkg, ok := response.seenPackages[id]
-				if !ok {
-					response.addPackage(&Package{
-						ID: id,
-						Errors: []Error{{
-							Kind: ListError,
-							Msg:  fmt.Sprintf("package %s expected but not seen", id),
-						}},
-					})
-					continue
-				}
-				for _, f := range containFiles {
-					for _, g := range pkg.GoFiles {
-						if sameFile(f, g) {
-							response.addRoot(id)
-						}
-					}
-				}
-			}
-		}
-		// Add root for any package that matches a pattern. This applies only to
-		// packages that are modified by overlays, since they are not added as
-		// roots automatically.
-		for _, pattern := range restPatterns {
-			match := matchPattern(pattern)
-			for _, pkgID := range modifiedPkgs {
-				pkg, ok := response.seenPackages[pkgID]
-				if !ok {
-					continue
-				}
-				if match(pkg.PkgPath) {
-					response.addRoot(pkg.ID)
-				}
-			}
-		}
-	}
-
-	sizeswg.Wait()
-	if sizeserr != nil {
-		return nil, sizeserr
-	}
-	return response.dr, nil
-}
-
-func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error {
-	if len(pkgs) == 0 {
-		return nil
-	}
-	dr, err := state.createDriverResponse(pkgs...)
-	if err != nil {
-		return err
-	}
-	for _, pkg := range dr.Packages {
-		response.addPackage(pkg)
-	}
-	_, needPkgs, err := state.processGolistOverlay(response)
-	if err != nil {
-		return err
-	}
-	return state.addNeededOverlayPackages(response, needPkgs)
-}
-
-func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error {
-	for _, query := range queries {
-		// TODO(matloob): Do only one query per directory.
-		fdir := filepath.Dir(query)
-		// Pass absolute path of directory to go list so that it knows to treat it as a directory,
-		// not a package path.
-		pattern, err := filepath.Abs(fdir)
-		if err != nil {
-			return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
-		}
-		dirResponse, err := state.createDriverResponse(pattern)
-
-		// If there was an error loading the package, or the package is returned
-		// with errors, try to load the file as an ad-hoc package.
-		// Usually the error will appear in a returned package, but may not if we're
-		// in module mode and the ad-hoc package is located outside a module.
-		if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 &&
-			len(dirResponse.Packages[0].Errors) == 1 {
-			var queryErr error
-			if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil {
-				return err // return the original error
-			}
-		}
-		isRoot := make(map[string]bool, len(dirResponse.Roots))
-		for _, root := range dirResponse.Roots {
-			isRoot[root] = true
-		}
-		for _, pkg := range dirResponse.Packages {
-			// Add any new packages to the main set.
-			// We don't bother to filter packages that will be dropped by the changes of roots;
-			// that will happen anyway during graph construction outside this function.
-			// Over-reporting packages is not a problem.
-			response.addPackage(pkg)
-			// if the package was not a root one, it cannot have the file
-			if !isRoot[pkg.ID] {
-				continue
-			}
-			for _, pkgFile := range pkg.GoFiles {
-				if filepath.Base(query) == filepath.Base(pkgFile) {
-					response.addRoot(pkg.ID)
-					break
-				}
-			}
-		}
-	}
-	return nil
-}
-
-// adhocPackage attempts to load or construct an ad-hoc package for a given
-// query, if the original call to the driver produced inadequate results.
-func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) {
-	response, err := state.createDriverResponse(query)
-	if err != nil {
-		return nil, err
-	}
-	// If we get nothing back from `go list`,
-	// try to make this file into its own ad-hoc package.
-	// TODO(rstambler): Should this check against the original response?
-	if len(response.Packages) == 0 {
-		response.Packages = append(response.Packages, &Package{
-			ID:              "command-line-arguments",
-			PkgPath:         query,
-			GoFiles:         []string{query},
-			CompiledGoFiles: []string{query},
-			Imports:         make(map[string]*Package),
-		})
-		response.Roots = append(response.Roots, "command-line-arguments")
-	}
-	// Handle special cases.
-	if len(response.Packages) == 1 {
-		// golang/go#33482: If this is a file= query for ad-hoc packages where
-		// the file only exists on an overlay, and exists outside of a module,
-		// add the file to the package and remove the errors.
-		if response.Packages[0].ID == "command-line-arguments" ||
-			filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) {
-			if len(response.Packages[0].GoFiles) == 0 {
-				filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
-				// TODO(matloob): check if the file is outside of a root dir?
-				for path := range state.cfg.Overlay {
-					if path == filename {
-						response.Packages[0].Errors = nil
-						response.Packages[0].GoFiles = []string{path}
-						response.Packages[0].CompiledGoFiles = []string{path}
-					}
-				}
-			}
-		}
-	}
-	return response, nil
-}
-
-// Fields must match go list;
-// see $GOROOT/src/cmd/go/internal/load/pkg.go.
-type jsonPackage struct {
-	ImportPath        string
-	Dir               string
-	Name              string
-	Export            string
-	GoFiles           []string
-	CompiledGoFiles   []string
-	IgnoredGoFiles    []string
-	IgnoredOtherFiles []string
-	CFiles            []string
-	CgoFiles          []string
-	CXXFiles          []string
-	MFiles            []string
-	HFiles            []string
-	FFiles            []string
-	SFiles            []string
-	SwigFiles         []string
-	SwigCXXFiles      []string
-	SysoFiles         []string
-	Imports           []string
-	ImportMap         map[string]string
-	Deps              []string
-	Module            *Module
-	TestGoFiles       []string
-	TestImports       []string
-	XTestGoFiles      []string
-	XTestImports      []string
-	ForTest           string // q in a "p [q.test]" package, else ""
-	DepOnly           bool
-
-	Error *jsonPackageError
-}
-
-type jsonPackageError struct {
-	ImportStack []string
-	Pos         string
-	Err         string
-}
-
-func otherFiles(p *jsonPackage) [][]string {
-	return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
-}
-
-// createDriverResponse uses the "go list" command to expand the pattern
-// words and return a response for the specified packages.
-func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) {
-	// go list uses the following identifiers in ImportPath and Imports:
-	//
-	// 	"p"			-- importable package or main (command)
-	// 	"q.test"		-- q's test executable
-	// 	"p [q.test]"		-- variant of p as built for q's test executable
-	// 	"q_test [q.test]"	-- q's external test package
-	//
-	// The packages p that are built differently for a test q.test
-	// are q itself, plus any helpers used by the external test q_test,
-	// typically including "testing" and all its dependencies.
-
-	// Run "go list" for complete
-	// information on the specified packages.
-	buf, err := state.invokeGo("list", golistargs(state.cfg, words)...)
-	if err != nil {
-		return nil, err
-	}
-	seen := make(map[string]*jsonPackage)
-	pkgs := make(map[string]*Package)
-	additionalErrors := make(map[string][]Error)
-	// Decode the JSON and convert it to Package form.
-	var response driverResponse
-	for dec := json.NewDecoder(buf); dec.More(); {
-		p := new(jsonPackage)
-		if err := dec.Decode(p); err != nil {
-			return nil, fmt.Errorf("JSON decoding failed: %v", err)
-		}
-
-		if p.ImportPath == "" {
-			// The documentation for go list says that “[e]rroneous packages will have
-			// a non-empty ImportPath”. If for some reason it comes back empty, we
-			// prefer to error out rather than silently discarding data or handing
-			// back a package without any way to refer to it.
-			if p.Error != nil {
-				return nil, Error{
-					Pos: p.Error.Pos,
-					Msg: p.Error.Err,
-				}
-			}
-			return nil, fmt.Errorf("package missing import path: %+v", p)
-		}
-
-		// Work around https://golang.org/issue/33157:
-		// go list -e, when given an absolute path, will find the package contained at
-		// that directory. But when no package exists there, it will return a fake package
-		// with an error and the ImportPath set to the absolute path provided to go list.
-		// Try to convert that absolute path to what its package path would be if it's
-		// contained in a known module or GOPATH entry. This will allow the package to be
-		// properly "reclaimed" when overlays are processed.
-		if filepath.IsAbs(p.ImportPath) && p.Error != nil {
-			pkgPath, ok, err := state.getPkgPath(p.ImportPath)
-			if err != nil {
-				return nil, err
-			}
-			if ok {
-				p.ImportPath = pkgPath
-			}
-		}
-
-		if old, found := seen[p.ImportPath]; found {
-			// If one version of the package has an error, and the other doesn't, assume
-			// that this is a case where go list is reporting a fake dependency variant
-			// of the imported package: When a package tries to invalidly import another
-			// package, go list emits a variant of the imported package (with the same
-			// import path, but with an error on it, and the package will have a
-			// DepError set on it). An example of when this can happen is for imports of
-			// main packages: main packages can not be imported, but they may be
-			// separately matched and listed by another pattern.
-			// See golang.org/issue/36188 for more details.
-
-			// The plan is that eventually, hopefully in Go 1.15, the error will be
-			// reported on the importing package rather than the duplicate "fake"
-			// version of the imported package. Once all supported versions of Go
-			// have the new behavior this logic can be deleted.
-			// TODO(matloob): delete the workaround logic once all supported versions of
-			// Go return the errors on the proper package.
-
-			// There should be exactly one version of a package that doesn't have an
-			// error.
-			if old.Error == nil && p.Error == nil {
-				if !reflect.DeepEqual(p, old) {
-					return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
-				}
-				continue
-			}
-
-			// Determine if this package's error needs to be bubbled up.
-			// This is a hack, and we expect go list to eventually set the error
-			// on the package.
-			if old.Error != nil {
-				var errkind string
-				if strings.Contains(old.Error.Err, "not an importable package") {
-					errkind = "not an importable package"
-				} else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") {
-					errkind = "use of internal package not allowed"
-				}
-				if errkind != "" {
-					if len(old.Error.ImportStack) < 1 {
-						return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind)
-					}
-					importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1]
-					if importingPkg == old.ImportPath {
-						// An older version of Go put this package itself on top of the import
-						// stack, instead of the importer. Look for the importer in the
-						// second-from-top position.
-						if len(old.Error.ImportStack) < 2 {
-							return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind)
-						}
-						importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2]
-					}
-					additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{
-						Pos:  old.Error.Pos,
-						Msg:  old.Error.Err,
-						Kind: ListError,
-					})
-				}
-			}
-
-			// Make sure that if there's a version of the package without an error,
-			// that's the one reported to the user.
-			if old.Error == nil {
-				continue
-			}
-
-			// This package will replace the old one at the end of the loop.
-		}
-		seen[p.ImportPath] = p
-
-		pkg := &Package{
-			Name:            p.Name,
-			ID:              p.ImportPath,
-			GoFiles:         absJoin(p.Dir, p.GoFiles, p.CgoFiles),
-			CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
-			OtherFiles:      absJoin(p.Dir, otherFiles(p)...),
-			IgnoredFiles:    absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles),
-			forTest:         p.ForTest,
-			Module:          p.Module,
-		}
-
-		if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 {
-			if len(p.CompiledGoFiles) > len(p.GoFiles) {
-				// We need the cgo definitions, which are in the first
-				// CompiledGoFile after the non-cgo ones. This is a hack but there
-				// isn't currently a better way to find it. We also need the pure
-				// Go files and unprocessed cgo files, all of which are already
-				// in pkg.GoFiles.
-				cgoTypes := p.CompiledGoFiles[len(p.GoFiles)]
-				pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...)
-			} else {
-				// golang/go#38990: go list silently fails to do cgo processing
-				pkg.CompiledGoFiles = nil
-				pkg.Errors = append(pkg.Errors, Error{
-					Msg:  "go list failed to return CompiledGoFiles; https://golang.org/issue/38990?",
-					Kind: ListError,
-				})
-			}
-		}
-
-		// Work around https://golang.org/issue/28749:
-		// cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
-		// Filter out any elements of CompiledGoFiles that are also in OtherFiles.
-		// We have to keep this workaround in place until go1.12 is a distant memory.
-		if len(pkg.OtherFiles) > 0 {
-			other := make(map[string]bool, len(pkg.OtherFiles))
-			for _, f := range pkg.OtherFiles {
-				other[f] = true
-			}
-
-			out := pkg.CompiledGoFiles[:0]
-			for _, f := range pkg.CompiledGoFiles {
-				if other[f] {
-					continue
-				}
-				out = append(out, f)
-			}
-			pkg.CompiledGoFiles = out
-		}
-
-		// Extract the PkgPath from the package's ID.
-		if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
-			pkg.PkgPath = pkg.ID[:i]
-		} else {
-			pkg.PkgPath = pkg.ID
-		}
-
-		if pkg.PkgPath == "unsafe" {
-			pkg.GoFiles = nil // ignore fake unsafe.go file
-		}
-
-		// Assume go list emits only absolute paths for Dir.
-		if p.Dir != "" && !filepath.IsAbs(p.Dir) {
-			log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
-		}
-
-		if p.Export != "" && !filepath.IsAbs(p.Export) {
-			pkg.ExportFile = filepath.Join(p.Dir, p.Export)
-		} else {
-			pkg.ExportFile = p.Export
-		}
-
-		// imports
-		//
-		// Imports contains the IDs of all imported packages.
-		// ImportsMap records (path, ID) only where they differ.
-		ids := make(map[string]bool)
-		for _, id := range p.Imports {
-			ids[id] = true
-		}
-		pkg.Imports = make(map[string]*Package)
-		for path, id := range p.ImportMap {
-			pkg.Imports[path] = &Package{ID: id} // non-identity import
-			delete(ids, id)
-		}
-		for id := range ids {
-			if id == "C" {
-				continue
-			}
-
-			pkg.Imports[id] = &Package{ID: id} // identity import
-		}
-		if !p.DepOnly {
-			response.Roots = append(response.Roots, pkg.ID)
-		}
-
-		// Workaround for pre-Go 1.11 versions of go list.
-		// TODO(matloob): they should be handled by the fallback.
-		// Can we delete this?
-		if len(pkg.CompiledGoFiles) == 0 {
-			pkg.CompiledGoFiles = pkg.GoFiles
-		}
-
-		// Temporary work-around for golang/go#39986. Parse filenames out of
-		// error messages. This happens if there are unrecoverable syntax
-		// errors in the source, so we can't match on a specific error message.
-		if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) {
-			addFilenameFromPos := func(pos string) bool {
-				split := strings.Split(pos, ":")
-				if len(split) < 1 {
-					return false
-				}
-				filename := strings.TrimSpace(split[0])
-				if filename == "" {
-					return false
-				}
-				if !filepath.IsAbs(filename) {
-					filename = filepath.Join(state.cfg.Dir, filename)
-				}
-				info, _ := os.Stat(filename)
-				if info == nil {
-					return false
-				}
-				pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
-				pkg.GoFiles = append(pkg.GoFiles, filename)
-				return true
-			}
-			found := addFilenameFromPos(err.Pos)
-			// In some cases, go list only reports the error position in the
-			// error text, not in the Pos field. One such case is when the
-			// file's package name is a keyword (see golang.org/issue/39763).
-			if !found {
-				addFilenameFromPos(err.Err)
-			}
-		}
-
-		if p.Error != nil {
-			msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
-			// Address golang.org/issue/35964 by appending import stack to error message.
-			if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 {
-				msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack)
-			}
-			pkg.Errors = append(pkg.Errors, Error{
-				Pos:  p.Error.Pos,
-				Msg:  msg,
-				Kind: ListError,
-			})
-		}
-
-		pkgs[pkg.ID] = pkg
-	}
-
-	for id, errs := range additionalErrors {
-		if p, ok := pkgs[id]; ok {
-			p.Errors = append(p.Errors, errs...)
-		}
-	}
-	for _, pkg := range pkgs {
-		response.Packages = append(response.Packages, pkg)
-	}
-	sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
-
-	return &response, nil
-}
-
-func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
-	if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 {
-		return false
-	}
-
-	goV, err := state.getGoVersion()
-	if err != nil {
-		return false
-	}
-
-	// On Go 1.14 and earlier, only add filenames from errors if the import stack is empty.
-	// The import stack behaves differently for these versions than newer Go versions.
-	if goV < 15 {
-		return len(p.Error.ImportStack) == 0
-	}
-
-	// On Go 1.15 and later, only parse filenames out of error if there's no import stack,
-	// or the current package is at the top of the import stack. This is not guaranteed
-	// to work perfectly, but should avoid some cases where files in errors don't belong to this
-	// package.
-	return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath
-}
-
-func (state *golistState) getGoVersion() (int, error) {
-	state.goVersionOnce.Do(func() {
-		state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
-	})
-	return state.goVersion, state.goVersionError
-}
-
-// getPkgPath finds the package path of a directory if it's relative to a root
-// directory.
-func (state *golistState) getPkgPath(dir string) (string, bool, error) {
-	absDir, err := filepath.Abs(dir)
-	if err != nil {
-		return "", false, err
-	}
-	roots, err := state.determineRootDirs()
-	if err != nil {
-		return "", false, err
-	}
-
-	for rdir, rpath := range roots {
-		// Make sure that the directory is in the module,
-		// to avoid creating a path relative to another module.
-		if !strings.HasPrefix(absDir, rdir) {
-			continue
-		}
-		// TODO(matloob): This doesn't properly handle symlinks.
-		r, err := filepath.Rel(rdir, dir)
-		if err != nil {
-			continue
-		}
-		if rpath != "" {
-			// We choose only one root even though the directory can belong to multiple modules
-			// or GOPATH entries. This is okay because we only need to work with absolute dirs when a
-			// file is missing from disk, for instance when gopls calls go/packages in an overlay.
-			// Once the file is saved, gopls or the next invocation of the tool will get the correct
-			// result straight from go list.
-			// TODO(matloob): Implement module tiebreaking?
-			return path.Join(rpath, filepath.ToSlash(r)), true, nil
-		}
-		return filepath.ToSlash(r), true, nil
-	}
-	return "", false, nil
-}
-
-// absJoin absolutizes and flattens the lists of files.
-func absJoin(dir string, fileses ...[]string) (res []string) {
-	for _, files := range fileses {
-		for _, file := range files {
-			if !filepath.IsAbs(file) {
-				file = filepath.Join(dir, file)
-			}
-			res = append(res, file)
-		}
-	}
-	return res
-}
-
-func golistargs(cfg *Config, words []string) []string {
-	const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
-	fullargs := []string{
-		"-e", "-json",
-		fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0),
-		fmt.Sprintf("-test=%t", cfg.Tests),
-		fmt.Sprintf("-export=%t", usesExportData(cfg)),
-		fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0),
-		// go list doesn't let you pass -test and -find together,
-		// probably because you'd just get the TestMain.
-		fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0),
-	}
-	fullargs = append(fullargs, cfg.BuildFlags...)
-	fullargs = append(fullargs, "--")
-	fullargs = append(fullargs, words...)
-	return fullargs
-}
-
-// cfgInvocation returns an Invocation that reflects cfg's settings.
-func (state *golistState) cfgInvocation() gocommand.Invocation {
-	cfg := state.cfg
-	return gocommand.Invocation{
-		BuildFlags: cfg.BuildFlags,
-		ModFile:    cfg.modFile,
-		ModFlag:    cfg.modFlag,
-		CleanEnv:   cfg.Env != nil,
-		Env:        cfg.Env,
-		Logf:       cfg.Logf,
-		WorkingDir: cfg.Dir,
-	}
-}
-
-// invokeGo returns the stdout of a go command invocation.
-func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) {
-	cfg := state.cfg
-
-	inv := state.cfgInvocation()
-
-	// For Go versions 1.16 and above, `go list` accepts overlays directly via
-	// the -overlay flag. Set it, if it's available.
-	//
-	// The check for "list" is not necessarily required, but we should avoid
-	// getting the go version if possible.
-	if verb == "list" {
-		goVersion, err := state.getGoVersion()
-		if err != nil {
-			return nil, err
-		}
-		if goVersion >= 16 {
-			filename, cleanup, err := state.writeOverlays()
-			if err != nil {
-				return nil, err
-			}
-			defer cleanup()
-			inv.Overlay = filename
-		}
-	}
-	inv.Verb = verb
-	inv.Args = args
-	gocmdRunner := cfg.gocmdRunner
-	if gocmdRunner == nil {
-		gocmdRunner = &gocommand.Runner{}
-	}
-	stdout, stderr, _, err := gocmdRunner.RunRaw(cfg.Context, inv)
-	if err != nil {
-		// Check for 'go' executable not being found.
-		if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
-			return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound)
-		}
-
-		exitErr, ok := err.(*exec.ExitError)
-		if !ok {
-			// Catastrophic error:
-			// - context cancellation
-			return nil, xerrors.Errorf("couldn't run 'go': %w", err)
-		}
-
-		// Old go version?
-		if strings.Contains(stderr.String(), "flag provided but not defined") {
-			return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
-		}
-
-		// Related to #24854
-		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") {
-			return nil, fmt.Errorf("%s", stderr.String())
-		}
-
-		// Is there an error running the C compiler in cgo? This will be reported in the "Error" field
-		// and should be suppressed by go list -e.
-		//
-		// This condition is not perfect yet because the error message can include error messages other than those from runtime/cgo.
-		isPkgPathRune := func(r rune) bool {
-			// From https://golang.org/ref/spec#Import_declarations:
-			//    Implementation restriction: A compiler may restrict ImportPaths to non-empty strings
-			//    using only characters belonging to Unicode's L, M, N, P, and S general categories
-			//    (the Graphic characters without spaces) and may also exclude the
-			//    characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD.
-			return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) &&
-				!strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r)
-		}
-		// golang/go#36770: Handle case where cmd/go prints module download messages before the error.
-		msg := stderr.String()
-		for strings.HasPrefix(msg, "go: downloading") {
-			msg = msg[strings.IndexRune(msg, '\n')+1:]
-		}
-		if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") {
-			msg := msg[len("# "):]
-			if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") {
-				return stdout, nil
-			}
-			// Treat pkg-config errors as a special case (golang.org/issue/36770).
-			if strings.HasPrefix(msg, "pkg-config") {
-				return stdout, nil
-			}
-		}
-
-		// This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show
-		// the error in the Err section of stdout in case -e option is provided.
-		// This fix is provided for backwards compatibility.
-		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
-			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
-				strings.Trim(stderr.String(), "\n"))
-			return bytes.NewBufferString(output), nil
-		}
-
-		// Similar to the previous error, but currently lacks a fix in Go.
-		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") {
-			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
-				strings.Trim(stderr.String(), "\n"))
-			return bytes.NewBufferString(output), nil
-		}
-
-		// Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath.
-		// If the package doesn't exist, put the absolute path of the directory into the error message,
-		// as Go 1.13 list does.
-		const noSuchDirectory = "no such directory"
-		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) {
-			errstr := stderr.String()
-			abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):])
-			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
-				abspath, strings.Trim(stderr.String(), "\n"))
-			return bytes.NewBufferString(output), nil
-		}
-
-		// Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
-		// Note that the error message we look for in this case is different from the one looked for above.
-		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
-			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
-				strings.Trim(stderr.String(), "\n"))
-			return bytes.NewBufferString(output), nil
-		}
-
-		// Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a
-		// directory outside any module.
-		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") {
-			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
-				// TODO(matloob): command-line-arguments isn't correct here.
-				"command-line-arguments", strings.Trim(stderr.String(), "\n"))
-			return bytes.NewBufferString(output), nil
-		}
-
-		// Another variation of the previous error
-		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") {
-			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
-				// TODO(matloob): command-line-arguments isn't correct here.
-				"command-line-arguments", strings.Trim(stderr.String(), "\n"))
-			return bytes.NewBufferString(output), nil
-		}
-
-		// Workaround for an instance of golang.org/issue/26755: go list -e  will return a non-zero exit
-		// status if there's a dependency on a package that doesn't exist. But it should return
-		// a zero exit status and set an error on that package.
-		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") {
-			// Don't clobber stdout if `go list` actually returned something.
-			if len(stdout.String()) > 0 {
-				return stdout, nil
-			}
-			// try to extract package name from string
-			stderrStr := stderr.String()
-			var importPath string
-			colon := strings.Index(stderrStr, ":")
-			if colon > 0 && strings.HasPrefix(stderrStr, "go build ") {
-				importPath = stderrStr[len("go build "):colon]
-			}
-			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
-				importPath, strings.Trim(stderrStr, "\n"))
-			return bytes.NewBufferString(output), nil
-		}
-
-		// Export mode entails a build.
-		// If that build fails, errors appear on stderr
-		// (despite the -e flag) and the Export field is blank.
-		// Do not fail in that case.
-		// The same is true if an ad-hoc package given to go list doesn't exist.
-		// TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when
-		// packages don't exist or a build fails.
-		if !usesExportData(cfg) && !containsGoFile(args) {
-			return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
-		}
-	}
-	return stdout, nil
-}
-
-// OverlayJSON is the format overlay files are expected to be in.
-// The Replace map maps from overlaid paths to replacement paths:
-// the Go command will forward all reads trying to open
-// each overlaid path to its replacement path, or consider the overlaid
-// path not to exist if the replacement path is empty.
-//
-// From golang/go#39958.
-type OverlayJSON struct {
-	Replace map[string]string `json:"replace,omitempty"`
-}
-
-// writeOverlays writes out files for go list's -overlay flag, as described
-// above.
-func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) {
-	// Do nothing if there are no overlays in the config.
-	if len(state.cfg.Overlay) == 0 {
-		return "", func() {}, nil
-	}
-	dir, err := ioutil.TempDir("", "gopackages-*")
-	if err != nil {
-		return "", nil, err
-	}
-	// The caller must clean up this directory, unless this function returns an
-	// error.
-	cleanup = func() {
-		os.RemoveAll(dir)
-	}
-	defer func() {
-		if err != nil {
-			cleanup()
-		}
-	}()
-	overlays := map[string]string{}
-	for k, v := range state.cfg.Overlay {
-		// Create a unique filename for the overlaid files, to avoid
-		// creating nested directories.
-		noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "")
-		f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator))
-		if err != nil {
-			return "", func() {}, err
-		}
-		if _, err := f.Write(v); err != nil {
-			return "", func() {}, err
-		}
-		if err := f.Close(); err != nil {
-			return "", func() {}, err
-		}
-		overlays[k] = f.Name()
-	}
-	b, err := json.Marshal(OverlayJSON{Replace: overlays})
-	if err != nil {
-		return "", func() {}, err
-	}
-	// Write out the overlay file that contains the filepath mappings.
-	filename = filepath.Join(dir, "overlay.json")
-	if err := ioutil.WriteFile(filename, b, 0665); err != nil {
-		return "", func() {}, err
-	}
-	return filename, cleanup, nil
-}
-
-func containsGoFile(s []string) bool {
-	for _, f := range s {
-		if strings.HasSuffix(f, ".go") {
-			return true
-		}
-	}
-	return false
-}
-
-func cmdDebugStr(cmd *exec.Cmd) string {
-	env := make(map[string]string)
-	for _, kv := range cmd.Env {
-		split := strings.SplitN(kv, "=", 2)
-		k, v := split[0], split[1]
-		env[k] = v
-	}
-
-	var args []string
-	for _, arg := range cmd.Args {
-		quoted := strconv.Quote(arg)
-		if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") {
-			args = append(args, quoted)
-		} else {
-			args = append(args, arg)
-		}
-	}
-	return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
-}
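
For context (illustrative, not part of the vendored diff): the removed golist.go above documents how, on Go 1.16 and newer, overlays are handed to the go command via the -overlay flag, pointing at a JSON file in the OverlayJSON format produced by writeOverlays. A minimal standalone sketch of that format; the paths are hypothetical and chosen only for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// OverlayJSON mirrors the struct in the removed golist.go: Replace maps each
// overlaid source path to the temporary file holding its replacement contents.
type OverlayJSON struct {
	Replace map[string]string `json:"replace,omitempty"`
}

func main() {
	o := OverlayJSON{Replace: map[string]string{
		// Hypothetical paths, for illustration only.
		"/work/app/main.go": "/tmp/gopackages-123/0-workappmain.go",
	}}
	b, _ := json.Marshal(o)
	fmt.Println(string(b))
	// Output: {"replace":{"/work/app/main.go":"/tmp/gopackages-123/0-workappmain.go"}}
}

The resulting file is what invokeGo sets as inv.Overlay once it has detected a go version of 1.16 or newer.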

+ 0 - 575
vendor/golang.org/x/tools/go/packages/golist_overlay.go

@@ -1,575 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packages
-
-import (
-	"encoding/json"
-	"fmt"
-	"go/parser"
-	"go/token"
-	"os"
-	"path/filepath"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-
-	"golang.org/x/tools/internal/gocommand"
-)
-
-// processGolistOverlay provides rudimentary support for adding
-// files that don't exist on disk to an overlay. The results can
-// sometimes be incorrect.
-// TODO(matloob): Handle unsupported cases, including the following:
-// - determining the correct package to add given a new import path
-func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) {
-	havePkgs := make(map[string]string) // importPath -> non-test package ID
-	needPkgsSet := make(map[string]bool)
-	modifiedPkgsSet := make(map[string]bool)
-
-	pkgOfDir := make(map[string][]*Package)
-	for _, pkg := range response.dr.Packages {
-		// This is an approximation of import path to id. This can be
-		// wrong for tests, vendored packages, and a number of other cases.
-		havePkgs[pkg.PkgPath] = pkg.ID
-		dir, err := commonDir(pkg.GoFiles)
-		if err != nil {
-			return nil, nil, err
-		}
-		if dir != "" {
-			pkgOfDir[dir] = append(pkgOfDir[dir], pkg)
-		}
-	}
-
-	// If no new imports are added, it is safe to avoid loading any needPkgs.
-	// Otherwise, it's hard to tell which package is actually being loaded
-	// (due to vendoring) and whether any modified package will show up
-	// in the transitive set of dependencies (because new imports are added,
-	// potentially modifying the transitive set of dependencies).
-	var overlayAddsImports bool
-
-	// If both a package and its test package are created by the overlay, we
-	// need the real package first. Process all non-test files before test
-	// files, and make the whole process deterministic while we're at it.
-	var overlayFiles []string
-	for opath := range state.cfg.Overlay {
-		overlayFiles = append(overlayFiles, opath)
-	}
-	sort.Slice(overlayFiles, func(i, j int) bool {
-		iTest := strings.HasSuffix(overlayFiles[i], "_test.go")
-		jTest := strings.HasSuffix(overlayFiles[j], "_test.go")
-		if iTest != jTest {
-			return !iTest // non-tests are before tests.
-		}
-		return overlayFiles[i] < overlayFiles[j]
-	})
-	for _, opath := range overlayFiles {
-		contents := state.cfg.Overlay[opath]
-		base := filepath.Base(opath)
-		dir := filepath.Dir(opath)
-		var pkg *Package           // if opath belongs to both a package and its test variant, this will be the test variant
-		var testVariantOf *Package // if opath is a test file, this is the package it is testing
-		var fileExists bool
-		isTestFile := strings.HasSuffix(opath, "_test.go")
-		pkgName, ok := extractPackageName(opath, contents)
-		if !ok {
-			// Don't bother adding a file that doesn't even have a parsable package statement
-			// to the overlay.
-			continue
-		}
-		// If all the files already in this directory belong to a single, different
-		// package, change that package's name to the overlay file's package name.
-		maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir])
-	nextPackage:
-		for _, p := range response.dr.Packages {
-			if pkgName != p.Name && p.ID != "command-line-arguments" {
-				continue
-			}
-			for _, f := range p.GoFiles {
-				if !sameFile(filepath.Dir(f), dir) {
-					continue
-				}
-				// Make sure to capture information on the package's test variant, if needed.
-				if isTestFile && !hasTestFiles(p) {
-					// TODO(matloob): Are there packages other than the 'production' variant
-					// of a package that this can match? This shouldn't match the test main package
-					// because the file is generated in another directory.
-					testVariantOf = p
-					continue nextPackage
-				} else if !isTestFile && hasTestFiles(p) {
-					// We're examining a test variant, but the overlaid file is
-					// a non-test file. Because the overlay implementation
-					// (currently) only adds a file to one package, skip this
-					// package, so that we can add the file to the production
-					// variant of the package. (https://golang.org/issue/36857
-					// tracks handling overlays on both the production and test
-					// variant of a package).
-					continue nextPackage
-				}
-				if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath {
-					// We have already seen the production version of the package
-					// for which p is a test variant.
-					if hasTestFiles(p) {
-						testVariantOf = pkg
-					}
-				}
-				pkg = p
-				if filepath.Base(f) == base {
-					fileExists = true
-				}
-			}
-		}
-		// The overlay could have included an entirely new package or an
-		// ad-hoc package. An ad-hoc package is one that we have manually
-		// constructed from inadequate `go list` results for a file= query.
-		// It will have the ID command-line-arguments.
-		if pkg == nil || pkg.ID == "command-line-arguments" {
-			// Try to find the module or gopath dir the file is contained in.
-			// Then for modules, add the module path to the beginning.
-			pkgPath, ok, err := state.getPkgPath(dir)
-			if err != nil {
-				return nil, nil, err
-			}
-			if !ok {
-				break
-			}
-			var forTest string // only set for x tests
-			isXTest := strings.HasSuffix(pkgName, "_test")
-			if isXTest {
-				forTest = pkgPath
-				pkgPath += "_test"
-			}
-			id := pkgPath
-			if isTestFile {
-				if isXTest {
-					id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest)
-				} else {
-					id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath)
-				}
-			}
-			if pkg != nil {
-				// TODO(rstambler): We should change the package's path and ID
-				// here. The only issue is that this messes with the roots.
-			} else {
-				// Try to reclaim a package with the same ID, if it exists in the response.
-				for _, p := range response.dr.Packages {
-					if reclaimPackage(p, id, opath, contents) {
-						pkg = p
-						break
-					}
-				}
-				// Otherwise, create a new package.
-				if pkg == nil {
-					pkg = &Package{
-						PkgPath: pkgPath,
-						ID:      id,
-						Name:    pkgName,
-						Imports: make(map[string]*Package),
-					}
-					response.addPackage(pkg)
-					havePkgs[pkg.PkgPath] = id
-					// Add the production package's sources for a test variant.
-					if isTestFile && !isXTest && testVariantOf != nil {
-						pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...)
-						pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...)
-						// Add the package under test and its imports to the test variant.
-						pkg.forTest = testVariantOf.PkgPath
-						for k, v := range testVariantOf.Imports {
-							pkg.Imports[k] = &Package{ID: v.ID}
-						}
-					}
-					if isXTest {
-						pkg.forTest = forTest
-					}
-				}
-			}
-		}
-		if !fileExists {
-			pkg.GoFiles = append(pkg.GoFiles, opath)
-			// TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior
-			// if the file will be ignored due to its build tags.
-			pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath)
-			modifiedPkgsSet[pkg.ID] = true
-		}
-		imports, err := extractImports(opath, contents)
-		if err != nil {
-			// Let the parser or type checker report errors later.
-			continue
-		}
-		for _, imp := range imports {
-			// TODO(rstambler): If the package is an x test and the import has
-			// a test variant, make sure to replace it.
-			if _, found := pkg.Imports[imp]; found {
-				continue
-			}
-			overlayAddsImports = true
-			id, ok := havePkgs[imp]
-			if !ok {
-				var err error
-				id, err = state.resolveImport(dir, imp)
-				if err != nil {
-					return nil, nil, err
-				}
-			}
-			pkg.Imports[imp] = &Package{ID: id}
-			// Add dependencies to the non-test variant version of this package as well.
-			if testVariantOf != nil {
-				testVariantOf.Imports[imp] = &Package{ID: id}
-			}
-		}
-	}
-
-	// toPkgPath guesses the package path given the id.
-	toPkgPath := func(sourceDir, id string) (string, error) {
-		if i := strings.IndexByte(id, ' '); i >= 0 {
-			return state.resolveImport(sourceDir, id[:i])
-		}
-		return state.resolveImport(sourceDir, id)
-	}
-
-	// Now that new packages have been created, do another pass to determine
-	// the new set of missing packages.
-	for _, pkg := range response.dr.Packages {
-		for _, imp := range pkg.Imports {
-			if len(pkg.GoFiles) == 0 {
-				return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath)
-			}
-			pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID)
-			if err != nil {
-				return nil, nil, err
-			}
-			if _, ok := havePkgs[pkgPath]; !ok {
-				needPkgsSet[pkgPath] = true
-			}
-		}
-	}
-
-	if overlayAddsImports {
-		needPkgs = make([]string, 0, len(needPkgsSet))
-		for pkg := range needPkgsSet {
-			needPkgs = append(needPkgs, pkg)
-		}
-	}
-	modifiedPkgs = make([]string, 0, len(modifiedPkgsSet))
-	for pkg := range modifiedPkgsSet {
-		modifiedPkgs = append(modifiedPkgs, pkg)
-	}
-	return modifiedPkgs, needPkgs, err
-}
-
-// resolveImport finds the ID of a package given its import path.
-// In particular, it will find the right vendored copy when in GOPATH mode.
-func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) {
-	env, err := state.getEnv()
-	if err != nil {
-		return "", err
-	}
-	if env["GOMOD"] != "" {
-		return importPath, nil
-	}
-
-	searchDir := sourceDir
-	for {
-		vendorDir := filepath.Join(searchDir, "vendor")
-		exists, ok := state.vendorDirs[vendorDir]
-		if !ok {
-			info, err := os.Stat(vendorDir)
-			exists = err == nil && info.IsDir()
-			state.vendorDirs[vendorDir] = exists
-		}
-
-		if exists {
-			vendoredPath := filepath.Join(vendorDir, importPath)
-			if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() {
-				// We should probably check for .go files here, but shame on anyone who fools us.
-				path, ok, err := state.getPkgPath(vendoredPath)
-				if err != nil {
-					return "", err
-				}
-				if ok {
-					return path, nil
-				}
-			}
-		}
-
-		// We know we've hit the top of the filesystem when we Dir / and get /,
-		// or C:\ and get C:\, etc.
-		next := filepath.Dir(searchDir)
-		if next == searchDir {
-			break
-		}
-		searchDir = next
-	}
-	return importPath, nil
-}
-
-func hasTestFiles(p *Package) bool {
-	for _, f := range p.GoFiles {
-		if strings.HasSuffix(f, "_test.go") {
-			return true
-		}
-	}
-	return false
-}
-
-// determineRootDirs returns a mapping from absolute directories that could
-// contain code to their corresponding import path prefixes.
-func (state *golistState) determineRootDirs() (map[string]string, error) {
-	env, err := state.getEnv()
-	if err != nil {
-		return nil, err
-	}
-	if env["GOMOD"] != "" {
-		state.rootsOnce.Do(func() {
-			state.rootDirs, state.rootDirsError = state.determineRootDirsModules()
-		})
-	} else {
-		state.rootsOnce.Do(func() {
-			state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH()
-		})
-	}
-	return state.rootDirs, state.rootDirsError
-}
-
-func (state *golistState) determineRootDirsModules() (map[string]string, error) {
-	// List all of the modules--the first will be the directory for the main
-	// module. Any replaced modules will also need to be treated as roots.
-	// Editing files in the module cache isn't a great idea, so we don't
-	// plan to ever support that.
-	out, err := state.invokeGo("list", "-m", "-json", "all")
-	if err != nil {
-		// 'go list all' will fail if we're outside of a module and
-		// GO111MODULE=on. Try falling back without 'all'.
-		var innerErr error
-		out, innerErr = state.invokeGo("list", "-m", "-json")
-		if innerErr != nil {
-			return nil, err
-		}
-	}
-	roots := map[string]string{}
-	modules := map[string]string{}
-	var i int
-	for dec := json.NewDecoder(out); dec.More(); {
-		mod := new(gocommand.ModuleJSON)
-		if err := dec.Decode(mod); err != nil {
-			return nil, err
-		}
-		if mod.Dir != "" && mod.Path != "" {
-			// This is a valid module; add it to the map.
-			absDir, err := filepath.Abs(mod.Dir)
-			if err != nil {
-				return nil, err
-			}
-			modules[absDir] = mod.Path
-			// The first result is the main module.
-			if i == 0 || mod.Replace != nil && mod.Replace.Path != "" {
-				roots[absDir] = mod.Path
-			}
-		}
-		i++
-	}
-	return roots, nil
-}
-
-func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) {
-	m := map[string]string{}
-	for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) {
-		absDir, err := filepath.Abs(dir)
-		if err != nil {
-			return nil, err
-		}
-		m[filepath.Join(absDir, "src")] = ""
-	}
-	return m, nil
-}
-
-func extractImports(filename string, contents []byte) ([]string, error) {
-	f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset?
-	if err != nil {
-		return nil, err
-	}
-	var res []string
-	for _, imp := range f.Imports {
-		quotedPath := imp.Path.Value
-		path, err := strconv.Unquote(quotedPath)
-		if err != nil {
-			return nil, err
-		}
-		res = append(res, path)
-	}
-	return res, nil
-}
-
-// reclaimPackage attempts to reuse a package that failed to load in an overlay.
-//
-// If the package has errors and has no Name, GoFiles, or Imports,
-// then it's possible that it doesn't yet exist on disk.
-func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool {
-	// TODO(rstambler): Check the message of the actual error?
-	// It differs between $GOPATH and module mode.
-	if pkg.ID != id {
-		return false
-	}
-	if len(pkg.Errors) != 1 {
-		return false
-	}
-	if pkg.Name != "" || pkg.ExportFile != "" {
-		return false
-	}
-	if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 {
-		return false
-	}
-	if len(pkg.Imports) > 0 {
-		return false
-	}
-	pkgName, ok := extractPackageName(filename, contents)
-	if !ok {
-		return false
-	}
-	pkg.Name = pkgName
-	pkg.Errors = nil
-	return true
-}
-
-func extractPackageName(filename string, contents []byte) (string, bool) {
-	// TODO(rstambler): Check the message of the actual error?
-	// It differs between $GOPATH and module mode.
-	f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset?
-	if err != nil {
-		return "", false
-	}
-	return f.Name.Name, true
-}
-
-// commonDir returns the directory that all files are in, "" if files is empty,
-// or an error if they aren't in the same directory.
-func commonDir(files []string) (string, error) {
-	seen := make(map[string]bool)
-	for _, f := range files {
-		seen[filepath.Dir(f)] = true
-	}
-	if len(seen) > 1 {
-		return "", fmt.Errorf("files (%v) are in more than one directory: %v", files, seen)
-	}
-	for k := range seen {
-		// seen has only one element; return it.
-		return k, nil
-	}
-	return "", nil // no files
-}
-
-// maybeFixPackageName handles the case where the files already on disk in
-// directory dir have a package name that differs from newName, the name
-// deduced from the overlays. If the on-disk files all share a single package
-// name that differs from newName, their packages are renamed to newName,
-// unless the overlay file looks like an x test (see the heuristic below).
-func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) {
-	names := make(map[string]int)
-	for _, p := range pkgsOfDir {
-		names[p.Name]++
-	}
-	if len(names) != 1 {
-		// some files are in different packages
-		return
-	}
-	var oldName string
-	for k := range names {
-		oldName = k
-	}
-	if newName == oldName {
-		return
-	}
-	// We might have a case where all of the package names in the directory are
-	// the same, but the overlay file is for an x test, which belongs to its
-	// own package. If the x test does not yet exist on disk, we may not yet
-	// have its package name on disk, but we should not rename the packages.
-	//
-	// We use a heuristic to determine if this file belongs to an x test:
-	// The test file should have a package name with a _test suffix, or one
-	// that looks like "newName_test".
-	maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test")
-	if isTestFile && maybeXTest {
-		return
-	}
-	for _, p := range pkgsOfDir {
-		p.Name = newName
-	}
-}
-
-// This function is copy-pasted from
-// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360.
-// It should be deleted when we remove support for overlays from go/packages.
-//
-// NOTE: This does not handle any ./... or ./ style queries, as this function
-// doesn't know the working directory.
-//
-// matchPattern(pattern)(name) reports whether
-// name matches pattern. Pattern is a limited glob
-// pattern in which '...' means 'any string' and there
-// is no other special syntax.
-// Unfortunately, there are two special cases. Quoting "go help packages":
-//
-// First, /... at the end of the pattern can match an empty string,
-// so that net/... matches both net and packages in its subdirectories, like net/http.
-// Second, any slash-separated pattern element containing a wildcard never
-// participates in a match of the "vendor" element in the path of a vendored
-// package, so that ./... does not match packages in subdirectories of
-// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do.
-// Note, however, that a directory named vendor that itself contains code
-// is not a vendored package: cmd/vendor would be a command named vendor,
-// and the pattern cmd/... matches it.
-func matchPattern(pattern string) func(name string) bool {
-	// Convert pattern to regular expression.
-	// The strategy for the trailing /... is to nest it in an explicit ? expression.
-	// The strategy for the vendor exclusion is to change the unmatchable
-	// vendor strings to a disallowed code point (vendorChar) and to use
-	// "(anything but that codepoint)*" as the implementation of the ... wildcard.
-	// This is a bit complicated but the obvious alternative,
-	// namely a hand-written search like in most shell glob matchers,
-	// is too easy to make accidentally exponential.
-	// Using package regexp guarantees linear-time matching.
-
-	const vendorChar = "\x00"
-
-	if strings.Contains(pattern, vendorChar) {
-		return func(name string) bool { return false }
-	}
-
-	re := regexp.QuoteMeta(pattern)
-	re = replaceVendor(re, vendorChar)
-	switch {
-	case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`):
-		re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)`
-	case re == vendorChar+`/\.\.\.`:
-		re = `(/vendor|/` + vendorChar + `/\.\.\.)`
-	case strings.HasSuffix(re, `/\.\.\.`):
-		re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?`
-	}
-	re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`)
-
-	reg := regexp.MustCompile(`^` + re + `$`)
-
-	return func(name string) bool {
-		if strings.Contains(name, vendorChar) {
-			return false
-		}
-		return reg.MatchString(replaceVendor(name, vendorChar))
-	}
-}
-
-// replaceVendor returns the result of replacing
-// non-trailing vendor path elements in x with repl.
-func replaceVendor(x, repl string) string {
-	if !strings.Contains(x, "vendor") {
-		return x
-	}
-	elem := strings.Split(x, "/")
-	for i := 0; i < len(elem)-1; i++ {
-		if elem[i] == "vendor" {
-			elem[i] = repl
-		}
-	}
-	return strings.Join(elem, "/")
-}
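
For context (illustrative, not part of the vendored diff): the removed extractPackageName and extractImports helpers above rely on go/parser's partial-parse modes, so only the package clause or the import block of an overlay file has to be parsed. A self-contained sketch of that technique; the file name and source below are made up for illustration:

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	src := []byte("package demo\n\nimport \"fmt\"\n\nfunc main() { fmt.Println(\"hi\") }\n")

	// PackageClauseOnly stops after the package statement, as in extractPackageName.
	f, err := parser.ParseFile(token.NewFileSet(), "demo.go", src, parser.PackageClauseOnly)
	if err == nil {
		fmt.Println("package:", f.Name.Name) // package: demo
	}

	// ImportsOnly stops after the import declarations, as in extractImports.
	f, err = parser.ParseFile(token.NewFileSet(), "demo.go", src, parser.ImportsOnly)
	if err == nil {
		for _, imp := range f.Imports {
			fmt.Println("import:", imp.Path.Value) // import: "fmt"
		}
	}
}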

+ 0 - 57
vendor/golang.org/x/tools/go/packages/loadmode_string.go

@@ -1,57 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packages
-
-import (
-	"fmt"
-	"strings"
-)
-
-var allModes = []LoadMode{
-	NeedName,
-	NeedFiles,
-	NeedCompiledGoFiles,
-	NeedImports,
-	NeedDeps,
-	NeedExportsFile,
-	NeedTypes,
-	NeedSyntax,
-	NeedTypesInfo,
-	NeedTypesSizes,
-}
-
-var modeStrings = []string{
-	"NeedName",
-	"NeedFiles",
-	"NeedCompiledGoFiles",
-	"NeedImports",
-	"NeedDeps",
-	"NeedExportsFile",
-	"NeedTypes",
-	"NeedSyntax",
-	"NeedTypesInfo",
-	"NeedTypesSizes",
-}
-
-func (mod LoadMode) String() string {
-	m := mod
-	if m == 0 {
-		return "LoadMode(0)"
-	}
-	var out []string
-	for i, x := range allModes {
-		if x > m {
-			break
-		}
-		if (m & x) != 0 {
-			out = append(out, modeStrings[i])
-			m = m ^ x
-		}
-	}
-	if m != 0 {
-		out = append(out, "Unknown")
-	}
-	return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|"))
-}
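
For context (illustrative, not part of the vendored diff): the LoadMode bits decoded by the String method above are documented in the removed packages.go below, where they are combined in Config.Mode to say which Package fields should be filled in; ID and Errors are always populated. A minimal usage sketch, assuming the standard golang.org/x/tools/go/packages API and run inside a module that requires golang.org/x/tools; the pattern "fmt" is only an example:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Request only names and file lists for the matched packages.
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.ID, p.Name, p.GoFiles)
	}
}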

+ 0 - 1233
vendor/golang.org/x/tools/go/packages/packages.go

@@ -1,1233 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packages
-
-// See doc.go for package documentation and implementation notes.
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"go/ast"
-	"go/parser"
-	"go/scanner"
-	"go/token"
-	"go/types"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-	"time"
-
-	"golang.org/x/tools/go/gcexportdata"
-	"golang.org/x/tools/internal/gocommand"
-	"golang.org/x/tools/internal/packagesinternal"
-	"golang.org/x/tools/internal/typesinternal"
-)
-
-// A LoadMode controls the amount of detail to return when loading.
-// The bits below can be combined to specify which fields should be
-// filled in the result packages.
-// The zero value is a special case, equivalent to combining
-// the NeedName, NeedFiles, and NeedCompiledGoFiles bits.
-// ID and Errors (if present) will always be filled.
-// Load may return more information than requested.
-type LoadMode int
-
-// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to
-// NeedExportFile to make it consistent with the Package field it's adding.
-
-const (
-	// NeedName adds Name and PkgPath.
-	NeedName LoadMode = 1 << iota
-
-	// NeedFiles adds GoFiles and OtherFiles.
-	NeedFiles
-
-	// NeedCompiledGoFiles adds CompiledGoFiles.
-	NeedCompiledGoFiles
-
-	// NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain
-	// "placeholder" Packages with only the ID set.
-	NeedImports
-
-	// NeedDeps adds the fields requested by the LoadMode in the packages in Imports.
-	NeedDeps
-
-	// NeedExportsFile adds ExportFile.
-	NeedExportsFile
-
-	// NeedTypes adds Types, Fset, and IllTyped.
-	NeedTypes
-
-	// NeedSyntax adds Syntax.
-	NeedSyntax
-
-	// NeedTypesInfo adds TypesInfo.
-	NeedTypesInfo
-
-	// NeedTypesSizes adds TypesSizes.
-	NeedTypesSizes
-
-	// typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
-	// Modifies CompiledGoFiles and Types, and has no effect on its own.
-	typecheckCgo
-
-	// NeedModule adds Module.
-	NeedModule
-)
-
-const (
-	// Deprecated: LoadFiles exists for historical compatibility
-	// and should not be used. Please directly specify the needed fields using the Need values.
-	LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
-
-	// Deprecated: LoadImports exists for historical compatibility
-	// and should not be used. Please directly specify the needed fields using the Need values.
-	LoadImports = LoadFiles | NeedImports
-
-	// Deprecated: LoadTypes exists for historical compatibility
-	// and should not be used. Please directly specify the needed fields using the Need values.
-	LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
-
-	// Deprecated: LoadSyntax exists for historical compatibility
-	// and should not be used. Please directly specify the needed fields using the Need values.
-	LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
-
-	// Deprecated: LoadAllSyntax exists for historical compatibility
-	// and should not be used. Please directly specify the needed fields using the Need values.
-	LoadAllSyntax = LoadSyntax | NeedDeps
-)
-
-// A Config specifies details about how packages should be loaded.
-// The zero value is a valid configuration.
-// Calls to Load do not modify this struct.
-type Config struct {
-	// Mode controls the level of information returned for each package.
-	Mode LoadMode
-
-	// Context specifies the context for the load operation.
-	// If the context is cancelled, the loader may stop early
-	// and return an ErrCancelled error.
-	// If Context is nil, the load cannot be cancelled.
-	Context context.Context
-
-	// Logf is the logger for the config.
-	// If the user provides a logger, debug logging is enabled.
-	// If the GOPACKAGESDEBUG environment variable is set to true,
-	// but the logger is nil, default to log.Printf.
-	Logf func(format string, args ...interface{})
-
-	// Dir is the directory in which to run the build system's query tool
-	// that provides information about the packages.
-	// If Dir is empty, the tool is run in the current directory.
-	Dir string
-
-	// Env is the environment to use when invoking the build system's query tool.
-	// If Env is nil, the current environment is used.
-	// As in os/exec's Cmd, only the last value in the slice for
-	// each environment key is used. To specify the setting of only
-	// a few variables, append to the current environment, as in:
-	//
-	//	opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386")
-	//
-	Env []string
-
-	// gocmdRunner guards go command calls from concurrency errors.
-	gocmdRunner *gocommand.Runner
-
-	// BuildFlags is a list of command-line flags to be passed through to
-	// the build system's query tool.
-	BuildFlags []string
-
-	// modFile will be used for -modfile in go command invocations.
-	modFile string
-
-	// modFlag will be used for -modfile in go command invocations.
-	modFlag string
-
-	// Fset provides source position information for syntax trees and types.
-	// If Fset is nil, Load will create and use a new FileSet.
-	Fset *token.FileSet
-
-	// ParseFile is called to read and parse each file
-	// when preparing a package's type-checked syntax tree.
-	// It must be safe to call ParseFile simultaneously from multiple goroutines.
-	// If ParseFile is nil, the loader will use parser.ParseFile.
-	//
-	// ParseFile should parse the source from src and use filename only for
-	// recording position information.
-	//
-	// An application may supply a custom implementation of ParseFile
-	// to change the effective file contents or the behavior of the parser,
-	// or to modify the syntax tree. For example, selectively eliminating
-	// unwanted function bodies can significantly accelerate type checking.
-	ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)
-
-	// If Tests is set, the loader includes not just the packages
-	// matching a particular pattern but also any related test packages,
-	// including test-only variants of the package and the test executable.
-	//
-	// For example, when using the go command, loading "fmt" with Tests=true
-	// returns four packages, with IDs "fmt" (the standard package),
-	// "fmt [fmt.test]" (the package as compiled for the test),
-	// "fmt_test" (the test functions from source files in package fmt_test),
-	// and "fmt.test" (the test binary).
-	//
-	// In build systems with explicit names for tests,
-	// setting Tests may have no effect.
-	Tests bool
-
-	// Overlay provides a mapping of absolute file paths to file contents.
-	// If the file with the given path already exists, the parser will use the
-	// alternative file contents provided by the map.
-	//
-	// Overlays provide incomplete support for when a given file doesn't
-	// already exist on disk. See the package doc above for more details.
-	Overlay map[string][]byte
-}
-
-// driver is the type for functions that query the build system for the
-// packages named by the patterns.
-type driver func(cfg *Config, patterns ...string) (*driverResponse, error)
-
-// driverResponse contains the results for a driver query.
-type driverResponse struct {
-	// NotHandled is returned if the request can't be handled by the current
-	// driver. If an external driver returns a response with NotHandled, the
-	// rest of the driverResponse is ignored, and go/packages will fall back
-	// to the next driver. If go/packages is extended in the future to support
-	// lists of multiple drivers, go/packages will fall back to the next driver.
-	NotHandled bool
-
-	// Sizes, if not nil, is the types.Sizes to use when type checking.
-	Sizes *types.StdSizes
-
-	// Roots is the set of package IDs that make up the root packages.
-	// We have to encode this separately because when we encode a single package
-	// we cannot know if it is one of the roots as that requires knowledge of the
-	// graph it is part of.
-	Roots []string `json:",omitempty"`
-
-	// Packages is the full set of packages in the graph.
-	// The packages are not connected into a graph.
-	// The Imports, if populated, will be stubs that only have their ID set.
-	// Imports will be connected and then type and syntax information added in a
-	// later pass (see refine).
-	Packages []*Package
-}
-
-// Load loads and returns the Go packages named by the given patterns.
-//
-// Config specifies loading options;
-// nil behaves the same as an empty Config.
-//
-// Load returns an error if any of the patterns was invalid
-// as defined by the underlying build system.
-// It may return an empty list of packages without an error,
-// for instance for an empty expansion of a valid wildcard.
-// Errors associated with a particular package are recorded in the
-// corresponding Package's Errors list, and do not cause Load to
-// return an error. Clients may need to handle such errors before
-// proceeding with further analysis. The PrintErrors function is
-// provided for convenient display of all errors.
-func Load(cfg *Config, patterns ...string) ([]*Package, error) {
-	l := newLoader(cfg)
-	response, err := defaultDriver(&l.Config, patterns...)
-	if err != nil {
-		return nil, err
-	}
-	l.sizes = response.Sizes
-	return l.refine(response.Roots, response.Packages...)
-}
-
-// defaultDriver is a driver that implements go/packages' fallback behavior.
-// It will try to request to an external driver, if one exists. If there's
-// no external driver, or the driver returns a response with NotHandled set,
-// defaultDriver will fall back to the go list driver.
-func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
-	driver := findExternalDriver(cfg)
-	if driver == nil {
-		driver = goListDriver
-	}
-	response, err := driver(cfg, patterns...)
-	if err != nil {
-		return response, err
-	} else if response.NotHandled {
-		return goListDriver(cfg, patterns...)
-	}
-	return response, nil
-}
-
-// A Package describes a loaded Go package.
-type Package struct {
-	// ID is a unique identifier for a package,
-	// in a syntax provided by the underlying build system.
-	//
-	// Because the syntax varies based on the build system,
-	// clients should treat IDs as opaque and not attempt to
-	// interpret them.
-	ID string
-
-	// Name is the package name as it appears in the package source code.
-	Name string
-
-	// PkgPath is the package path as used by the go/types package.
-	PkgPath string
-
-	// Errors contains any errors encountered querying the metadata
-	// of the package, or while parsing or type-checking its files.
-	Errors []Error
-
-	// GoFiles lists the absolute file paths of the package's Go source files.
-	GoFiles []string
-
-	// CompiledGoFiles lists the absolute file paths of the package's source
-	// files that are suitable for type checking.
-	// This may differ from GoFiles if files are processed before compilation.
-	CompiledGoFiles []string
-
-	// OtherFiles lists the absolute file paths of the package's non-Go source files,
-	// including assembly, C, C++, Fortran, Objective-C, SWIG, and so on.
-	OtherFiles []string
-
-	// IgnoredFiles lists source files that are not part of the package
-	// using the current build configuration but that might be part of
-	// the package using other build configurations.
-	IgnoredFiles []string
-
-	// ExportFile is the absolute path to a file containing type
-	// information for the package as provided by the build system.
-	ExportFile string
-
-	// Imports maps import paths appearing in the package's Go source files
-	// to corresponding loaded Packages.
-	Imports map[string]*Package
-
-	// Types provides type information for the package.
-	// The NeedTypes LoadMode bit sets this field for packages matching the
-	// patterns; type information for dependencies may be missing or incomplete,
-	// unless NeedDeps and NeedImports are also set.
-	Types *types.Package
-
-	// Fset provides position information for Types, TypesInfo, and Syntax.
-	// It is set only when Types is set.
-	Fset *token.FileSet
-
-	// IllTyped indicates whether the package or any dependency contains errors.
-	// It is set only when Types is set.
-	IllTyped bool
-
-	// Syntax is the package's syntax trees, for the files listed in CompiledGoFiles.
-	//
-	// The NeedSyntax LoadMode bit populates this field for packages matching the patterns.
-	// If NeedDeps and NeedImports are also set, this field will also be populated
-	// for dependencies.
-	Syntax []*ast.File
-
-	// TypesInfo provides type information about the package's syntax trees.
-	// It is set only when Syntax is set.
-	TypesInfo *types.Info
-
-	// TypesSizes provides the effective size function for types in TypesInfo.
-	TypesSizes types.Sizes
-
-	// forTest is the package under test, if any.
-	forTest string
-
-	// Module is the module information for the package, if any.
-	Module *Module
-}
-
-// Module provides module information for a package.
-type Module struct {
-	Path      string       // module path
-	Version   string       // module version
-	Replace   *Module      // replaced by this module
-	Time      *time.Time   // time version was created
-	Main      bool         // is this the main module?
-	Indirect  bool         // is this module only an indirect dependency of main module?
-	Dir       string       // directory holding files for this module, if any
-	GoMod     string       // path to go.mod file used when loading this module, if any
-	GoVersion string       // go version used in module
-	Error     *ModuleError // error loading module
-}
-
-// ModuleError holds errors loading a module.
-type ModuleError struct {
-	Err string // the error itself
-}
-
-func init() {
-	packagesinternal.GetForTest = func(p interface{}) string {
-		return p.(*Package).forTest
-	}
-	packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner {
-		return config.(*Config).gocmdRunner
-	}
-	packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {
-		config.(*Config).gocmdRunner = runner
-	}
-	packagesinternal.SetModFile = func(config interface{}, value string) {
-		config.(*Config).modFile = value
-	}
-	packagesinternal.SetModFlag = func(config interface{}, value string) {
-		config.(*Config).modFlag = value
-	}
-	packagesinternal.TypecheckCgo = int(typecheckCgo)
-}
-
-// An Error describes a problem with a package's metadata, syntax, or types.
-type Error struct {
-	Pos  string // "file:line:col" or "file:line" or "" or "-"
-	Msg  string
-	Kind ErrorKind
-}
-
-// ErrorKind describes the source of the error, allowing the user to
-// differentiate between errors generated by the driver, the parser, or the
-// type-checker.
-type ErrorKind int
-
-const (
-	UnknownError ErrorKind = iota
-	ListError
-	ParseError
-	TypeError
-)
-
-func (err Error) Error() string {
-	pos := err.Pos
-	if pos == "" {
-		pos = "-" // like token.Position{}.String()
-	}
-	return pos + ": " + err.Msg
-}
-
-// flatPackage is the JSON form of Package
-// It drops all the type and syntax fields, and transforms the Imports
-//
-// TODO(adonovan): identify this struct with Package, effectively
-// publishing the JSON protocol.
-type flatPackage struct {
-	ID              string
-	Name            string            `json:",omitempty"`
-	PkgPath         string            `json:",omitempty"`
-	Errors          []Error           `json:",omitempty"`
-	GoFiles         []string          `json:",omitempty"`
-	CompiledGoFiles []string          `json:",omitempty"`
-	OtherFiles      []string          `json:",omitempty"`
-	IgnoredFiles    []string          `json:",omitempty"`
-	ExportFile      string            `json:",omitempty"`
-	Imports         map[string]string `json:",omitempty"`
-}
-
-// MarshalJSON returns the Package in its JSON form.
-// For the most part, the structure fields are written out unmodified, and
-// the type and syntax fields are skipped.
-// The imports are written out as just a map of path to package id.
-// The errors are written using a custom type that tries to preserve the
-// structure of error types we know about.
-//
-// This method exists to enable support for additional build systems.  It is
-// not intended for use by clients of the API and we may change the format.
-func (p *Package) MarshalJSON() ([]byte, error) {
-	flat := &flatPackage{
-		ID:              p.ID,
-		Name:            p.Name,
-		PkgPath:         p.PkgPath,
-		Errors:          p.Errors,
-		GoFiles:         p.GoFiles,
-		CompiledGoFiles: p.CompiledGoFiles,
-		OtherFiles:      p.OtherFiles,
-		IgnoredFiles:    p.IgnoredFiles,
-		ExportFile:      p.ExportFile,
-	}
-	if len(p.Imports) > 0 {
-		flat.Imports = make(map[string]string, len(p.Imports))
-		for path, ipkg := range p.Imports {
-			flat.Imports[path] = ipkg.ID
-		}
-	}
-	return json.Marshal(flat)
-}
-
-// UnmarshalJSON reads in a Package from its JSON format.
-// See MarshalJSON for details about the format accepted.
-func (p *Package) UnmarshalJSON(b []byte) error {
-	flat := &flatPackage{}
-	if err := json.Unmarshal(b, &flat); err != nil {
-		return err
-	}
-	*p = Package{
-		ID:              flat.ID,
-		Name:            flat.Name,
-		PkgPath:         flat.PkgPath,
-		Errors:          flat.Errors,
-		GoFiles:         flat.GoFiles,
-		CompiledGoFiles: flat.CompiledGoFiles,
-		OtherFiles:      flat.OtherFiles,
-		ExportFile:      flat.ExportFile,
-	}
-	if len(flat.Imports) > 0 {
-		p.Imports = make(map[string]*Package, len(flat.Imports))
-		for path, id := range flat.Imports {
-			p.Imports[path] = &Package{ID: id}
-		}
-	}
-	return nil
-}
-
-func (p *Package) String() string { return p.ID }
-
-// loaderPackage augments Package with state used during the loading phase
-type loaderPackage struct {
-	*Package
-	importErrors map[string]error // maps each bad import to its error
-	loadOnce     sync.Once
-	color        uint8 // for cycle detection
-	needsrc      bool  // load from source (Mode >= LoadTypes)
-	needtypes    bool  // type information is either requested or depended on
-	initial      bool  // package was matched by a pattern
-}
-
-// loader holds the working state of a single call to load.
-type loader struct {
-	pkgs map[string]*loaderPackage
-	Config
-	sizes        types.Sizes
-	parseCache   map[string]*parseValue
-	parseCacheMu sync.Mutex
-	exportMu     sync.Mutex // enforces mutual exclusion of exportdata operations
-
-	// Config.Mode contains the implied mode (see impliedLoadMode).
-	// Implied mode contains all the fields we need the data for.
-	// In requestedMode there are the actually requested fields.
-	// We'll zero them out before returning packages to the user.
-	// This makes it easier for us to get the conditions where
-	// we need certain modes right.
-	requestedMode LoadMode
-}
-
-type parseValue struct {
-	f     *ast.File
-	err   error
-	ready chan struct{}
-}
-
-func newLoader(cfg *Config) *loader {
-	ld := &loader{
-		parseCache: map[string]*parseValue{},
-	}
-	if cfg != nil {
-		ld.Config = *cfg
-		// If the user has provided a logger, use it.
-		ld.Config.Logf = cfg.Logf
-	}
-	if ld.Config.Logf == nil {
-		// If the GOPACKAGESDEBUG environment variable is set to true,
-		// but the user has not provided a logger, default to log.Printf.
-		if debug {
-			ld.Config.Logf = log.Printf
-		} else {
-			ld.Config.Logf = func(format string, args ...interface{}) {}
-		}
-	}
-	if ld.Config.Mode == 0 {
-		ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility.
-	}
-	if ld.Config.Env == nil {
-		ld.Config.Env = os.Environ()
-	}
-	if ld.Config.gocmdRunner == nil {
-		ld.Config.gocmdRunner = &gocommand.Runner{}
-	}
-	if ld.Context == nil {
-		ld.Context = context.Background()
-	}
-	if ld.Dir == "" {
-		if dir, err := os.Getwd(); err == nil {
-			ld.Dir = dir
-		}
-	}
-
-	// Save the actually requested fields. We'll zero them out before returning packages to the user.
-	ld.requestedMode = ld.Mode
-	ld.Mode = impliedLoadMode(ld.Mode)
-
-	if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
-		if ld.Fset == nil {
-			ld.Fset = token.NewFileSet()
-		}
-
-		// ParseFile is required even in LoadTypes mode
-		// because we load source if export data is missing.
-		if ld.ParseFile == nil {
-			ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
-				const mode = parser.AllErrors | parser.ParseComments
-				return parser.ParseFile(fset, filename, src, mode)
-			}
-		}
-	}
-
-	return ld
-}
-
-// refine connects the supplied packages into a graph and then adds type and
-// syntax information as requested by the LoadMode.
-func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
-	rootMap := make(map[string]int, len(roots))
-	for i, root := range roots {
-		rootMap[root] = i
-	}
-	ld.pkgs = make(map[string]*loaderPackage)
-	// first pass, fixup and build the map and roots
-	var initial = make([]*loaderPackage, len(roots))
-	for _, pkg := range list {
-		rootIndex := -1
-		if i, found := rootMap[pkg.ID]; found {
-			rootIndex = i
-		}
-
-		// Overlays can invalidate export data.
-		// TODO(matloob): make this check fine-grained based on dependencies on overlaid files
-		exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
-		// This package needs type information if the caller requested types and the package is
-		// either a root, or it's a non-root and the user requested dependencies ...
-		needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
-		// This package needs source if the call requested source (or types info, which implies source)
-		// and the package is either a root, or it's a non-root and the user requested dependencies...
-		needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
-			// ... or if we need types and the exportData is invalid. We fall back to (incompletely)
-			// typechecking packages from source if they fail to compile.
-			(ld.Mode&NeedTypes|NeedTypesInfo != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
-		lpkg := &loaderPackage{
-			Package:   pkg,
-			needtypes: needtypes,
-			needsrc:   needsrc,
-		}
-		ld.pkgs[lpkg.ID] = lpkg
-		if rootIndex >= 0 {
-			initial[rootIndex] = lpkg
-			lpkg.initial = true
-		}
-	}
-	for i, root := range roots {
-		if initial[i] == nil {
-			return nil, fmt.Errorf("root package %v is missing", root)
-		}
-	}
-
-	// Materialize the import graph.
-
-	const (
-		white = 0 // new
-		grey  = 1 // in progress
-		black = 2 // complete
-	)
-
-	// visit traverses the import graph, depth-first,
-	// and materializes the graph as Packages.Imports.
-	//
-	// Valid imports are saved in the Packages.Import map.
-	// Invalid imports (cycles and missing nodes) are saved in the importErrors map.
-	// Thus, even in the presence of both kinds of errors, the Import graph remains a DAG.
-	//
-	// visit returns whether the package needs src or has a transitive
-	// dependency on a package that does. These are the only packages
-	// for which we load source code.
-	var stack []*loaderPackage
-	var visit func(lpkg *loaderPackage) bool
-	var srcPkgs []*loaderPackage
-	visit = func(lpkg *loaderPackage) bool {
-		switch lpkg.color {
-		case black:
-			return lpkg.needsrc
-		case grey:
-			panic("internal error: grey node")
-		}
-		lpkg.color = grey
-		stack = append(stack, lpkg) // push
-		stubs := lpkg.Imports       // the structure form has only stubs with the ID in the Imports
-		// If NeedImports isn't set, the imports fields will all be zeroed out.
-		if ld.Mode&NeedImports != 0 {
-			lpkg.Imports = make(map[string]*Package, len(stubs))
-			for importPath, ipkg := range stubs {
-				var importErr error
-				imp := ld.pkgs[ipkg.ID]
-				if imp == nil {
-					// (includes package "C" when DisableCgo)
-					importErr = fmt.Errorf("missing package: %q", ipkg.ID)
-				} else if imp.color == grey {
-					importErr = fmt.Errorf("import cycle: %s", stack)
-				}
-				if importErr != nil {
-					if lpkg.importErrors == nil {
-						lpkg.importErrors = make(map[string]error)
-					}
-					lpkg.importErrors[importPath] = importErr
-					continue
-				}
-
-				if visit(imp) {
-					lpkg.needsrc = true
-				}
-				lpkg.Imports[importPath] = imp.Package
-			}
-		}
-		if lpkg.needsrc {
-			srcPkgs = append(srcPkgs, lpkg)
-		}
-		if ld.Mode&NeedTypesSizes != 0 {
-			lpkg.TypesSizes = ld.sizes
-		}
-		stack = stack[:len(stack)-1] // pop
-		lpkg.color = black
-
-		return lpkg.needsrc
-	}
-
-	if ld.Mode&NeedImports == 0 {
-		// We do this to drop the stub import packages that we are not even going to try to resolve.
-		for _, lpkg := range initial {
-			lpkg.Imports = nil
-		}
-	} else {
-		// For each initial package, create its import DAG.
-		for _, lpkg := range initial {
-			visit(lpkg)
-		}
-	}
-	if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 {
-		for _, lpkg := range srcPkgs {
-			// Complete type information is required for the
-			// immediate dependencies of each source package.
-			for _, ipkg := range lpkg.Imports {
-				imp := ld.pkgs[ipkg.ID]
-				imp.needtypes = true
-			}
-		}
-	}
-	// Load type data and syntax if needed, starting at
-	// the initial packages (roots of the import DAG).
-	if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
-		var wg sync.WaitGroup
-		for _, lpkg := range initial {
-			wg.Add(1)
-			go func(lpkg *loaderPackage) {
-				ld.loadRecursive(lpkg)
-				wg.Done()
-			}(lpkg)
-		}
-		wg.Wait()
-	}
-
-	result := make([]*Package, len(initial))
-	for i, lpkg := range initial {
-		result[i] = lpkg.Package
-	}
-	for i := range ld.pkgs {
-		// Clear all unrequested fields,
-		// to catch programs that use more than they request.
-		if ld.requestedMode&NeedName == 0 {
-			ld.pkgs[i].Name = ""
-			ld.pkgs[i].PkgPath = ""
-		}
-		if ld.requestedMode&NeedFiles == 0 {
-			ld.pkgs[i].GoFiles = nil
-			ld.pkgs[i].OtherFiles = nil
-			ld.pkgs[i].IgnoredFiles = nil
-		}
-		if ld.requestedMode&NeedCompiledGoFiles == 0 {
-			ld.pkgs[i].CompiledGoFiles = nil
-		}
-		if ld.requestedMode&NeedImports == 0 {
-			ld.pkgs[i].Imports = nil
-		}
-		if ld.requestedMode&NeedExportsFile == 0 {
-			ld.pkgs[i].ExportFile = ""
-		}
-		if ld.requestedMode&NeedTypes == 0 {
-			ld.pkgs[i].Types = nil
-			ld.pkgs[i].Fset = nil
-			ld.pkgs[i].IllTyped = false
-		}
-		if ld.requestedMode&NeedSyntax == 0 {
-			ld.pkgs[i].Syntax = nil
-		}
-		if ld.requestedMode&NeedTypesInfo == 0 {
-			ld.pkgs[i].TypesInfo = nil
-		}
-		if ld.requestedMode&NeedTypesSizes == 0 {
-			ld.pkgs[i].TypesSizes = nil
-		}
-		if ld.requestedMode&NeedModule == 0 {
-			ld.pkgs[i].Module = nil
-		}
-	}
-
-	return result, nil
-}
-
-// loadRecursive loads the specified package and its dependencies,
-// recursively, in parallel, in topological order.
-// It is atomic and idempotent.
-// Precondition: ld.Mode&NeedTypes.
-func (ld *loader) loadRecursive(lpkg *loaderPackage) {
-	lpkg.loadOnce.Do(func() {
-		// Load the direct dependencies, in parallel.
-		var wg sync.WaitGroup
-		for _, ipkg := range lpkg.Imports {
-			imp := ld.pkgs[ipkg.ID]
-			wg.Add(1)
-			go func(imp *loaderPackage) {
-				ld.loadRecursive(imp)
-				wg.Done()
-			}(imp)
-		}
-		wg.Wait()
-		ld.loadPackage(lpkg)
-	})
-}
-
-// loadPackage loads the specified package.
-// It must be called only once per Package,
-// after immediate dependencies are loaded.
-// Precondition: ld.Mode & NeedTypes.
-func (ld *loader) loadPackage(lpkg *loaderPackage) {
-	if lpkg.PkgPath == "unsafe" {
-		// Fill in the blanks to avoid surprises.
-		lpkg.Types = types.Unsafe
-		lpkg.Fset = ld.Fset
-		lpkg.Syntax = []*ast.File{}
-		lpkg.TypesInfo = new(types.Info)
-		lpkg.TypesSizes = ld.sizes
-		return
-	}
-
-	// Call NewPackage directly with explicit name.
-	// This avoids skew between golist and go/types when the files'
-	// package declarations are inconsistent.
-	lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name)
-	lpkg.Fset = ld.Fset
-
-	// Subtle: we populate all Types fields with an empty Package
-	// before loading export data so that export data processing
-	// never has to create a types.Package for an indirect dependency,
-	// which would then require that such created packages be explicitly
-	// inserted back into the Import graph as a final step after export data loading.
-	// The Diamond test exercises this case.
-	if !lpkg.needtypes && !lpkg.needsrc {
-		return
-	}
-	if !lpkg.needsrc {
-		ld.loadFromExportData(lpkg)
-		return // not a source package, don't get syntax trees
-	}
-
-	appendError := func(err error) {
-		// Convert various error types into the one true Error.
-		var errs []Error
-		switch err := err.(type) {
-		case Error:
-			// from driver
-			errs = append(errs, err)
-
-		case *os.PathError:
-			// from parser
-			errs = append(errs, Error{
-				Pos:  err.Path + ":1",
-				Msg:  err.Err.Error(),
-				Kind: ParseError,
-			})
-
-		case scanner.ErrorList:
-			// from parser
-			for _, err := range err {
-				errs = append(errs, Error{
-					Pos:  err.Pos.String(),
-					Msg:  err.Msg,
-					Kind: ParseError,
-				})
-			}
-
-		case types.Error:
-			// from type checker
-			errs = append(errs, Error{
-				Pos:  err.Fset.Position(err.Pos).String(),
-				Msg:  err.Msg,
-				Kind: TypeError,
-			})
-
-		default:
-			// unexpected impoverished error from parser?
-			errs = append(errs, Error{
-				Pos:  "-",
-				Msg:  err.Error(),
-				Kind: UnknownError,
-			})
-
-			// If you see this error message, please file a bug.
-			log.Printf("internal error: error %q (%T) without position", err, err)
-		}
-
-		lpkg.Errors = append(lpkg.Errors, errs...)
-	}
-
-	if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" {
-		// The config requested loading sources and types, but sources are missing.
-		// Add an error to the package and fall back to loading from export data.
-		appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError})
-		ld.loadFromExportData(lpkg)
-		return // can't get syntax trees for this package
-	}
-
-	files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
-	for _, err := range errs {
-		appendError(err)
-	}
-
-	lpkg.Syntax = files
-	if ld.Config.Mode&NeedTypes == 0 {
-		return
-	}
-
-	lpkg.TypesInfo = &types.Info{
-		Types:      make(map[ast.Expr]types.TypeAndValue),
-		Defs:       make(map[*ast.Ident]types.Object),
-		Uses:       make(map[*ast.Ident]types.Object),
-		Implicits:  make(map[ast.Node]types.Object),
-		Scopes:     make(map[ast.Node]*types.Scope),
-		Selections: make(map[*ast.SelectorExpr]*types.Selection),
-	}
-	lpkg.TypesSizes = ld.sizes
-
-	importer := importerFunc(func(path string) (*types.Package, error) {
-		if path == "unsafe" {
-			return types.Unsafe, nil
-		}
-
-		// The imports map is keyed by import path.
-		ipkg := lpkg.Imports[path]
-		if ipkg == nil {
-			if err := lpkg.importErrors[path]; err != nil {
-				return nil, err
-			}
-			// There was skew between the metadata and the
-			// import declarations, likely due to an edit
-			// race, or because the ParseFile feature was
-			// used to supply alternative file contents.
-			return nil, fmt.Errorf("no metadata for %s", path)
-		}
-
-		if ipkg.Types != nil && ipkg.Types.Complete() {
-			return ipkg.Types, nil
-		}
-		log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg)
-		panic("unreachable")
-	})
-
-	// type-check
-	tc := &types.Config{
-		Importer: importer,
-
-		// Type-check bodies of functions only in non-initial packages.
-		// Example: for import graph A->B->C and initial packages {A,C},
-		// we can ignore function bodies in B.
-		IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
-
-		Error: appendError,
-		Sizes: ld.sizes,
-	}
-	if (ld.Mode & typecheckCgo) != 0 {
-		if !typesinternal.SetUsesCgo(tc) {
-			appendError(Error{
-				Msg:  "typecheckCgo requires Go 1.15+",
-				Kind: ListError,
-			})
-			return
-		}
-	}
-	types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
-
-	lpkg.importErrors = nil // no longer needed
-
-	// If !Cgo, the type-checker uses FakeImportC mode, so
-	// it doesn't invoke the importer for import "C",
-	// nor report an error for the import,
-	// or for any undefined C.f reference.
-	// We must detect this explicitly and correctly
-	// mark the package as IllTyped (by reporting an error).
-	// TODO(adonovan): if these errors are annoying,
-	// we could just set IllTyped quietly.
-	if tc.FakeImportC {
-	outer:
-		for _, f := range lpkg.Syntax {
-			for _, imp := range f.Imports {
-				if imp.Path.Value == `"C"` {
-					err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`}
-					appendError(err)
-					break outer
-				}
-			}
-		}
-	}
-
-	// Record accumulated errors.
-	illTyped := len(lpkg.Errors) > 0
-	if !illTyped {
-		for _, imp := range lpkg.Imports {
-			if imp.IllTyped {
-				illTyped = true
-				break
-			}
-		}
-	}
-	lpkg.IllTyped = illTyped
-}
-
-// An importFunc is an implementation of the single-method
-// types.Importer interface based on a function value.
-type importerFunc func(path string) (*types.Package, error)
-
-func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
-
-// We use a counting semaphore to limit
-// the number of parallel I/O calls per process.
-var ioLimit = make(chan bool, 20)
-
-func (ld *loader) parseFile(filename string) (*ast.File, error) {
-	ld.parseCacheMu.Lock()
-	v, ok := ld.parseCache[filename]
-	if ok {
-		// cache hit
-		ld.parseCacheMu.Unlock()
-		<-v.ready
-	} else {
-		// cache miss
-		v = &parseValue{ready: make(chan struct{})}
-		ld.parseCache[filename] = v
-		ld.parseCacheMu.Unlock()
-
-		var src []byte
-		for f, contents := range ld.Config.Overlay {
-			if sameFile(f, filename) {
-				src = contents
-			}
-		}
-		var err error
-		if src == nil {
-			ioLimit <- true // wait
-			src, err = ioutil.ReadFile(filename)
-			<-ioLimit // signal
-		}
-		if err != nil {
-			v.err = err
-		} else {
-			v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
-		}
-
-		close(v.ready)
-	}
-	return v.f, v.err
-}
-
-// parseFiles reads and parses the Go source files and returns the ASTs
-// of the ones that could be at least partially parsed, along with a
-// list of I/O and parse errors encountered.
-//
-// Because files are scanned in parallel, the token.Pos
-// positions of the resulting ast.Files are not ordered.
-//
-func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
-	var wg sync.WaitGroup
-	n := len(filenames)
-	parsed := make([]*ast.File, n)
-	errors := make([]error, n)
-	for i, file := range filenames {
-		if ld.Config.Context.Err() != nil {
-			parsed[i] = nil
-			errors[i] = ld.Config.Context.Err()
-			continue
-		}
-		wg.Add(1)
-		go func(i int, filename string) {
-			parsed[i], errors[i] = ld.parseFile(filename)
-			wg.Done()
-		}(i, file)
-	}
-	wg.Wait()
-
-	// Eliminate nils, preserving order.
-	var o int
-	for _, f := range parsed {
-		if f != nil {
-			parsed[o] = f
-			o++
-		}
-	}
-	parsed = parsed[:o]
-
-	o = 0
-	for _, err := range errors {
-		if err != nil {
-			errors[o] = err
-			o++
-		}
-	}
-	errors = errors[:o]
-
-	return parsed, errors
-}
-
-// sameFile returns true if x and y have the same basename and denote
-// the same file.
-//
-func sameFile(x, y string) bool {
-	if x == y {
-		// It could be the case that y doesn't exist.
-		// For instance, it may be an overlay file that
-		// hasn't been written to disk. To handle that case
-		// let x == y through. (We added the exact absolute path
-		// string to the CompiledGoFiles list, so the unwritten
-		// overlay case implies x==y.)
-		return true
-	}
-	if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation)
-		if xi, err := os.Stat(x); err == nil {
-			if yi, err := os.Stat(y); err == nil {
-				return os.SameFile(xi, yi)
-			}
-		}
-	}
-	return false
-}
-
-// loadFromExportData returns type information for the specified
-// package, loading it from an export data file on the first request.
-func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) {
-	if lpkg.PkgPath == "" {
-		log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
-	}
-
-	// Because gcexportdata.Read has the potential to create or
-	// modify the types.Package for each node in the transitive
-	// closure of dependencies of lpkg, all exportdata operations
-	// must be sequential. (Finer-grained locking would require
-	// changes to the gcexportdata API.)
-	//
-	// The exportMu lock guards the Package.Pkg field and the
-	// types.Package it points to, for each Package in the graph.
-	//
-	// Not all accesses to Package.Pkg need to be protected by exportMu:
-	// graph ordering ensures that direct dependencies of source
-	// packages are fully loaded before the importer reads their Pkg field.
-	ld.exportMu.Lock()
-	defer ld.exportMu.Unlock()
-
-	if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
-		return tpkg, nil // cache hit
-	}
-
-	lpkg.IllTyped = true // fail safe
-
-	if lpkg.ExportFile == "" {
-		// Errors while building export data will have been printed to stderr.
-		return nil, fmt.Errorf("no export data file")
-	}
-	f, err := os.Open(lpkg.ExportFile)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	// Read gc export data.
-	//
-	// We don't currently support gccgo export data because all
-	// underlying workspaces use the gc toolchain. (Even build
-	// systems that support gccgo don't use it for workspace
-	// queries.)
-	r, err := gcexportdata.NewReader(f)
-	if err != nil {
-		return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
-	}
-
-	// Build the view.
-	//
-	// The gcexportdata machinery has no concept of package ID.
-	// It identifies packages by their PkgPath, which although not
-	// globally unique is unique within the scope of one invocation
-	// of the linker, type-checker, or gcexportdata.
-	//
-	// So, we must build a PkgPath-keyed view of the global
-	// (conceptually ID-keyed) cache of packages and pass it to
-	// gcexportdata. The view must contain every existing
-	// package that might possibly be mentioned by the
-	// current package---its transitive closure.
-	//
-	// In loadPackage, we unconditionally create a types.Package for
-	// each dependency so that export data loading does not
-	// create new ones.
-	//
-	// TODO(adonovan): it would be simpler and more efficient
-	// if the export data machinery invoked a callback to
-	// get-or-create a package instead of a map.
-	//
-	view := make(map[string]*types.Package) // view seen by gcexportdata
-	seen := make(map[*loaderPackage]bool)   // all visited packages
-	var visit func(pkgs map[string]*Package)
-	visit = func(pkgs map[string]*Package) {
-		for _, p := range pkgs {
-			lpkg := ld.pkgs[p.ID]
-			if !seen[lpkg] {
-				seen[lpkg] = true
-				view[lpkg.PkgPath] = lpkg.Types
-				visit(lpkg.Imports)
-			}
-		}
-	}
-	visit(lpkg.Imports)
-
-	viewLen := len(view) + 1 // adding the self package
-	// Parse the export data.
-	// (May modify incomplete packages in view but not create new ones.)
-	tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
-	if err != nil {
-		return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
-	}
-	if viewLen != len(view) {
-		log.Fatalf("Unexpected package creation during export data loading")
-	}
-
-	lpkg.Types = tpkg
-	lpkg.IllTyped = false
-
-	return tpkg, nil
-}
-
-// impliedLoadMode returns loadMode with its dependencies.
-func impliedLoadMode(loadMode LoadMode) LoadMode {
-	if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 {
-		// If NeedTypesInfo, go/packages needs to do typechecking itself so it can
-		// associate type info with the AST. To do so, we need the export data
-		// for dependencies, which means we need to ask for the direct dependencies.
-		// NeedImports is used to ask for the direct dependencies.
-		loadMode |= NeedImports
-	}
-
-	if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 {
-		// With NeedDeps we need to load at least direct dependencies.
-		// NeedImports is used to ask for the direct dependencies.
-		loadMode |= NeedImports
-	}
-
-	return loadMode
-}
-
-func usesExportData(cfg *Config) bool {
-	return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
-}

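For context, an illustrative sketch (not part of this diff) of how callers typically drive the Load/Config API removed above; the patterns "fmt" and "net/http" are arbitrary examples:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// Request only the fields read below; Load may return more.
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports,
	}
	pkgs, err := packages.Load(cfg, "fmt", "net/http")
	if err != nil {
		log.Fatal(err) // invalid pattern or build-system failure
	}
	// Per-package problems are recorded in pkg.Errors, not in err.
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, len(pkg.GoFiles), "Go files")
	}
}
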
+ 0 - 59
vendor/golang.org/x/tools/go/packages/visit.go

@@ -1,59 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packages
-
-import (
-	"fmt"
-	"os"
-	"sort"
-)
-
-// Visit visits all the packages in the import graph whose roots are
-// pkgs, calling the optional pre function the first time each package
-// is encountered (preorder), and the optional post function after a
-// package's dependencies have been visited (postorder).
-// The boolean result of pre(pkg) determines whether
-// the imports of package pkg are visited.
-func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
-	seen := make(map[*Package]bool)
-	var visit func(*Package)
-	visit = func(pkg *Package) {
-		if !seen[pkg] {
-			seen[pkg] = true
-
-			if pre == nil || pre(pkg) {
-				paths := make([]string, 0, len(pkg.Imports))
-				for path := range pkg.Imports {
-					paths = append(paths, path)
-				}
-				sort.Strings(paths) // Imports is a map, this makes visit stable
-				for _, path := range paths {
-					visit(pkg.Imports[path])
-				}
-			}
-
-			if post != nil {
-				post(pkg)
-			}
-		}
-	}
-	for _, pkg := range pkgs {
-		visit(pkg)
-	}
-}
-
-// PrintErrors prints to os.Stderr the accumulated errors of all
-// packages in the import graph rooted at pkgs, dependencies first.
-// PrintErrors returns the number of errors printed.
-func PrintErrors(pkgs []*Package) int {
-	var n int
-	Visit(pkgs, nil, func(pkg *Package) {
-		for _, err := range pkg.Errors {
-			fmt.Fprintln(os.Stderr, err)
-			n++
-		}
-	})
-	return n
-}

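A small sketch of the removed Visit helper: walking the import graph in post-order so dependencies come before their importers. The listDepsFirst name is made up for illustration; "bytes" is an arbitrary example pattern.

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

// listDepsFirst returns every package ID in the import graph rooted at
// roots, dependencies first (post-order), each ID exactly once.
func listDepsFirst(roots []*packages.Package) []string {
	var ids []string
	packages.Visit(roots, nil, func(p *packages.Package) {
		ids = append(ids, p.ID)
	})
	return ids
}

func main() {
	// NeedDeps together with NeedImports loads the full dependency graph.
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedImports | packages.NeedDeps}
	pkgs, err := packages.Load(cfg, "bytes")
	if err != nil {
		return
	}
	for _, id := range listDepsFirst(pkgs) {
		fmt.Println(id)
	}
}
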
+ 0 - 46
vendor/golang.org/x/tools/go/types/typeutil/callee.go

@@ -1,46 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeutil
-
-import (
-	"go/ast"
-	"go/types"
-
-	"golang.org/x/tools/go/ast/astutil"
-)
-
-// Callee returns the named target of a function call, if any:
-// a function, method, builtin, or variable.
-func Callee(info *types.Info, call *ast.CallExpr) types.Object {
-	var obj types.Object
-	switch fun := astutil.Unparen(call.Fun).(type) {
-	case *ast.Ident:
-		obj = info.Uses[fun] // type, var, builtin, or declared func
-	case *ast.SelectorExpr:
-		if sel, ok := info.Selections[fun]; ok {
-			obj = sel.Obj() // method or field
-		} else {
-			obj = info.Uses[fun.Sel] // qualified identifier?
-		}
-	}
-	if _, ok := obj.(*types.TypeName); ok {
-		return nil // T(x) is a conversion, not a call
-	}
-	return obj
-}
-
-// StaticCallee returns the target (function or method) of a static
-// function call, if any. It returns nil for calls to builtins.
-func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
-	if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
-		return f
-	}
-	return nil
-}
-
-func interfaceMethod(f *types.Func) bool {
-	recv := f.Type().(*types.Signature).Recv()
-	return recv != nil && types.IsInterface(recv.Type())
-}

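Illustrative only: a sketch of how the removed Callee/StaticCallee helpers are typically used to resolve call targets in type-checked syntax; reportStaticCalls is a hypothetical helper name.

package example

import (
	"fmt"
	"go/ast"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

// reportStaticCalls prints the fully qualified name of every call whose
// target is a statically known function or method; builtins and dynamic
// calls through interfaces or function values are skipped.
func reportStaticCalls(info *types.Info, files []*ast.File) {
	for _, f := range files {
		ast.Inspect(f, func(n ast.Node) bool {
			if call, ok := n.(*ast.CallExpr); ok {
				if fn := typeutil.StaticCallee(info, call); fn != nil {
					fmt.Println(fn.FullName())
				}
			}
			return true
		})
	}
}
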
+ 0 - 31
vendor/golang.org/x/tools/go/types/typeutil/imports.go

@@ -1,31 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeutil
-
-import "go/types"
-
-// Dependencies returns all dependencies of the specified packages.
-//
-// Dependent packages appear in topological order: if package P imports
-// package Q, Q appears earlier than P in the result.
-// The algorithm follows import statements in the order they
-// appear in the source code, so the result is a total order.
-//
-func Dependencies(pkgs ...*types.Package) []*types.Package {
-	var result []*types.Package
-	seen := make(map[*types.Package]bool)
-	var visit func(pkgs []*types.Package)
-	visit = func(pkgs []*types.Package) {
-		for _, p := range pkgs {
-			if !seen[p] {
-				seen[p] = true
-				visit(p.Imports())
-				result = append(result, p)
-			}
-		}
-	}
-	visit(pkgs)
-	return result
-}

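A brief sketch of the removed Dependencies helper; the printDeps name is illustrative. Given a type-checked package, it yields the transitive dependencies with each dependency ordered before its importers:

package example

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

// printDeps prints the import path of every package that pkg depends on,
// dependencies first, followed by pkg itself.
func printDeps(pkg *types.Package) {
	for _, dep := range typeutil.Dependencies(pkg) {
		fmt.Println(dep.Path())
	}
}
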
+ 0 - 313
vendor/golang.org/x/tools/go/types/typeutil/map.go

@@ -1,313 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package typeutil defines various utilities for types, such as Map,
-// a mapping from types.Type to interface{} values.
-package typeutil // import "golang.org/x/tools/go/types/typeutil"
-
-import (
-	"bytes"
-	"fmt"
-	"go/types"
-	"reflect"
-)
-
-// Map is a hash-table-based mapping from types (types.Type) to
-// arbitrary interface{} values.  The concrete types that implement
-// the Type interface are pointers.  Since they are not canonicalized,
-// == cannot be used to check for equivalence, and thus we cannot
-// simply use a Go map.
-//
-// Just as with map[K]V, a nil *Map is a valid empty map.
-//
-// Not thread-safe.
-//
-type Map struct {
-	hasher Hasher             // shared by many Maps
-	table  map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
-	length int                // number of map entries
-}
-
-// entry is an entry (key/value association) in a hash bucket.
-type entry struct {
-	key   types.Type
-	value interface{}
-}
-
-// SetHasher sets the hasher used by Map.
-//
-// All Hashers are functionally equivalent but contain internal state
-// used to cache the results of hashing previously seen types.
-//
-// A single Hasher created by MakeHasher() may be shared among many
-// Maps.  This is recommended if the instances have many keys in
-// common, as it will amortize the cost of hash computation.
-//
-// A Hasher may grow without bound as new types are seen.  Even when a
-// type is deleted from the map, the Hasher never shrinks, since other
-// types in the map may reference the deleted type indirectly.
-//
-// Hashers are not thread-safe, and read-only operations such as
-// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
-// read-lock) is require around all Map operations if a shared
-// hasher is accessed from multiple threads.
-//
-// If SetHasher is not called, the Map will create a private hasher at
-// the first call to Insert.
-//
-func (m *Map) SetHasher(hasher Hasher) {
-	m.hasher = hasher
-}
-
-// Delete removes the entry with the given key, if any.
-// It returns true if the entry was found.
-//
-func (m *Map) Delete(key types.Type) bool {
-	if m != nil && m.table != nil {
-		hash := m.hasher.Hash(key)
-		bucket := m.table[hash]
-		for i, e := range bucket {
-			if e.key != nil && types.Identical(key, e.key) {
-				// We can't compact the bucket as it
-				// would disturb iterators.
-				bucket[i] = entry{}
-				m.length--
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// At returns the map entry for the given key.
-// The result is nil if the entry is not present.
-//
-func (m *Map) At(key types.Type) interface{} {
-	if m != nil && m.table != nil {
-		for _, e := range m.table[m.hasher.Hash(key)] {
-			if e.key != nil && types.Identical(key, e.key) {
-				return e.value
-			}
-		}
-	}
-	return nil
-}
-
-// Set sets the map entry for key to val,
-// and returns the previous entry, if any.
-func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
-	if m.table != nil {
-		hash := m.hasher.Hash(key)
-		bucket := m.table[hash]
-		var hole *entry
-		for i, e := range bucket {
-			if e.key == nil {
-				hole = &bucket[i]
-			} else if types.Identical(key, e.key) {
-				prev = e.value
-				bucket[i].value = value
-				return
-			}
-		}
-
-		if hole != nil {
-			*hole = entry{key, value} // overwrite deleted entry
-		} else {
-			m.table[hash] = append(bucket, entry{key, value})
-		}
-	} else {
-		if m.hasher.memo == nil {
-			m.hasher = MakeHasher()
-		}
-		hash := m.hasher.Hash(key)
-		m.table = map[uint32][]entry{hash: {entry{key, value}}}
-	}
-
-	m.length++
-	return
-}
-
-// Len returns the number of map entries.
-func (m *Map) Len() int {
-	if m != nil {
-		return m.length
-	}
-	return 0
-}
-
-// Iterate calls function f on each entry in the map in unspecified order.
-//
-// If f should mutate the map, Iterate provides the same guarantees as
-// Go maps: if f deletes a map entry that Iterate has not yet reached,
-// f will not be invoked for it, but if f inserts a map entry that
-// Iterate has not yet reached, whether or not f will be invoked for
-// it is unspecified.
-//
-func (m *Map) Iterate(f func(key types.Type, value interface{})) {
-	if m != nil {
-		for _, bucket := range m.table {
-			for _, e := range bucket {
-				if e.key != nil {
-					f(e.key, e.value)
-				}
-			}
-		}
-	}
-}
-
-// Keys returns a new slice containing the set of map keys.
-// The order is unspecified.
-func (m *Map) Keys() []types.Type {
-	keys := make([]types.Type, 0, m.Len())
-	m.Iterate(func(key types.Type, _ interface{}) {
-		keys = append(keys, key)
-	})
-	return keys
-}
-
-func (m *Map) toString(values bool) string {
-	if m == nil {
-		return "{}"
-	}
-	var buf bytes.Buffer
-	fmt.Fprint(&buf, "{")
-	sep := ""
-	m.Iterate(func(key types.Type, value interface{}) {
-		fmt.Fprint(&buf, sep)
-		sep = ", "
-		fmt.Fprint(&buf, key)
-		if values {
-			fmt.Fprintf(&buf, ": %q", value)
-		}
-	})
-	fmt.Fprint(&buf, "}")
-	return buf.String()
-}
-
-// String returns a string representation of the map's entries.
-// Values are printed using fmt.Sprintf("%v", v).
-// Order is unspecified.
-//
-func (m *Map) String() string {
-	return m.toString(true)
-}
-
-// KeysString returns a string representation of the map's key set.
-// Order is unspecified.
-//
-func (m *Map) KeysString() string {
-	return m.toString(false)
-}
-
-////////////////////////////////////////////////////////////////////////
-// Hasher
-
-// A Hasher maps each type to its hash value.
-// For efficiency, a hasher uses memoization; thus its memory
-// footprint grows monotonically over time.
-// Hashers are not thread-safe.
-// Hashers have reference semantics.
-// Call MakeHasher to create a Hasher.
-type Hasher struct {
-	memo map[types.Type]uint32
-}
-
-// MakeHasher returns a new Hasher instance.
-func MakeHasher() Hasher {
-	return Hasher{make(map[types.Type]uint32)}
-}
-
-// Hash computes a hash value for the given type t such that
-// Identical(t, t') => Hash(t) == Hash(t').
-func (h Hasher) Hash(t types.Type) uint32 {
-	hash, ok := h.memo[t]
-	if !ok {
-		hash = h.hashFor(t)
-		h.memo[t] = hash
-	}
-	return hash
-}
-
-// hashString computes the Fowler–Noll–Vo hash of s.
-func hashString(s string) uint32 {
-	var h uint32
-	for i := 0; i < len(s); i++ {
-		h ^= uint32(s[i])
-		h *= 16777619
-	}
-	return h
-}
-
-// hashFor computes the hash of t.
-func (h Hasher) hashFor(t types.Type) uint32 {
-	// See Identical for rationale.
-	switch t := t.(type) {
-	case *types.Basic:
-		return uint32(t.Kind())
-
-	case *types.Array:
-		return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
-
-	case *types.Slice:
-		return 9049 + 2*h.Hash(t.Elem())
-
-	case *types.Struct:
-		var hash uint32 = 9059
-		for i, n := 0, t.NumFields(); i < n; i++ {
-			f := t.Field(i)
-			if f.Anonymous() {
-				hash += 8861
-			}
-			hash += hashString(t.Tag(i))
-			hash += hashString(f.Name()) // (ignore f.Pkg)
-			hash += h.Hash(f.Type())
-		}
-		return hash
-
-	case *types.Pointer:
-		return 9067 + 2*h.Hash(t.Elem())
-
-	case *types.Signature:
-		var hash uint32 = 9091
-		if t.Variadic() {
-			hash *= 8863
-		}
-		return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
-
-	case *types.Interface:
-		var hash uint32 = 9103
-		for i, n := 0, t.NumMethods(); i < n; i++ {
-			// See go/types.identicalMethods for rationale.
-			// Method order is not significant.
-			// Ignore m.Pkg().
-			m := t.Method(i)
-			hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
-		}
-		return hash
-
-	case *types.Map:
-		return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
-
-	case *types.Chan:
-		return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
-
-	case *types.Named:
-		// Not safe with a copying GC; objects may move.
-		return uint32(reflect.ValueOf(t.Obj()).Pointer())
-
-	case *types.Tuple:
-		return h.hashTuple(t)
-	}
-	panic(t)
-}
-
-func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
-	// See go/types.identicalTypes for rationale.
-	n := tuple.Len()
-	var hash uint32 = 9137 + 2*uint32(n)
-	for i := 0; i < n; i++ {
-		hash += 3 * h.Hash(tuple.At(i).Type())
-	}
-	return hash
-}

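For context, a minimal sketch of the removed typeutil.Map in use: keys are compared with types.Identical rather than ==, so a plain Go map keyed on types.Type would not behave this way.

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map // the zero value is ready to use

	str := types.Typ[types.String]
	ptrStr := types.NewPointer(str)

	m.Set(str, "string key")
	m.Set(ptrStr, "*string key")

	// Lookup uses type identity, not pointer equality of the key values,
	// so a freshly constructed *string still finds the entry.
	fmt.Println(m.Len())                     // 2
	fmt.Println(m.At(types.NewPointer(str))) // "*string key"
	m.Iterate(func(k types.Type, v interface{}) { // unspecified order
		fmt.Println(k, "=>", v)
	})
}
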
+ 0 - 72
vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go

@@ -1,72 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements a cache of method sets.
-
-package typeutil
-
-import (
-	"go/types"
-	"sync"
-)
-
-// A MethodSetCache records the method set of each type T for which
-// MethodSet(T) is called so that repeat queries are fast.
-// The zero value is a ready-to-use cache instance.
-type MethodSetCache struct {
-	mu     sync.Mutex
-	named  map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
-	others map[types.Type]*types.MethodSet                            // all other types
-}
-
-// MethodSet returns the method set of type T.  It is thread-safe.
-//
-// If cache is nil, this function is equivalent to types.NewMethodSet(T).
-// Utility functions can thus expose an optional *MethodSetCache
-// parameter to clients that care about performance.
-//
-func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
-	if cache == nil {
-		return types.NewMethodSet(T)
-	}
-	cache.mu.Lock()
-	defer cache.mu.Unlock()
-
-	switch T := T.(type) {
-	case *types.Named:
-		return cache.lookupNamed(T).value
-
-	case *types.Pointer:
-		if N, ok := T.Elem().(*types.Named); ok {
-			return cache.lookupNamed(N).pointer
-		}
-	}
-
-	// all other types
-	// (The map uses pointer equivalence, not type identity.)
-	mset := cache.others[T]
-	if mset == nil {
-		mset = types.NewMethodSet(T)
-		if cache.others == nil {
-			cache.others = make(map[types.Type]*types.MethodSet)
-		}
-		cache.others[T] = mset
-	}
-	return mset
-}
-
-func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
-	if cache.named == nil {
-		cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
-	}
-	// Avoid recomputing mset(*T) for each distinct Pointer
-	// instance whose underlying type is a named type.
-	msets, ok := cache.named[named]
-	if !ok {
-		msets.value = types.NewMethodSet(named)
-		msets.pointer = types.NewMethodSet(types.NewPointer(named))
-		cache.named[named] = msets
-	}
-	return msets
-}

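A short, illustrative sketch of the removed MethodSetCache: repeated method-set queries for the same type hit the cache instead of being recomputed. The methodNames helper is hypothetical.

package example

import (
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

// cache is shared so that repeated queries for the same type are cheap.
// A nil *MethodSetCache would also work, but without any caching.
var cache typeutil.MethodSetCache

// methodNames returns the names of the methods in T's method set.
func methodNames(T types.Type) []string {
	mset := cache.MethodSet(T)
	names := make([]string, 0, mset.Len())
	for i := 0; i < mset.Len(); i++ {
		names = append(names, mset.At(i).Obj().Name())
	}
	return names
}
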
+ 0 - 52
vendor/golang.org/x/tools/go/types/typeutil/ui.go

@@ -1,52 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeutil
-
-// This file defines utilities for user interfaces that display types.
-
-import "go/types"
-
-// IntuitiveMethodSet returns the intuitive method set of a type T,
-// which is the set of methods you can call on an addressable value of
-// that type.
-//
-// The result always contains MethodSet(T), and is exactly MethodSet(T)
-// for interface types and for pointer-to-concrete types.
-// For all other concrete types T, the result additionally
-// contains each method belonging to *T if there is no identically
-// named method on T itself.
-//
-// This corresponds to user intuition about method sets;
-// this function is intended only for user interfaces.
-//
-// The order of the result is as for types.MethodSet(T).
-//
-func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
-	isPointerToConcrete := func(T types.Type) bool {
-		ptr, ok := T.(*types.Pointer)
-		return ok && !types.IsInterface(ptr.Elem())
-	}
-
-	var result []*types.Selection
-	mset := msets.MethodSet(T)
-	if types.IsInterface(T) || isPointerToConcrete(T) {
-		for i, n := 0, mset.Len(); i < n; i++ {
-			result = append(result, mset.At(i))
-		}
-	} else {
-		// T is some other concrete type.
-		// Report methods of T and *T, preferring those of T.
-		pmset := msets.MethodSet(types.NewPointer(T))
-		for i, n := 0, pmset.Len(); i < n; i++ {
-			meth := pmset.At(i)
-			if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
-				meth = m
-			}
-			result = append(result, meth)
-		}
-
-	}
-	return result
-}

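An illustrative sketch of the removed IntuitiveMethodSet helper, which is aimed at user interfaces: for a concrete non-pointer type it also reports the *T methods callable on an addressable value. The describeMethods name is made up for this example.

package example

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

// describeMethods prints every method a user would intuitively expect to be
// able to call on an addressable value of type T.
func describeMethods(T types.Type, cache *typeutil.MethodSetCache) {
	for _, sel := range typeutil.IntuitiveMethodSet(T, cache) {
		fmt.Println(sel.Obj().Name(), sel.Type())
	}
}
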
+ 0 - 21
vendor/golang.org/x/tools/internal/packagesinternal/packages.go

@@ -1,21 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package packagesinternal exposes internal-only fields from go/packages.
-package packagesinternal
-
-import (
-	"golang.org/x/tools/internal/gocommand"
-)
-
-var GetForTest = func(p interface{}) string { return "" }
-
-var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil }
-
-var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {}
-
-var TypecheckCgo int
-
-var SetModFlag = func(config interface{}, value string) {}
-var SetModFile = func(config interface{}, value string) {}

+ 0 - 1358
vendor/golang.org/x/tools/internal/typesinternal/errorcode.go

@@ -1,1358 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typesinternal
-
-//go:generate stringer -type=ErrorCode
-
-type ErrorCode int
-
-// This file defines the error codes that can be produced during type-checking.
-// Collectively, these codes provide an identifier that may be used to
-// implement special handling for certain types of errors.
-//
-// Error codes should be fine-grained enough that the exact nature of the error
-// can be easily determined, but coarse enough that they are not an
-// implementation detail of the type checking algorithm. As a rule-of-thumb,
-// errors should be considered equivalent if there is a theoretical refactoring
-// of the type checker in which they are emitted in exactly one place. For
-// example, the type checker emits different error messages for "too many
-// arguments" and "too few arguments", but one can imagine an alternative type
-// checker where this check instead just emits a single "wrong number of
-// arguments", so these errors should have the same code.
-//
-// Error code names should be as brief as possible while retaining accuracy and
-// distinctiveness. In most cases names should start with an adjective
-// describing the nature of the error (e.g. "invalid", "unused", "misplaced"),
-// and end with a noun identifying the relevant language object. For example,
-// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the
-// convention that "bad" implies a problem with syntax, and "invalid" implies a
-// problem with types.
-
-const (
-	_ ErrorCode = iota
-
-	// Test is reserved for errors that only apply while in self-test mode.
-	Test
-
-	/* package names */
-
-	// BlankPkgName occurs when a package name is the blank identifier "_".
-	//
-	// Per the spec:
-	//  "The PackageName must not be the blank identifier."
-	BlankPkgName
-
-	// MismatchedPkgName occurs when a file's package name doesn't match the
-	// package name already established by other files.
-	MismatchedPkgName
-
-	// InvalidPkgUse occurs when a package identifier is used outside of a
-	// selector expression.
-	//
-	// Example:
-	//  import "fmt"
-	//
-	//  var _ = fmt
-	InvalidPkgUse
-
-	/* imports */
-
-	// BadImportPath occurs when an import path is not valid.
-	BadImportPath
-
-	// BrokenImport occurs when importing a package fails.
-	//
-	// Example:
-	//  import "amissingpackage"
-	BrokenImport
-
-	// ImportCRenamed occurs when the special import "C" is renamed. "C" is a
-	// pseudo-package, and must not be renamed.
-	//
-	// Example:
-	//  import _ "C"
-	ImportCRenamed
-
-	// UnusedImport occurs when an import is unused.
-	//
-	// Example:
-	//  import "fmt"
-	//
-	//  func main() {}
-	UnusedImport
-
-	/* initialization */
-
-	// InvalidInitCycle occurs when an invalid cycle is detected within the
-	// initialization graph.
-	//
-	// Example:
-	//  var x int = f()
-	//
-	//  func f() int { return x }
-	InvalidInitCycle
-
-	/* decls */
-
-	// DuplicateDecl occurs when an identifier is declared multiple times.
-	//
-	// Example:
-	//  var x = 1
-	//  var x = 2
-	DuplicateDecl
-
-	// InvalidDeclCycle occurs when a declaration cycle is not valid.
-	//
-	// Example:
-	//  import "unsafe"
-	//
-	//  type T struct {
-	//  	a [n]int
-	//  }
-	//
-	//  var n = unsafe.Sizeof(T{})
-	InvalidDeclCycle
-
-	// InvalidTypeCycle occurs when a cycle in type definitions results in a
-	// type that is not well-defined.
-	//
-	// Example:
-	//  import "unsafe"
-	//
-	//  type T [unsafe.Sizeof(T{})]int
-	InvalidTypeCycle
-
-	/* decls > const */
-
-	// InvalidConstInit occurs when a const declaration has a non-constant
-	// initializer.
-	//
-	// Example:
-	//  var x int
-	//  const _ = x
-	InvalidConstInit
-
-	// InvalidConstVal occurs when a const value cannot be converted to its
-	// target type.
-	//
-	// TODO(findleyr): this error code and example are not very clear. Consider
-	// removing it.
-	//
-	// Example:
-	//  const _ = 1 << "hello"
-	InvalidConstVal
-
-	// InvalidConstType occurs when the underlying type in a const declaration
-	// is not a valid constant type.
-	//
-	// Example:
-	//  const c *int = 4
-	InvalidConstType
-
-	/* decls > var (+ other variable assignment codes) */
-
-	// UntypedNil occurs when the predeclared (untyped) value nil is used to
-	// initialize a variable declared without an explicit type.
-	//
-	// Example:
-	//  var x = nil
-	UntypedNil
-
-	// WrongAssignCount occurs when the number of values on the right-hand side
-	// of an assignment or initialization expression does not match the number
-	// of variables on the left-hand side.
-	//
-	// Example:
-	//  var x = 1, 2
-	WrongAssignCount
-
-	// UnassignableOperand occurs when the left-hand side of an assignment is
-	// not assignable.
-	//
-	// Example:
-	//  func f() {
-	//  	const c = 1
-	//  	c = 2
-	//  }
-	UnassignableOperand
-
-	// NoNewVar occurs when a short variable declaration (':=') does not declare
-	// new variables.
-	//
-	// Example:
-	//  func f() {
-	//  	x := 1
-	//  	x := 2
-	//  }
-	NoNewVar
-
-	// MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does
-	// not have single-valued left-hand or right-hand side.
-	//
-	// Per the spec:
-	//  "In assignment operations, both the left- and right-hand expression lists
-	//  must contain exactly one single-valued expression"
-	//
-	// Example:
-	//  func f() int {
-	//  	x, y := 1, 2
-	//  	x, y += 1
-	//  	return x + y
-	//  }
-	MultiValAssignOp
-
-	// InvalidIfaceAssign occurs when a value of type T is used as an
-	// interface, but T does not implement a method of the expected interface.
-	//
-	// Example:
-	//  type I interface {
-	//  	f()
-	//  }
-	//
-	//  type T int
-	//
-	//  var x I = T(1)
-	InvalidIfaceAssign
-
-	// InvalidChanAssign occurs when a chan assignment is invalid.
-	//
-	// Per the spec, a value x is assignable to a channel type T if:
-	//  "x is a bidirectional channel value, T is a channel type, x's type V and
-	//  T have identical element types, and at least one of V or T is not a
-	//  defined type."
-	//
-	// Example:
-	//  type T1 chan int
-	//  type T2 chan int
-	//
-	//  var x T1
-	//  // Invalid assignment because both types are named
-	//  var _ T2 = x
-	InvalidChanAssign
-
-	// IncompatibleAssign occurs when the type of the right-hand side expression
-	// in an assignment cannot be assigned to the type of the variable being
-	// assigned.
-	//
-	// Example:
-	//  var x []int
-	//  var _ int = x
-	IncompatibleAssign
-
-	// UnaddressableFieldAssign occurs when trying to assign to a struct field
-	// in a map value.
-	//
-	// Example:
-	//  func f() {
-	//  	m := make(map[string]struct{i int})
-	//  	m["foo"].i = 42
-	//  }
-	UnaddressableFieldAssign
-
-	/* decls > type (+ other type expression codes) */
-
-	// NotAType occurs when the identifier used as the underlying type in a type
-	// declaration or the right-hand side of a type alias does not denote a type.
-	//
-	// Example:
-	//  var S = 2
-	//
-	//  type T S
-	NotAType
-
-	// InvalidArrayLen occurs when an array length is not a constant value.
-	//
-	// Example:
-	//  var n = 3
-	//  var _ = [n]int{}
-	InvalidArrayLen
-
-	// BlankIfaceMethod occurs when a method name is '_'.
-	//
-	// Per the spec:
-	//  "The name of each explicitly specified method must be unique and not
-	//  blank."
-	//
-	// Example:
-	//  type T interface {
-	//  	_(int)
-	//  }
-	BlankIfaceMethod
-
-	// IncomparableMapKey occurs when a map key type does not support the == and
-	// != operators.
-	//
-	// Per the spec:
-	//  "The comparison operators == and != must be fully defined for operands of
-	//  the key type; thus the key type must not be a function, map, or slice."
-	//
-	// Example:
-	//  var x map[T]int
-	//
-	//  type T []int
-	IncomparableMapKey
-
-	// InvalidIfaceEmbed occurs when a non-interface type is embedded in an
-	// interface.
-	//
-	// Example:
-	//  type T struct {}
-	//
-	//  func (T) m()
-	//
-	//  type I interface {
-	//  	T
-	//  }
-	InvalidIfaceEmbed
-
-	// InvalidPtrEmbed occurs when an embedded field is of the pointer form *T,
-	// and T is itself a pointer, an unsafe.Pointer, or an interface.
-	//
-	// Per the spec:
-	//  "An embedded field must be specified as a type name T or as a pointer to
-	//  a non-interface type name *T, and T itself may not be a pointer type."
-	//
-	// Example:
-	//  type T *int
-	//
-	//  type S struct {
-	//  	*T
-	//  }
-	InvalidPtrEmbed
-
-	/* decls > func and method */
-
-	// BadRecv occurs when a method declaration does not have exactly one
-	// receiver parameter.
-	//
-	// Example:
-	//  func () _() {}
-	BadRecv
-
-	// InvalidRecv occurs when a receiver type expression is not of the form T
-	// or *T, or T is a pointer type.
-	//
-	// Example:
-	//  type T struct {}
-	//
-	//  func (**T) m() {}
-	InvalidRecv
-
-	// DuplicateFieldAndMethod occurs when an identifier appears as both a field
-	// and method name.
-	//
-	// Example:
-	//  type T struct {
-	//  	m int
-	//  }
-	//
-	//  func (T) m() {}
-	DuplicateFieldAndMethod
-
-	// DuplicateMethod occurs when two methods on the same receiver type have
-	// the same name.
-	//
-	// Example:
-	//  type T struct {}
-	//  func (T) m() {}
-	//  func (T) m(i int) int { return i }
-	DuplicateMethod
-
-	/* decls > special */
-
-	// InvalidBlank occurs when a blank identifier is used as a value or type.
-	//
-	// Per the spec:
-	//  "The blank identifier may appear as an operand only on the left-hand side
-	//  of an assignment."
-	//
-	// Example:
-	//  var x = _
-	InvalidBlank
-
-	// InvalidIota occurs when the predeclared identifier iota is used outside
-	// of a constant declaration.
-	//
-	// Example:
-	//  var x = iota
-	InvalidIota
-
-	// MissingInitBody occurs when an init function is missing its body.
-	//
-	// Example:
-	//  func init()
-	MissingInitBody
-
-	// InvalidInitSig occurs when an init function declares parameters or
-	// results.
-	//
-	// Example:
-	//  func init() int { return 1 }
-	InvalidInitSig
-
-	// InvalidInitDecl occurs when init is declared as anything other than a
-	// function.
-	//
-	// Example:
-	//  var init = 1
-	InvalidInitDecl
-
-	// InvalidMainDecl occurs when main is declared as anything other than a
-	// function, in a main package.
-	InvalidMainDecl
-
-	/* exprs */
-
-	// TooManyValues occurs when a function returns too many values for the
-	// expression context in which it is used.
-	//
-	// Example:
-	//  func ReturnTwo() (int, int) {
-	//  	return 1, 2
-	//  }
-	//
-	//  var x = ReturnTwo()
-	TooManyValues
-
-	// NotAnExpr occurs when a type expression is used where a value expression
-	// is expected.
-	//
-	// Example:
-	//  type T struct {}
-	//
-	//  func f() {
-	//  	T
-	//  }
-	NotAnExpr
-
-	/* exprs > const */
-
-	// TruncatedFloat occurs when a float constant is truncated to an integer
-	// value.
-	//
-	// Example:
-	//  var _ int = 98.6
-	TruncatedFloat
-
-	// NumericOverflow occurs when a numeric constant overflows its target type.
-	//
-	// Example:
-	//  var x int8 = 1000
-	NumericOverflow
-
-	/* exprs > operation */
-
-	// UndefinedOp occurs when an operator is not defined for the type(s) used
-	// in an operation.
-	//
-	// Example:
-	//  var c = "a" - "b"
-	UndefinedOp
-
-	// MismatchedTypes occurs when operand types are incompatible in a binary
-	// operation.
-	//
-	// Example:
-	//  var a = "hello"
-	//  var b = 1
-	//  var c = a - b
-	MismatchedTypes
-
-	// DivByZero occurs when a division operation is provable at compile
-	// time to be a division by zero.
-	//
-	// Example:
-	//  const divisor = 0
-	//  var x int = 1/divisor
-	DivByZero
-
-	// NonNumericIncDec occurs when an increment or decrement operator is
-	// applied to a non-numeric value.
-	//
-	// Example:
-	//  func f() {
-	//  	var c = "c"
-	//  	c++
-	//  }
-	NonNumericIncDec
-
-	/* exprs > ptr */
-
-	// UnaddressableOperand occurs when the & operator is applied to an
-	// unaddressable expression.
-	//
-	// Example:
-	//  var x = &1
-	UnaddressableOperand
-
-	// InvalidIndirection occurs when a non-pointer value is indirected via the
-	// '*' operator.
-	//
-	// Example:
-	//  var x int
-	//  var y = *x
-	InvalidIndirection
-
-	/* exprs > [] */
-
-	// NonIndexableOperand occurs when an index operation is applied to a value
-	// that cannot be indexed.
-	//
-	// Example:
-	//  var x = 1
-	//  var y = x[1]
-	NonIndexableOperand
-
-	// InvalidIndex occurs when an index argument is not of integer type,
-	// negative, or out-of-bounds.
-	//
-	// Example:
-	//  var s = [...]int{1,2,3}
-	//  var x = s[5]
-	//
-	// Example:
-	//  var s = []int{1,2,3}
-	//  var _ = s[-1]
-	//
-	// Example:
-	//  var s = []int{1,2,3}
-	//  var i string
-	//  var _ = s[i]
-	InvalidIndex
-
-	// SwappedSliceIndices occurs when constant indices in a slice expression
-	// are decreasing in value.
-	//
-	// Example:
-	//  var _ = []int{1,2,3}[2:1]
-	SwappedSliceIndices
-
-	/* operators > slice */
-
-	// NonSliceableOperand occurs when a slice operation is applied to a value
-	// whose type is not sliceable, or is unaddressable.
-	//
-	// Example:
-	//  var x = [...]int{1, 2, 3}[:1]
-	//
-	// Example:
-	//  var x = 1
-	//  var y = 1[:1]
-	NonSliceableOperand
-
-	// InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is
-	// applied to a string.
-	//
-	// Example:
-	//  var s = "hello"
-	//  var x = s[1:2:3]
-	InvalidSliceExpr
-
-	/* exprs > shift */
-
-	// InvalidShiftCount occurs when the right-hand side of a shift operation is
-	// either non-integer, negative, or too large.
-	//
-	// Example:
-	//  var (
-	//  	x string
-	//  	y int = 1 << x
-	//  )
-	InvalidShiftCount
-
-	// InvalidShiftOperand occurs when the shifted operand is not an integer.
-	//
-	// Example:
-	//  var s = "hello"
-	//  var x = s << 2
-	InvalidShiftOperand
-
-	/* exprs > chan */
-
-	// InvalidReceive occurs when there is a channel receive from a value that
-	// is either not a channel, or is a send-only channel.
-	//
-	// Example:
-	//  func f() {
-	//  	var x = 1
-	//  	<-x
-	//  }
-	InvalidReceive
-
-	// InvalidSend occurs when there is a channel send to a value that is not a
-	// channel, or is a receive-only channel.
-	//
-	// Example:
-	//  func f() {
-	//  	var x = 1
-	//  	x <- "hello!"
-	//  }
-	InvalidSend
-
-	/* exprs > literal */
-
-	// DuplicateLitKey occurs when an index is duplicated in a slice, array, or
-	// map literal.
-	//
-	// Example:
-	//  var _ = []int{0:1, 0:2}
-	//
-	// Example:
-	//  var _ = map[string]int{"a": 1, "a": 2}
-	DuplicateLitKey
-
-	// MissingLitKey occurs when a map literal is missing a key expression.
-	//
-	// Example:
-	//  var _ = map[string]int{1}
-	MissingLitKey
-
-	// InvalidLitIndex occurs when the key in a key-value element of a slice or
-	// array literal is not an integer constant.
-	//
-	// Example:
-	//  var i = 0
-	//  var x = []string{i: "world"}
-	InvalidLitIndex
-
-	// OversizeArrayLit occurs when an array literal exceeds its length.
-	//
-	// Example:
-	//  var _ = [2]int{1,2,3}
-	OversizeArrayLit
-
-	// MixedStructLit occurs when a struct literal contains a mix of positional
-	// and named elements.
-	//
-	// Example:
-	//  var _ = struct{i, j int}{i: 1, 2}
-	MixedStructLit
-
-	// InvalidStructLit occurs when a positional struct literal has an incorrect
-	// number of values.
-	//
-	// Example:
-	//  var _ = struct{i, j int}{1,2,3}
-	InvalidStructLit
-
-	// MissingLitField occurs when a struct literal refers to a field that does
-	// not exist on the struct type.
-	//
-	// Example:
-	//  var _ = struct{i int}{j: 2}
-	MissingLitField
-
-	// DuplicateLitField occurs when a struct literal contains duplicated
-	// fields.
-	//
-	// Example:
-	//  var _ = struct{i int}{i: 1, i: 2}
-	DuplicateLitField
-
-	// UnexportedLitField occurs when a positional struct literal implicitly
-	// assigns an unexported field of an imported type.
-	UnexportedLitField
-
-	// InvalidLitField occurs when a field name is not a valid identifier.
-	//
-	// Example:
-	//  var _ = struct{i int}{1: 1}
-	InvalidLitField
-
-	// UntypedLit occurs when a composite literal omits a required type
-	// identifier.
-	//
-	// Example:
-	//  type outer struct{
-	//  	inner struct { i int }
-	//  }
-	//
-	//  var _ = outer{inner: {1}}
-	UntypedLit
-
-	// InvalidLit occurs when a composite literal expression does not match its
-	// type.
-	//
-	// Example:
-	//  type P *struct{
-	//  	x int
-	//  }
-	//  var _ = P {}
-	InvalidLit
-
-	/* exprs > selector */
-
-	// AmbiguousSelector occurs when a selector is ambiguous.
-	//
-	// Example:
-	//  type E1 struct { i int }
-	//  type E2 struct { i int }
-	//  type T struct { E1; E2 }
-	//
-	//  var x T
-	//  var _ = x.i
-	AmbiguousSelector
-
-	// UndeclaredImportedName occurs when a package-qualified identifier is
-	// undeclared by the imported package.
-	//
-	// Example:
-	//  import "go/types"
-	//
-	//  var _ = types.NotAnActualIdentifier
-	UndeclaredImportedName
-
-	// UnexportedName occurs when a selector refers to an unexported identifier
-	// of an imported package.
-	//
-	// Example:
-	//  import "reflect"
-	//
-	//  type _ reflect.flag
-	UnexportedName
-
-	// UndeclaredName occurs when an identifier is not declared in the current
-	// scope.
-	//
-	// Example:
-	//  var x T
-	UndeclaredName
-
-	// MissingFieldOrMethod occurs when a selector references a field or method
-	// that does not exist.
-	//
-	// Example:
-	//  type T struct {}
-	//
-	//  var x = T{}.f
-	MissingFieldOrMethod
-
-	/* exprs > ... */
-
-	// BadDotDotDotSyntax occurs when a "..." occurs in a context where it is
-	// not valid.
-	//
-	// Example:
-	//  var _ = map[int][...]int{0: {}}
-	BadDotDotDotSyntax
-
-	// NonVariadicDotDotDot occurs when a "..." is used on the final argument to
-	// a non-variadic function.
-	//
-	// Example:
-	//  func printArgs(s []string) {
-	//  	for _, a := range s {
-	//  		println(a)
-	//  	}
-	//  }
-	//
-	//  func f() {
-	//  	s := []string{"a", "b", "c"}
-	//  	printArgs(s...)
-	//  }
-	NonVariadicDotDotDot
-
-	// MisplacedDotDotDot occurs when a "..." is used somewhere other than the
-	// final argument to a function call.
-	//
-	// Example:
-	//  func printArgs(args ...int) {
-	//  	for _, a := range args {
-	//  		println(a)
-	//  	}
-	//  }
-	//
-	//  func f() {
-	//  	a := []int{1,2,3}
-	//  	printArgs(0, a...)
-	//  }
-	MisplacedDotDotDot
-
-	// InvalidDotDotDotOperand occurs when a "..." operator is applied to a
-	// single-valued operand.
-	//
-	// Example:
-	//  func printArgs(args ...int) {
-	//  	for _, a := range args {
-	//  		println(a)
-	//  	}
-	//  }
-	//
-	//  func f() {
-	//  	a := 1
-	//  	printArgs(a...)
-	//  }
-	//
-	// Example:
-	//  func args() (int, int) {
-	//  	return 1, 2
-	//  }
-	//
-	//  func printArgs(args ...int) {
-	//  	for _, a := range args {
-	//  		println(a)
-	//  	}
-	//  }
-	//
-	//  func g() {
-	//  	printArgs(args()...)
-	//  }
-	InvalidDotDotDotOperand
-
-	// InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in
-	// function.
-	//
-	// Example:
-	//  var s = []int{1, 2, 3}
-	//  var l = len(s...)
-	InvalidDotDotDot
-
-	/* exprs > built-in */
-
-	// UncalledBuiltin occurs when a built-in function is used as a
-	// function-valued expression, instead of being called.
-	//
-	// Per the spec:
-	//  "The built-in functions do not have standard Go types, so they can only
-	//  appear in call expressions; they cannot be used as function values."
-	//
-	// Example:
-	//  var _ = copy
-	UncalledBuiltin
-
-	// InvalidAppend occurs when append is called with a first argument that is
-	// not a slice.
-	//
-	// Example:
-	//  var _ = append(1, 2)
-	InvalidAppend
-
-	// InvalidCap occurs when an argument to the cap built-in function is not of
-	// supported type.
-	//
-	// See https://golang.org/ref/spec#Length_and_capacity for information on
-	// which underlying types are supported as arguments to cap and len.
-	//
-	// Example:
-	//  var s = 2
-	//  var x = cap(s)
-	InvalidCap
-
-	// InvalidClose occurs when close(...) is called with an argument that is
-	// not of channel type, or that is a receive-only channel.
-	//
-	// Example:
-	//  func f() {
-	//  	var x int
-	//  	close(x)
-	//  }
-	InvalidClose
-
-	// InvalidCopy occurs when the arguments are not of slice type or do not
-	// have compatible type.
-	//
-	// See https://golang.org/ref/spec#Appending_and_copying_slices for more
-	// information on the type requirements for the copy built-in.
-	//
-	// Example:
-	//  func f() {
-	//  	var x []int
-	//  	y := []int64{1,2,3}
-	//  	copy(x, y)
-	//  }
-	InvalidCopy
-
-	// InvalidComplex occurs when the complex built-in function is called with
-	// arguments with incompatible types.
-	//
-	// Example:
-	//  var _ = complex(float32(1), float64(2))
-	InvalidComplex
-
-	// InvalidDelete occurs when the delete built-in function is called with a
-	// first argument that is not a map.
-	//
-	// Example:
-	//  func f() {
-	//  	m := "hello"
-	//  	delete(m, "e")
-	//  }
-	InvalidDelete
-
-	// InvalidImag occurs when the imag built-in function is called with an
-	// argument that does not have complex type.
-	//
-	// Example:
-	//  var _ = imag(int(1))
-	InvalidImag
-
-	// InvalidLen occurs when an argument to the len built-in function is not of
-	// supported type.
-	//
-	// See https://golang.org/ref/spec#Length_and_capacity for information on
-	// which underlying types are supported as arguments to cap and len.
-	//
-	// Example:
-	//  var s = 2
-	//  var x = len(s)
-	InvalidLen
-
-	// SwappedMakeArgs occurs when make is called with three arguments, and its
-	// length argument is larger than its capacity argument.
-	//
-	// Example:
-	//  var x = make([]int, 3, 2)
-	SwappedMakeArgs
-
-	// InvalidMake occurs when make is called with an unsupported type argument.
-	//
-	// See https://golang.org/ref/spec#Making_slices_maps_and_channels for
-	// information on the types that may be created using make.
-	//
-	// Example:
-	//  var x = make(int)
-	InvalidMake
-
-	// InvalidReal occurs when the real built-in function is called with an
-	// argument that does not have complex type.
-	//
-	// Example:
-	//  var _ = real(int(1))
-	InvalidReal
-
-	/* exprs > assertion */
-
-	// InvalidAssert occurs when a type assertion is applied to a
-	// value that is not of interface type.
-	//
-	// Example:
-	//  var x = 1
-	//  var _ = x.(float64)
-	InvalidAssert
-
-	// ImpossibleAssert occurs for a type assertion x.(T) when the value x of
-	// interface cannot have dynamic type T, due to a missing or mismatching
-	// method on T.
-	//
-	// Example:
-	//  type T int
-	//
-	//  func (t *T) m() int { return int(*t) }
-	//
-	//  type I interface { m() int }
-	//
-	//  var x I
-	//  var _ = x.(T)
-	ImpossibleAssert
-
-	/* exprs > conversion */
-
-	// InvalidConversion occurs when the argument type cannot be converted to the
-	// target.
-	//
-	// See https://golang.org/ref/spec#Conversions for the rules of
-	// convertibility.
-	//
-	// Example:
-	//  var x float64
-	//  var _ = string(x)
-	InvalidConversion
-
-	// InvalidUntypedConversion occurs when there is no valid implicit
-	// conversion from an untyped value satisfying the type constraints of the
-	// context in which it is used.
-	//
-	// Example:
-	//  var _ = 1 + ""
-	InvalidUntypedConversion
-
-	/* offsetof */
-
-	// BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument
-	// that is not a selector expression.
-	//
-	// Example:
-	//  import "unsafe"
-	//
-	//  var x int
-	//  var _ = unsafe.Offsetof(x)
-	BadOffsetofSyntax
-
-	// InvalidOffsetof occurs when unsafe.Offsetof is called with a method
-	// selector, rather than a field selector, or when the field is embedded via
-	// a pointer.
-	//
-	// Per the spec:
-	//
-	//  "If f is an embedded field, it must be reachable without pointer
-	//  indirections through fields of the struct. "
-	//
-	// Example:
-	//  import "unsafe"
-	//
-	//  type T struct { f int }
-	//  type S struct { *T }
-	//  var s S
-	//  var _ = unsafe.Offsetof(s.f)
-	//
-	// Example:
-	//  import "unsafe"
-	//
-	//  type S struct{}
-	//
-	//  func (S) m() {}
-	//
-	//  var s S
-	//  var _ = unsafe.Offsetof(s.m)
-	InvalidOffsetof
-
-	/* control flow > scope */
-
-	// UnusedExpr occurs when a side-effect free expression is used as a
-	// statement. Such a statement has no effect.
-	//
-	// Example:
-	//  func f(i int) {
-	//  	i*i
-	//  }
-	UnusedExpr
-
-	// UnusedVar occurs when a variable is declared but unused.
-	//
-	// Example:
-	//  func f() {
-	//  	x := 1
-	//  }
-	UnusedVar
-
-	// MissingReturn occurs when a function with results is missing a return
-	// statement.
-	//
-	// Example:
-	//  func f() int {}
-	MissingReturn
-
-	// WrongResultCount occurs when a return statement returns an incorrect
-	// number of values.
-	//
-	// Example:
-	//  func ReturnOne() int {
-	//  	return 1, 2
-	//  }
-	WrongResultCount
-
-	// OutOfScopeResult occurs when the name of a value implicitly returned by
-	// an empty return statement is shadowed in a nested scope.
-	//
-	// Example:
-	//  func factor(n int) (i int) {
-	//  	for i := 2; i < n; i++ {
-	//  		if n%i == 0 {
-	//  			return
-	//  		}
-	//  	}
-	//  	return 0
-	//  }
-	OutOfScopeResult
-
-	/* control flow > if */
-
-	// InvalidCond occurs when an if condition is not a boolean expression.
-	//
-	// Example:
-	//  func checkReturn(i int) {
-	//  	if i {
-	//  		panic("non-zero return")
-	//  	}
-	//  }
-	InvalidCond
-
-	/* control flow > for */
-
-	// InvalidPostDecl occurs when there is a declaration in a for-loop post
-	// statement.
-	//
-	// Example:
-	//  func f() {
-	//  	for i := 0; i < 10; j := 0 {}
-	//  }
-	InvalidPostDecl
-
-	// InvalidChanRange occurs when a send-only channel is used in a range
-	// expression.
-	//
-	// Example:
-	//  func sum(c chan<- int) {
-	//  	s := 0
-	//  	for i := range c {
-	//  		s += i
-	//  	}
-	//  }
-	InvalidChanRange
-
-	// InvalidIterVar occurs when two iteration variables are used while ranging
-	// over a channel.
-	//
-	// Example:
-	//  func f(c chan int) {
-	//  	for k, v := range c {
-	//  		println(k, v)
-	//  	}
-	//  }
-	InvalidIterVar
-
-	// InvalidRangeExpr occurs when the type of a range expression is not array,
-	// slice, string, map, or channel.
-	//
-	// Example:
-	//  func f(i int) {
-	//  	for j := range i {
-	//  		println(j)
-	//  	}
-	//  }
-	InvalidRangeExpr
-
-	/* control flow > switch */
-
-	// MisplacedBreak occurs when a break statement is not within a for, switch,
-	// or select statement of the innermost function definition.
-	//
-	// Example:
-	//  func f() {
-	//  	break
-	//  }
-	MisplacedBreak
-
-	// MisplacedContinue occurs when a continue statement is not within a for
-	// loop of the innermost function definition.
-	//
-	// Example:
-	//  func sumeven(n int) int {
-	//  	proceed := func() {
-	//  		continue
-	//  	}
-	//  	sum := 0
-	//  	for i := 1; i <= n; i++ {
-	//  		if i % 2 != 0 {
-	//  			proceed()
-	//  		}
-	//  		sum += i
-	//  	}
-	//  	return sum
-	//  }
-	MisplacedContinue
-
-	// MisplacedFallthrough occurs when a fallthrough statement is not within an
-	// expression switch.
-	//
-	// Example:
-	//  func typename(i interface{}) string {
-	//  	switch i.(type) {
-	//  	case int64:
-	//  		fallthrough
-	//  	case int:
-	//  		return "int"
-	//  	}
-	//  	return "unsupported"
-	//  }
-	MisplacedFallthrough
-
-	// DuplicateCase occurs when a type or expression switch has duplicate
-	// cases.
-	//
-	// Example:
-	//  func printInt(i int) {
-	//  	switch i {
-	//  	case 1:
-	//  		println("one")
-	//  	case 1:
-	//  		println("One")
-	//  	}
-	//  }
-	DuplicateCase
-
-	// DuplicateDefault occurs when a type or expression switch has multiple
-	// default clauses.
-	//
-	// Example:
-	//  func printInt(i int) {
-	//  	switch i {
-	//  	case 1:
-	//  		println("one")
-	//  	default:
-	//  		println("One")
-	//  	default:
-	//  		println("1")
-	//  	}
-	//  }
-	DuplicateDefault
-
-	// BadTypeKeyword occurs when a .(type) expression is used anywhere other
-	// than a type switch.
-	//
-	// Example:
-	//  type I interface {
-	//  	m()
-	//  }
-	//  var t I
-	//  var _ = t.(type)
-	BadTypeKeyword
-
-	// InvalidTypeSwitch occurs when .(type) is used on an expression that is
-	// not of interface type.
-	//
-	// Example:
-	//  func f(i int) {
-	//  	switch x := i.(type) {}
-	//  }
-	InvalidTypeSwitch
-
-	/* control flow > select */
-
-	// InvalidSelectCase occurs when a select case is not a channel send or
-	// receive.
-	//
-	// Example:
-	//  func checkChan(c <-chan int) bool {
-	//  	select {
-	//  	case c:
-	//  		return true
-	//  	default:
-	//  		return false
-	//  	}
-	//  }
-	InvalidSelectCase
-
-	/* control flow > labels and jumps */
-
-	// UndeclaredLabel occurs when an undeclared label is jumped to.
-	//
-	// Example:
-	//  func f() {
-	//  	goto L
-	//  }
-	UndeclaredLabel
-
-	// DuplicateLabel occurs when a label is declared more than once.
-	//
-	// Example:
-	//  func f() int {
-	//  L:
-	//  L:
-	//  	return 1
-	//  }
-	DuplicateLabel
-
-	// MisplacedLabel occurs when a break or continue label is not on a for,
-	// switch, or select statement.
-	//
-	// Example:
-	//  func f() {
-	//  L:
-	//  	a := []int{1,2,3}
-	//  	for _, e := range a {
-	//  		if e > 10 {
-	//  			break L
-	//  		}
-	//  		println(a)
-	//  	}
-	//  }
-	MisplacedLabel
-
-	// UnusedLabel occurs when a label is declared but not used.
-	//
-	// Example:
-	//  func f() {
-	//  L:
-	//  }
-	UnusedLabel
-
-	// JumpOverDecl occurs when a label jumps over a variable declaration.
-	//
-	// Example:
-	//  func f() int {
-	//  	goto L
-	//  	x := 2
-	//  L:
-	//  	x++
-	//  	return x
-	//  }
-	JumpOverDecl
-
-	// JumpIntoBlock occurs when a forward jump goes to a label inside a nested
-	// block.
-	//
-	// Example:
-	//  func f(x int) {
-	//  	goto L
-	//  	if x > 0 {
-	//  	L:
-	//  		print("inside block")
-	//  	}
-	// }
-	JumpIntoBlock
-
-	/* control flow > calls */
-
-	// InvalidMethodExpr occurs when a pointer method is called but the argument
-	// is not addressable.
-	//
-	// Example:
-	//  type T struct {}
-	//
-	//  func (*T) m() int { return 1 }
-	//
-	//  var _ = T.m(T{})
-	InvalidMethodExpr
-
-	// WrongArgCount occurs when too few or too many arguments are passed by a
-	// function call.
-	//
-	// Example:
-	//  func f(i int) {}
-	//  var x = f()
-	WrongArgCount
-
-	// InvalidCall occurs when an expression is called that is not of function
-	// type.
-	//
-	// Example:
-	//  var x = "x"
-	//  var y = x()
-	InvalidCall
-
-	/* control flow > suspended */
-
-	// UnusedResults occurs when a restricted expression-only built-in function
-	// is suspended via go or defer. Such a suspension discards the results of
-	// these side-effect free built-in functions, and therefore is ineffectual.
-	//
-	// Example:
-	//  func f(a []int) int {
-	//  	defer len(a)
-	//  	return i
-	//  }
-	UnusedResults
-
-	// InvalidDefer occurs when a deferred expression is not a function call,
-	// for example if the expression is a type conversion.
-	//
-	// Example:
-	//  func f(i int) int {
-	//  	defer int32(i)
-	//  	return i
-	//  }
-	InvalidDefer
-
-	// InvalidGo occurs when a go expression is not a function call, for example
-	// if the expression is a type conversion.
-	//
-	// Example:
-	//  func f(i int) int {
-	//  	go int32(i)
-	//  	return i
-	//  }
-	InvalidGo
-)

+ 0 - 152
vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go

@@ -1,152 +0,0 @@
-// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT.
-
-package typesinternal
-
-import "strconv"
-
-func _() {
-	// An "invalid array index" compiler error signifies that the constant values have changed.
-	// Re-run the stringer command to generate them again.
-	var x [1]struct{}
-	_ = x[Test-1]
-	_ = x[BlankPkgName-2]
-	_ = x[MismatchedPkgName-3]
-	_ = x[InvalidPkgUse-4]
-	_ = x[BadImportPath-5]
-	_ = x[BrokenImport-6]
-	_ = x[ImportCRenamed-7]
-	_ = x[UnusedImport-8]
-	_ = x[InvalidInitCycle-9]
-	_ = x[DuplicateDecl-10]
-	_ = x[InvalidDeclCycle-11]
-	_ = x[InvalidTypeCycle-12]
-	_ = x[InvalidConstInit-13]
-	_ = x[InvalidConstVal-14]
-	_ = x[InvalidConstType-15]
-	_ = x[UntypedNil-16]
-	_ = x[WrongAssignCount-17]
-	_ = x[UnassignableOperand-18]
-	_ = x[NoNewVar-19]
-	_ = x[MultiValAssignOp-20]
-	_ = x[InvalidIfaceAssign-21]
-	_ = x[InvalidChanAssign-22]
-	_ = x[IncompatibleAssign-23]
-	_ = x[UnaddressableFieldAssign-24]
-	_ = x[NotAType-25]
-	_ = x[InvalidArrayLen-26]
-	_ = x[BlankIfaceMethod-27]
-	_ = x[IncomparableMapKey-28]
-	_ = x[InvalidIfaceEmbed-29]
-	_ = x[InvalidPtrEmbed-30]
-	_ = x[BadRecv-31]
-	_ = x[InvalidRecv-32]
-	_ = x[DuplicateFieldAndMethod-33]
-	_ = x[DuplicateMethod-34]
-	_ = x[InvalidBlank-35]
-	_ = x[InvalidIota-36]
-	_ = x[MissingInitBody-37]
-	_ = x[InvalidInitSig-38]
-	_ = x[InvalidInitDecl-39]
-	_ = x[InvalidMainDecl-40]
-	_ = x[TooManyValues-41]
-	_ = x[NotAnExpr-42]
-	_ = x[TruncatedFloat-43]
-	_ = x[NumericOverflow-44]
-	_ = x[UndefinedOp-45]
-	_ = x[MismatchedTypes-46]
-	_ = x[DivByZero-47]
-	_ = x[NonNumericIncDec-48]
-	_ = x[UnaddressableOperand-49]
-	_ = x[InvalidIndirection-50]
-	_ = x[NonIndexableOperand-51]
-	_ = x[InvalidIndex-52]
-	_ = x[SwappedSliceIndices-53]
-	_ = x[NonSliceableOperand-54]
-	_ = x[InvalidSliceExpr-55]
-	_ = x[InvalidShiftCount-56]
-	_ = x[InvalidShiftOperand-57]
-	_ = x[InvalidReceive-58]
-	_ = x[InvalidSend-59]
-	_ = x[DuplicateLitKey-60]
-	_ = x[MissingLitKey-61]
-	_ = x[InvalidLitIndex-62]
-	_ = x[OversizeArrayLit-63]
-	_ = x[MixedStructLit-64]
-	_ = x[InvalidStructLit-65]
-	_ = x[MissingLitField-66]
-	_ = x[DuplicateLitField-67]
-	_ = x[UnexportedLitField-68]
-	_ = x[InvalidLitField-69]
-	_ = x[UntypedLit-70]
-	_ = x[InvalidLit-71]
-	_ = x[AmbiguousSelector-72]
-	_ = x[UndeclaredImportedName-73]
-	_ = x[UnexportedName-74]
-	_ = x[UndeclaredName-75]
-	_ = x[MissingFieldOrMethod-76]
-	_ = x[BadDotDotDotSyntax-77]
-	_ = x[NonVariadicDotDotDot-78]
-	_ = x[MisplacedDotDotDot-79]
-	_ = x[InvalidDotDotDotOperand-80]
-	_ = x[InvalidDotDotDot-81]
-	_ = x[UncalledBuiltin-82]
-	_ = x[InvalidAppend-83]
-	_ = x[InvalidCap-84]
-	_ = x[InvalidClose-85]
-	_ = x[InvalidCopy-86]
-	_ = x[InvalidComplex-87]
-	_ = x[InvalidDelete-88]
-	_ = x[InvalidImag-89]
-	_ = x[InvalidLen-90]
-	_ = x[SwappedMakeArgs-91]
-	_ = x[InvalidMake-92]
-	_ = x[InvalidReal-93]
-	_ = x[InvalidAssert-94]
-	_ = x[ImpossibleAssert-95]
-	_ = x[InvalidConversion-96]
-	_ = x[InvalidUntypedConversion-97]
-	_ = x[BadOffsetofSyntax-98]
-	_ = x[InvalidOffsetof-99]
-	_ = x[UnusedExpr-100]
-	_ = x[UnusedVar-101]
-	_ = x[MissingReturn-102]
-	_ = x[WrongResultCount-103]
-	_ = x[OutOfScopeResult-104]
-	_ = x[InvalidCond-105]
-	_ = x[InvalidPostDecl-106]
-	_ = x[InvalidChanRange-107]
-	_ = x[InvalidIterVar-108]
-	_ = x[InvalidRangeExpr-109]
-	_ = x[MisplacedBreak-110]
-	_ = x[MisplacedContinue-111]
-	_ = x[MisplacedFallthrough-112]
-	_ = x[DuplicateCase-113]
-	_ = x[DuplicateDefault-114]
-	_ = x[BadTypeKeyword-115]
-	_ = x[InvalidTypeSwitch-116]
-	_ = x[InvalidSelectCase-117]
-	_ = x[UndeclaredLabel-118]
-	_ = x[DuplicateLabel-119]
-	_ = x[MisplacedLabel-120]
-	_ = x[UnusedLabel-121]
-	_ = x[JumpOverDecl-122]
-	_ = x[JumpIntoBlock-123]
-	_ = x[InvalidMethodExpr-124]
-	_ = x[WrongArgCount-125]
-	_ = x[InvalidCall-126]
-	_ = x[UnusedResults-127]
-	_ = x[InvalidDefer-128]
-	_ = x[InvalidGo-129]
-}
-
-const _ErrorCode_name = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGo"
-
-var _ErrorCode_index = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 215, 231, 250, 258, 274, 292, 309, 327, 351, 359, 374, 390, 408, 425, 440, 447, 458, 481, 496, 508, 519, 534, 548, 563, 578, 591, 600, 614, 629, 640, 655, 664, 680, 700, 718, 737, 749, 768, 787, 803, 820, 839, 853, 864, 879, 892, 907, 923, 937, 953, 968, 985, 1003, 1018, 1028, 1038, 1055, 1077, 1091, 1105, 1125, 1143, 1163, 1181, 1204, 1220, 1235, 1248, 1258, 1270, 1281, 1295, 1308, 1319, 1329, 1344, 1355, 1366, 1379, 1395, 1412, 1436, 1453, 1468, 1478, 1487, 1500, 1516, 1532, 1543, 1558, 1574, 1588, 1604, 1618, 1635, 1655, 1668, 1684, 1698, 1715, 1732, 1747, 1761, 1775, 1786, 1798, 1811, 1828, 1841, 1852, 1865, 1877, 1886}
-
-func (i ErrorCode) String() string {
-	i -= 1
-	if i < 0 || i >= ErrorCode(len(_ErrorCode_index)-1) {
-		return "ErrorCode(" + strconv.FormatInt(int64(i+1), 10) + ")"
-	}
-	return _ErrorCode_name[_ErrorCode_index[i]:_ErrorCode_index[i+1]]
-}
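The file above is ordinary output of the stringer tool: all constant names are concatenated into one string and sliced by an offset table, and the _() guard fails to compile if the constant values drift. A hand-written miniature of the same pattern, with a hypothetical Code type that is not part of the removed package:

package main

import (
	"fmt"
	"strconv"
)

type Code int

const (
	CodeA Code = iota + 1
	CodeB
	CodeC
)

// Same layout stringer emits: one concatenated name string plus offsets into it.
const _Code_name = "CodeACodeBCodeC"

var _Code_index = [...]uint8{0, 5, 10, 15}

func (c Code) String() string {
	i := c - 1
	if i < 0 || int(i) >= len(_Code_index)-1 {
		return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
	}
	return _Code_name[_Code_index[i]:_Code_index[i+1]]
}

func main() {
	fmt.Println(CodeB)   // CodeB
	fmt.Println(Code(9)) // Code(9)
}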

+ 0 - 45
vendor/golang.org/x/tools/internal/typesinternal/types.go

@@ -1,45 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package typesinternal provides access to internal go/types APIs that are not
-// yet exported.
-package typesinternal
-
-import (
-	"go/token"
-	"go/types"
-	"reflect"
-	"unsafe"
-)
-
-func SetUsesCgo(conf *types.Config) bool {
-	v := reflect.ValueOf(conf).Elem()
-
-	f := v.FieldByName("go115UsesCgo")
-	if !f.IsValid() {
-		f = v.FieldByName("UsesCgo")
-		if !f.IsValid() {
-			return false
-		}
-	}
-
-	addr := unsafe.Pointer(f.UnsafeAddr())
-	*(*bool)(addr) = true
-
-	return true
-}
-
-func ReadGo116ErrorData(terr types.Error) (ErrorCode, token.Pos, token.Pos, bool) {
-	var data [3]int
-	// By coincidence all of these fields are ints, which simplifies things.
-	v := reflect.ValueOf(terr)
-	for i, name := range []string{"go116code", "go116start", "go116end"} {
-		f := v.FieldByName(name)
-		if !f.IsValid() {
-			return 0, 0, 0, false
-		}
-		data[i] = int(f.Int())
-	}
-	return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
-}

+ 0 - 20
vendor/honnef.co/go/tools/LICENSE

@@ -1,20 +0,0 @@
-Copyright (c) 2016 Dominik Honnef
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 0 - 48
vendor/honnef.co/go/tools/arg/arg.go

@@ -1,48 +0,0 @@
-package arg
-
-var args = map[string]int{
-	"(*encoding/json.Decoder).Decode.v":    0,
-	"(*encoding/json.Encoder).Encode.v":    0,
-	"(*encoding/xml.Decoder).Decode.v":     0,
-	"(*encoding/xml.Encoder).Encode.v":     0,
-	"(*sync.Pool).Put.x":                   0,
-	"(*text/template.Template).Parse.text": 0,
-	"(io.Seeker).Seek.offset":              0,
-	"(time.Time).Sub.u":                    0,
-	"append.elems":                         1,
-	"append.slice":                         0,
-	"bytes.Equal.a":                        0,
-	"bytes.Equal.b":                        1,
-	"encoding/binary.Write.data":           2,
-	"errors.New.text":                      0,
-	"fmt.Fprintf.format":                   1,
-	"fmt.Printf.format":                    0,
-	"fmt.Sprintf.a[0]":                     1,
-	"fmt.Sprintf.format":                   0,
-	"json.Marshal.v":                       0,
-	"json.Unmarshal.v":                     1,
-	"len.v":                                0,
-	"make.size[0]":                         1,
-	"make.size[1]":                         2,
-	"make.t":                               0,
-	"net/url.Parse.rawurl":                 0,
-	"os.OpenFile.flag":                     1,
-	"os/exec.Command.name":                 0,
-	"os/signal.Notify.c":                   0,
-	"regexp.Compile.expr":                  0,
-	"runtime.SetFinalizer.finalizer":       1,
-	"runtime.SetFinalizer.obj":             0,
-	"sort.Sort.data":                       0,
-	"time.Parse.layout":                    0,
-	"time.Sleep.d":                         0,
-	"xml.Marshal.v":                        0,
-	"xml.Unmarshal.v":                      1,
-}
-
-func Arg(name string) int {
-	n, ok := args[name]
-	if !ok {
-		panic("unknown argument " + name)
-	}
-	return n
-}
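The table above lets checks look up a call argument by a stable "function.parameter" name instead of a hard-coded position. A tiny self-contained sketch of the same lookup idea (the shortened table and the callArgs stand-in are invented; this is not the removed API):

package main

import "fmt"

// Map "<function>.<parameter>" to its positional index, as the removed table does.
var args = map[string]int{
	"fmt.Printf.format":  0,
	"fmt.Fprintf.format": 1,
}

func Arg(name string) int {
	n, ok := args[name]
	if !ok {
		panic("unknown argument " + name)
	}
	return n
}

func main() {
	callArgs := []string{"%d items", "42"}          // stand-in for a call expression's arguments
	fmt.Println(callArgs[Arg("fmt.Printf.format")]) // "%d items"
}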

+ 0 - 129
vendor/honnef.co/go/tools/callgraph/callgraph.go

@@ -1,129 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-
-Package callgraph defines the call graph and various algorithms
-and utilities to operate on it.
-
-A call graph is a labelled directed graph whose nodes represent
-functions and whose edge labels represent syntactic function call
-sites.  The presence of a labelled edge (caller, site, callee)
-indicates that caller may call callee at the specified call site.
-
-A call graph is a multigraph: it may contain multiple edges (caller,
-*, callee) connecting the same pair of nodes, so long as the edges
-differ by label; this occurs when one function calls another function
-from multiple call sites.  Also, it may contain multiple edges
-(caller, site, *) that differ only by callee; this indicates a
-polymorphic call.
-
-A SOUND call graph is one that overapproximates the dynamic calling
-behaviors of the program in all possible executions.  One call graph
-is more PRECISE than another if it is a smaller overapproximation of
-the dynamic behavior.
-
-All call graphs have a synthetic root node which is responsible for
-calling main() and init().
-
-Calls to built-in functions (e.g. panic, println) are not represented
-in the call graph; they are treated like built-in operators of the
-language.
-
-*/
-package callgraph // import "honnef.co/go/tools/callgraph"
-
-// TODO(adonovan): add a function to eliminate wrappers from the
-// callgraph, preserving topology.
-// More generally, we could eliminate "uninteresting" nodes such as
-// nodes from packages we don't care about.
-
-import (
-	"fmt"
-	"go/token"
-
-	"honnef.co/go/tools/ssa"
-)
-
-// A Graph represents a call graph.
-//
-// A graph may contain nodes that are not reachable from the root.
-// If the call graph is sound, such nodes indicate unreachable
-// functions.
-//
-type Graph struct {
-	Root  *Node                   // the distinguished root node
-	Nodes map[*ssa.Function]*Node // all nodes by function
-}
-
-// New returns a new Graph with the specified root node.
-func New(root *ssa.Function) *Graph {
-	g := &Graph{Nodes: make(map[*ssa.Function]*Node)}
-	g.Root = g.CreateNode(root)
-	return g
-}
-
-// CreateNode returns the Node for fn, creating it if not present.
-func (g *Graph) CreateNode(fn *ssa.Function) *Node {
-	n, ok := g.Nodes[fn]
-	if !ok {
-		n = &Node{Func: fn, ID: len(g.Nodes)}
-		g.Nodes[fn] = n
-	}
-	return n
-}
-
-// A Node represents a node in a call graph.
-type Node struct {
-	Func *ssa.Function // the function this node represents
-	ID   int           // 0-based sequence number
-	In   []*Edge       // unordered set of incoming call edges (n.In[*].Callee == n)
-	Out  []*Edge       // unordered set of outgoing call edges (n.Out[*].Caller == n)
-}
-
-func (n *Node) String() string {
-	return fmt.Sprintf("n%d:%s", n.ID, n.Func)
-}
-
-// An Edge represents an edge in the call graph.
-//
-// Site is nil for edges originating in synthetic or intrinsic
-// functions, e.g. reflect.Call or the root of the call graph.
-type Edge struct {
-	Caller *Node
-	Site   ssa.CallInstruction
-	Callee *Node
-}
-
-func (e Edge) String() string {
-	return fmt.Sprintf("%s --> %s", e.Caller, e.Callee)
-}
-
-func (e Edge) Description() string {
-	var prefix string
-	switch e.Site.(type) {
-	case nil:
-		return "synthetic call"
-	case *ssa.Go:
-		prefix = "concurrent "
-	case *ssa.Defer:
-		prefix = "deferred "
-	}
-	return prefix + e.Site.Common().Description()
-}
-
-func (e Edge) Pos() token.Pos {
-	if e.Site == nil {
-		return token.NoPos
-	}
-	return e.Site.Pos()
-}
-
-// AddEdge adds the edge (caller, site, callee) to the call graph.
-// Elimination of duplicate edges is the caller's responsibility.
-func AddEdge(caller *Node, site ssa.CallInstruction, callee *Node) {
-	e := &Edge{caller, site, callee}
-	callee.In = append(callee.In, e)
-	caller.Out = append(caller.Out, e)
-}

+ 0 - 35
vendor/honnef.co/go/tools/callgraph/static/static.go

@@ -1,35 +0,0 @@
-// Package static computes the call graph of a Go program containing
-// only static call edges.
-package static // import "honnef.co/go/tools/callgraph/static"
-
-import (
-	"honnef.co/go/tools/callgraph"
-	"honnef.co/go/tools/ssa"
-	"honnef.co/go/tools/ssa/ssautil"
-)
-
-// CallGraph computes the call graph of the specified program
-// considering only static calls.
-//
-func CallGraph(prog *ssa.Program) *callgraph.Graph {
-	cg := callgraph.New(nil) // TODO(adonovan) eliminate concept of rooted callgraph
-
-	// TODO(adonovan): opt: use only a single pass over the ssa.Program.
-	// TODO(adonovan): opt: this is slower than RTA (perhaps because
-	// the lower precision means so many edges are allocated)!
-	for f := range ssautil.AllFunctions(prog) {
-		fnode := cg.CreateNode(f)
-		for _, b := range f.Blocks {
-			for _, instr := range b.Instrs {
-				if site, ok := instr.(ssa.CallInstruction); ok {
-					if g := site.Common().StaticCallee(); g != nil {
-						gnode := cg.CreateNode(g)
-						callgraph.AddEdge(fnode, site, gnode)
-					}
-				}
-			}
-		}
-	}
-
-	return cg
-}
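The same static call-graph construction still lives upstream in golang.org/x/tools. A sketch of how a caller might use it, assuming the usual packages/ssautil/static/callgraph APIs rather than the removed honnef.co fork; the "./..." pattern is a placeholder:

package main

import (
	"fmt"

	"golang.org/x/tools/go/callgraph"
	"golang.org/x/tools/go/callgraph/static"
	"golang.org/x/tools/go/packages"
	"golang.org/x/tools/go/ssa/ssautil"
)

func main() {
	// Load and type-check packages, then build their SSA form.
	cfg := &packages.Config{Mode: packages.LoadAllSyntax}
	initial, err := packages.Load(cfg, "./...")
	if err != nil {
		panic(err)
	}
	prog, _ := ssautil.AllPackages(initial, 0)
	prog.Build()

	// Keep only static call edges and print caller -> callee pairs.
	cg := static.CallGraph(prog)
	_ = callgraph.GraphVisitEdges(cg, func(e *callgraph.Edge) error {
		fmt.Println(e.Caller, "->", e.Callee)
		return nil
	})
}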

+ 0 - 181
vendor/honnef.co/go/tools/callgraph/util.go

@@ -1,181 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package callgraph
-
-import "honnef.co/go/tools/ssa"
-
-// This file provides various utilities over call graphs, such as
-// visitation and path search.
-
-// CalleesOf returns a new set containing all direct callees of the
-// caller node.
-//
-func CalleesOf(caller *Node) map[*Node]bool {
-	callees := make(map[*Node]bool)
-	for _, e := range caller.Out {
-		callees[e.Callee] = true
-	}
-	return callees
-}
-
-// GraphVisitEdges visits all the edges in graph g in depth-first order.
-// The edge function is called for each edge in postorder.  If it
-// returns non-nil, visitation stops and GraphVisitEdges returns that
-// value.
-//
-func GraphVisitEdges(g *Graph, edge func(*Edge) error) error {
-	seen := make(map[*Node]bool)
-	var visit func(n *Node) error
-	visit = func(n *Node) error {
-		if !seen[n] {
-			seen[n] = true
-			for _, e := range n.Out {
-				if err := visit(e.Callee); err != nil {
-					return err
-				}
-				if err := edge(e); err != nil {
-					return err
-				}
-			}
-		}
-		return nil
-	}
-	for _, n := range g.Nodes {
-		if err := visit(n); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// PathSearch finds an arbitrary path starting at node start and
-// ending at some node for which isEnd() returns true.  On success,
-// PathSearch returns the path as an ordered list of edges; on
-// failure, it returns nil.
-//
-func PathSearch(start *Node, isEnd func(*Node) bool) []*Edge {
-	stack := make([]*Edge, 0, 32)
-	seen := make(map[*Node]bool)
-	var search func(n *Node) []*Edge
-	search = func(n *Node) []*Edge {
-		if !seen[n] {
-			seen[n] = true
-			if isEnd(n) {
-				return stack
-			}
-			for _, e := range n.Out {
-				stack = append(stack, e) // push
-				if found := search(e.Callee); found != nil {
-					return found
-				}
-				stack = stack[:len(stack)-1] // pop
-			}
-		}
-		return nil
-	}
-	return search(start)
-}
-
-// DeleteSyntheticNodes removes from call graph g all nodes for
-// synthetic functions (except g.Root and package initializers),
-// preserving the topology.  In effect, calls to synthetic wrappers
-// are "inlined".
-//
-func (g *Graph) DeleteSyntheticNodes() {
-	// Measurements on the standard library and go.tools show that
-	// resulting graph has ~15% fewer nodes and 4-8% fewer edges
-	// than the input.
-	//
-	// Inlining a wrapper of in-degree m, out-degree n adds m*n
-	// and removes m+n edges.  Since most wrappers are monomorphic
-	// (n=1) this results in a slight reduction.  Polymorphic
-	// wrappers (n>1), e.g. from embedding an interface value
-	// inside a struct to satisfy some interface, cause an
-	// increase in the graph, but they seem to be uncommon.
-
-	// Hash all existing edges to avoid creating duplicates.
-	edges := make(map[Edge]bool)
-	for _, cgn := range g.Nodes {
-		for _, e := range cgn.Out {
-			edges[*e] = true
-		}
-	}
-	for fn, cgn := range g.Nodes {
-		if cgn == g.Root || fn.Synthetic == "" || isInit(cgn.Func) {
-			continue // keep
-		}
-		for _, eIn := range cgn.In {
-			for _, eOut := range cgn.Out {
-				newEdge := Edge{eIn.Caller, eIn.Site, eOut.Callee}
-				if edges[newEdge] {
-					continue // don't add duplicate
-				}
-				AddEdge(eIn.Caller, eIn.Site, eOut.Callee)
-				edges[newEdge] = true
-			}
-		}
-		g.DeleteNode(cgn)
-	}
-}
-
-func isInit(fn *ssa.Function) bool {
-	return fn.Pkg != nil && fn.Pkg.Func("init") == fn
-}
-
-// DeleteNode removes node n and its edges from the graph g.
-// (NB: not efficient for batch deletion.)
-func (g *Graph) DeleteNode(n *Node) {
-	n.deleteIns()
-	n.deleteOuts()
-	delete(g.Nodes, n.Func)
-}
-
-// deleteIns deletes all incoming edges to n.
-func (n *Node) deleteIns() {
-	for _, e := range n.In {
-		removeOutEdge(e)
-	}
-	n.In = nil
-}
-
-// deleteOuts deletes all outgoing edges from n.
-func (n *Node) deleteOuts() {
-	for _, e := range n.Out {
-		removeInEdge(e)
-	}
-	n.Out = nil
-}
-
-// removeOutEdge removes edge.Caller's outgoing edge 'edge'.
-func removeOutEdge(edge *Edge) {
-	caller := edge.Caller
-	n := len(caller.Out)
-	for i, e := range caller.Out {
-		if e == edge {
-			// Replace it with the final element and shrink the slice.
-			caller.Out[i] = caller.Out[n-1]
-			caller.Out[n-1] = nil // aid GC
-			caller.Out = caller.Out[:n-1]
-			return
-		}
-	}
-	panic("edge not found: " + edge.String())
-}
-
-// removeInEdge removes edge.Callee's incoming edge 'edge'.
-func removeInEdge(edge *Edge) {
-	caller := edge.Callee
-	n := len(caller.In)
-	for i, e := range caller.In {
-		if e == edge {
-			// Replace it with the final element and shrink the slice.
-			caller.In[i] = caller.In[n-1]
-			caller.In[n-1] = nil // aid GC
-			caller.In = caller.In[:n-1]
-			return
-		}
-	}
-	panic("edge not found: " + edge.String())
-}

+ 0 - 15
vendor/honnef.co/go/tools/cmd/staticcheck/README.md

@@ -1,15 +0,0 @@
-# staticcheck
-
-_staticcheck_ offers extensive analysis of Go code, covering a myriad
-of categories. It will detect bugs, suggest code simplifications,
-point out dead code, and more.
-
-## Installation
-
-See [the main README](https://github.com/dominikh/go-tools#installation) for installation instructions.
-
-## Documentation
-
-Detailed documentation can be found on
-[staticcheck.io](https://staticcheck.io/docs/).
-

+ 0 - 27
vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go

@@ -1,27 +0,0 @@
-// staticcheck analyses Go code and makes it better.
-package main // import "honnef.co/go/tools/cmd/staticcheck"
-
-import (
-	"os"
-
-	"honnef.co/go/tools/lint"
-	"honnef.co/go/tools/lint/lintutil"
-	"honnef.co/go/tools/simple"
-	"honnef.co/go/tools/staticcheck"
-	"honnef.co/go/tools/stylecheck"
-	"honnef.co/go/tools/unused"
-)
-
-func main() {
-	fs := lintutil.FlagSet("staticcheck")
-	fs.Parse(os.Args[1:])
-
-	checkers := []lint.Checker{
-		simple.NewChecker(),
-		staticcheck.NewChecker(),
-		stylecheck.NewChecker(),
-		&unused.Checker{},
-	}
-
-	lintutil.ProcessFlagSet(checkers, fs)
-}

+ 0 - 162
vendor/honnef.co/go/tools/config/config.go

@@ -1,162 +0,0 @@
-package config
-
-import (
-	"os"
-	"path/filepath"
-
-	"github.com/BurntSushi/toml"
-)
-
-func mergeLists(a, b []string) []string {
-	out := make([]string, 0, len(a)+len(b))
-	for _, el := range b {
-		if el == "inherit" {
-			out = append(out, a...)
-		} else {
-			out = append(out, el)
-		}
-	}
-
-	return out
-}
-
-func normalizeList(list []string) []string {
-	if len(list) > 1 {
-		nlist := make([]string, 0, len(list))
-		nlist = append(nlist, list[0])
-		for i, el := range list[1:] {
-			if el != list[i] {
-				nlist = append(nlist, el)
-			}
-		}
-		list = nlist
-	}
-
-	for _, el := range list {
-		if el == "inherit" {
-			// This should never happen, because the default config
-			// should not use "inherit"
-			panic(`unresolved "inherit"`)
-		}
-	}
-
-	return list
-}
-
-func (cfg Config) Merge(ocfg Config) Config {
-	if ocfg.Checks != nil {
-		cfg.Checks = mergeLists(cfg.Checks, ocfg.Checks)
-	}
-	if ocfg.Initialisms != nil {
-		cfg.Initialisms = mergeLists(cfg.Initialisms, ocfg.Initialisms)
-	}
-	if ocfg.DotImportWhitelist != nil {
-		cfg.DotImportWhitelist = mergeLists(cfg.DotImportWhitelist, ocfg.DotImportWhitelist)
-	}
-	if ocfg.HTTPStatusCodeWhitelist != nil {
-		cfg.HTTPStatusCodeWhitelist = mergeLists(cfg.HTTPStatusCodeWhitelist, ocfg.HTTPStatusCodeWhitelist)
-	}
-	return cfg
-}
-
-type Config struct {
-	// TODO(dh): this implementation makes it impossible for external
-	// clients to add their own checkers with configuration. At the
-	// moment, we don't really care about that; we don't encourage
-	// that people use this package. In the future, we may. The
-	// obvious solution would be using map[string]interface{}, but
-	// that's obviously subpar.
-
-	Checks                  []string `toml:"checks"`
-	Initialisms             []string `toml:"initialisms"`
-	DotImportWhitelist      []string `toml:"dot_import_whitelist"`
-	HTTPStatusCodeWhitelist []string `toml:"http_status_code_whitelist"`
-}
-
-var defaultConfig = Config{
-	Checks: []string{"all", "-ST1000", "-ST1003", "-ST1016"},
-	Initialisms: []string{
-		"ACL", "API", "ASCII", "CPU", "CSS", "DNS",
-		"EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID",
-		"IP", "JSON", "QPS", "RAM", "RPC", "SLA",
-		"SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
-		"UDP", "UI", "GID", "UID", "UUID", "URI",
-		"URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
-		"XSS", "SIP", "RTP",
-	},
-	DotImportWhitelist:      []string{},
-	HTTPStatusCodeWhitelist: []string{"200", "400", "404", "500"},
-}
-
-const configName = "staticcheck.conf"
-
-func parseConfigs(dir string) ([]Config, error) {
-	var out []Config
-
-	// TODO(dh): consider stopping at the GOPATH/module boundary
-	for dir != "" {
-		f, err := os.Open(filepath.Join(dir, configName))
-		if os.IsNotExist(err) {
-			ndir := filepath.Dir(dir)
-			if ndir == dir {
-				break
-			}
-			dir = ndir
-			continue
-		}
-		if err != nil {
-			return nil, err
-		}
-		var cfg Config
-		_, err = toml.DecodeReader(f, &cfg)
-		f.Close()
-		if err != nil {
-			return nil, err
-		}
-		out = append(out, cfg)
-		ndir := filepath.Dir(dir)
-		if ndir == dir {
-			break
-		}
-		dir = ndir
-	}
-	out = append(out, defaultConfig)
-	if len(out) < 2 {
-		return out, nil
-	}
-	for i := 0; i < len(out)/2; i++ {
-		out[i], out[len(out)-1-i] = out[len(out)-1-i], out[i]
-	}
-	return out, nil
-}
-
-func mergeConfigs(confs []Config) Config {
-	if len(confs) == 0 {
-		// This shouldn't happen because we always have at least a
-		// default config.
-		panic("trying to merge zero configs")
-	}
-	if len(confs) == 1 {
-		return confs[0]
-	}
-	conf := confs[0]
-	for _, oconf := range confs[1:] {
-		conf = conf.Merge(oconf)
-	}
-	return conf
-}
-
-func Load(dir string) (Config, error) {
-	confs, err := parseConfigs(dir)
-	if err != nil {
-		return Config{}, err
-	}
-	conf := mergeConfigs(confs)
-
-	conf.Checks = normalizeList(conf.Checks)
-	conf.Initialisms = normalizeList(conf.Initialisms)
-	conf.DotImportWhitelist = normalizeList(conf.DotImportWhitelist)
-	conf.HTTPStatusCodeWhitelist = normalizeList(conf.HTTPStatusCodeWhitelist)
-
-	return conf, nil
-}
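The merge rules above give "inherit" a splice-in meaning: a child staticcheck.conf extends, rather than replaces, the lists of the configs above it. A small self-contained illustration of that rule (the parent/child values are invented):

package main

import "fmt"

// mergeLists mirrors the removed merge rule: "inherit" in a child list
// splices the parent list in at that position.
func mergeLists(a, b []string) []string {
	out := make([]string, 0, len(a)+len(b))
	for _, el := range b {
		if el == "inherit" {
			out = append(out, a...)
		} else {
			out = append(out, el)
		}
	}
	return out
}

func main() {
	parent := []string{"all", "-ST1000"}
	child := []string{"inherit", "-ST1003"}
	fmt.Println(mergeLists(parent, child)) // [all -ST1000 -ST1003]
}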

+ 0 - 10
vendor/honnef.co/go/tools/config/example.conf

@@ -1,10 +0,0 @@
-checks = ["all", "-ST1003", "-ST1014"]
-initialisms = ["ACL", "API", "ASCII", "CPU", "CSS", "DNS",
-	"EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID",
-	"IP", "JSON", "QPS", "RAM", "RPC", "SLA",
-	"SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
-	"UDP", "UI", "GID", "UID", "UUID", "URI",
-	"URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
-	"XSS", "SIP", "RTP"]
-dot_import_whitelist = []
-http_status_code_whitelist = ["200", "400", "404", "500"]

+ 0 - 112
vendor/honnef.co/go/tools/deprecated/stdlib.go

@@ -1,112 +0,0 @@
-package deprecated
-
-type Deprecation struct {
-	DeprecatedSince           int
-	AlternativeAvailableSince int
-}
-
-var Stdlib = map[string]Deprecation{
-	"image/jpeg.Reader": {4, 0},
-	// FIXME(dh): AllowBinary isn't being detected as deprecated
-	// because the comment has a newline right after "Deprecated:"
-	"go/build.AllowBinary":                        {7, 7},
-	"(archive/zip.FileHeader).CompressedSize":     {1, 1},
-	"(archive/zip.FileHeader).UncompressedSize":   {1, 1},
-	"(archive/zip.FileHeader).ModifiedTime":       {10, 10},
-	"(archive/zip.FileHeader).ModifiedDate":       {10, 10},
-	"(*archive/zip.FileHeader).ModTime":           {10, 10},
-	"(*archive/zip.FileHeader).SetModTime":        {10, 10},
-	"(go/doc.Package).Bugs":                       {1, 1},
-	"os.SEEK_SET":                                 {7, 7},
-	"os.SEEK_CUR":                                 {7, 7},
-	"os.SEEK_END":                                 {7, 7},
-	"(net.Dialer).Cancel":                         {7, 7},
-	"runtime.CPUProfile":                          {9, 0},
-	"compress/flate.ReadError":                    {6, 6},
-	"compress/flate.WriteError":                   {6, 6},
-	"path/filepath.HasPrefix":                     {0, 0},
-	"(net/http.Transport).Dial":                   {7, 7},
-	"(*net/http.Transport).CancelRequest":         {6, 5},
-	"net/http.ErrWriteAfterFlush":                 {7, 0},
-	"net/http.ErrHeaderTooLong":                   {8, 0},
-	"net/http.ErrShortBody":                       {8, 0},
-	"net/http.ErrMissingContentLength":            {8, 0},
-	"net/http/httputil.ErrPersistEOF":             {0, 0},
-	"net/http/httputil.ErrClosed":                 {0, 0},
-	"net/http/httputil.ErrPipeline":               {0, 0},
-	"net/http/httputil.ServerConn":                {0, 0},
-	"net/http/httputil.NewServerConn":             {0, 0},
-	"net/http/httputil.ClientConn":                {0, 0},
-	"net/http/httputil.NewClientConn":             {0, 0},
-	"net/http/httputil.NewProxyClientConn":        {0, 0},
-	"(net/http.Request).Cancel":                   {7, 7},
-	"(text/template/parse.PipeNode).Line":         {1, 1},
-	"(text/template/parse.ActionNode).Line":       {1, 1},
-	"(text/template/parse.BranchNode).Line":       {1, 1},
-	"(text/template/parse.TemplateNode).Line":     {1, 1},
-	"database/sql/driver.ColumnConverter":         {9, 9},
-	"database/sql/driver.Execer":                  {8, 8},
-	"database/sql/driver.Queryer":                 {8, 8},
-	"(database/sql/driver.Conn).Begin":            {8, 8},
-	"(database/sql/driver.Stmt).Exec":             {8, 8},
-	"(database/sql/driver.Stmt).Query":            {8, 8},
-	"syscall.StringByteSlice":                     {1, 1},
-	"syscall.StringBytePtr":                       {1, 1},
-	"syscall.StringSlicePtr":                      {1, 1},
-	"syscall.StringToUTF16":                       {1, 1},
-	"syscall.StringToUTF16Ptr":                    {1, 1},
-	"(*regexp.Regexp).Copy":                       {12, 12},
-	"(archive/tar.Header).Xattrs":                 {10, 10},
-	"archive/tar.TypeRegA":                        {11, 1},
-	"go/types.NewInterface":                       {11, 11},
-	"(*go/types.Interface).Embedded":              {11, 11},
-	"go/importer.For":                             {12, 12},
-	"encoding/json.InvalidUTF8Error":              {2, 2},
-	"encoding/json.UnmarshalFieldError":           {2, 2},
-	"encoding/csv.ErrTrailingComma":               {2, 2},
-	"(encoding/csv.Reader).TrailingComma":         {2, 2},
-	"(net.Dialer).DualStack":                      {12, 12},
-	"net/http.ErrUnexpectedTrailer":               {12, 12},
-	"net/http.CloseNotifier":                      {11, 7},
-	"net/http.ProtocolError":                      {8, 8},
-	"(crypto/x509.CertificateRequest).Attributes": {5, 3},
-	// This function has no alternative, but also no purpose.
-	"(*crypto/rc4.Cipher).Reset":                     {12, 0},
-	"(net/http/httptest.ResponseRecorder).HeaderMap": {11, 7},
-
-	// All of these have been deprecated in favour of external libraries
-	"syscall.AttachLsf":             {7, 0},
-	"syscall.DetachLsf":             {7, 0},
-	"syscall.LsfSocket":             {7, 0},
-	"syscall.SetLsfPromisc":         {7, 0},
-	"syscall.LsfJump":               {7, 0},
-	"syscall.LsfStmt":               {7, 0},
-	"syscall.BpfStmt":               {7, 0},
-	"syscall.BpfJump":               {7, 0},
-	"syscall.BpfBuflen":             {7, 0},
-	"syscall.SetBpfBuflen":          {7, 0},
-	"syscall.BpfDatalink":           {7, 0},
-	"syscall.SetBpfDatalink":        {7, 0},
-	"syscall.SetBpfPromisc":         {7, 0},
-	"syscall.FlushBpf":              {7, 0},
-	"syscall.BpfInterface":          {7, 0},
-	"syscall.SetBpfInterface":       {7, 0},
-	"syscall.BpfTimeout":            {7, 0},
-	"syscall.SetBpfTimeout":         {7, 0},
-	"syscall.BpfStats":              {7, 0},
-	"syscall.SetBpfImmediate":       {7, 0},
-	"syscall.SetBpf":                {7, 0},
-	"syscall.CheckBpfVersion":       {7, 0},
-	"syscall.BpfHeadercmpl":         {7, 0},
-	"syscall.SetBpfHeadercmpl":      {7, 0},
-	"syscall.RouteRIB":              {8, 0},
-	"syscall.RoutingMessage":        {8, 0},
-	"syscall.RouteMessage":          {8, 0},
-	"syscall.InterfaceMessage":      {8, 0},
-	"syscall.InterfaceAddrMessage":  {8, 0},
-	"syscall.ParseRoutingMessage":   {8, 0},
-	"syscall.ParseRoutingSockaddr":  {8, 0},
-	"InterfaceAnnounceMessage":      {7, 0},
-	"InterfaceMulticastAddrMessage": {7, 0},
-	"syscall.FormatMessage":         {5, 0},
-}

+ 0 - 56
vendor/honnef.co/go/tools/functions/concrete.go

@@ -1,56 +0,0 @@
-package functions
-
-import (
-	"go/token"
-	"go/types"
-
-	"honnef.co/go/tools/ssa"
-)
-
-func concreteReturnTypes(fn *ssa.Function) []*types.Tuple {
-	res := fn.Signature.Results()
-	if res == nil {
-		return nil
-	}
-	ifaces := make([]bool, res.Len())
-	any := false
-	for i := 0; i < res.Len(); i++ {
-		_, ifaces[i] = res.At(i).Type().Underlying().(*types.Interface)
-		any = any || ifaces[i]
-	}
-	if !any {
-		return []*types.Tuple{res}
-	}
-	var out []*types.Tuple
-	for _, block := range fn.Blocks {
-		if len(block.Instrs) == 0 {
-			continue
-		}
-		ret, ok := block.Instrs[len(block.Instrs)-1].(*ssa.Return)
-		if !ok {
-			continue
-		}
-		vars := make([]*types.Var, res.Len())
-		for i, v := range ret.Results {
-			var typ types.Type
-			if !ifaces[i] {
-				typ = res.At(i).Type()
-			} else if mi, ok := v.(*ssa.MakeInterface); ok {
-				// TODO(dh): if mi.X is a function call that returns
-				// an interface, call concreteReturnTypes on that
-				// function (or, really, go through Descriptions,
-				// avoid infinite recursion etc, just like nil error
-				// detection)
-
-				// TODO(dh): support Phi nodes
-				typ = mi.X.Type()
-			} else {
-				typ = res.At(i).Type()
-			}
-			vars[i] = types.NewParam(token.NoPos, nil, "", typ)
-		}
-		out = append(out, types.NewTuple(vars...))
-	}
-	// TODO(dh): deduplicate out
-	return out
-}

+ 0 - 150
vendor/honnef.co/go/tools/functions/functions.go

@@ -1,150 +0,0 @@
-package functions
-
-import (
-	"go/types"
-	"sync"
-
-	"honnef.co/go/tools/callgraph"
-	"honnef.co/go/tools/callgraph/static"
-	"honnef.co/go/tools/ssa"
-	"honnef.co/go/tools/staticcheck/vrp"
-)
-
-var stdlibDescs = map[string]Description{
-	"errors.New": {Pure: true},
-
-	"fmt.Errorf":  {Pure: true},
-	"fmt.Sprintf": {Pure: true},
-	"fmt.Sprint":  {Pure: true},
-
-	"sort.Reverse": {Pure: true},
-
-	"strings.Map":            {Pure: true},
-	"strings.Repeat":         {Pure: true},
-	"strings.Replace":        {Pure: true},
-	"strings.Title":          {Pure: true},
-	"strings.ToLower":        {Pure: true},
-	"strings.ToLowerSpecial": {Pure: true},
-	"strings.ToTitle":        {Pure: true},
-	"strings.ToTitleSpecial": {Pure: true},
-	"strings.ToUpper":        {Pure: true},
-	"strings.ToUpperSpecial": {Pure: true},
-	"strings.Trim":           {Pure: true},
-	"strings.TrimFunc":       {Pure: true},
-	"strings.TrimLeft":       {Pure: true},
-	"strings.TrimLeftFunc":   {Pure: true},
-	"strings.TrimPrefix":     {Pure: true},
-	"strings.TrimRight":      {Pure: true},
-	"strings.TrimRightFunc":  {Pure: true},
-	"strings.TrimSpace":      {Pure: true},
-	"strings.TrimSuffix":     {Pure: true},
-
-	"(*net/http.Request).WithContext": {Pure: true},
-
-	"math/rand.Read":         {NilError: true},
-	"(*math/rand.Rand).Read": {NilError: true},
-}
-
-type Description struct {
-	// The function is known to be pure
-	Pure bool
-	// The function is known to be a stub
-	Stub bool
-	// The function is known to never return (panics notwithstanding)
-	Infinite bool
-	// Variable ranges
-	Ranges vrp.Ranges
-	Loops  []Loop
-	// Function returns an error as its last argument, but it is
-	// always nil
-	NilError            bool
-	ConcreteReturnTypes []*types.Tuple
-}
-
-type descriptionEntry struct {
-	ready  chan struct{}
-	result Description
-}
-
-type Descriptions struct {
-	CallGraph *callgraph.Graph
-	mu        sync.Mutex
-	cache     map[*ssa.Function]*descriptionEntry
-}
-
-func NewDescriptions(prog *ssa.Program) *Descriptions {
-	return &Descriptions{
-		CallGraph: static.CallGraph(prog),
-		cache:     map[*ssa.Function]*descriptionEntry{},
-	}
-}
-
-func (d *Descriptions) Get(fn *ssa.Function) Description {
-	d.mu.Lock()
-	fd := d.cache[fn]
-	if fd == nil {
-		fd = &descriptionEntry{
-			ready: make(chan struct{}),
-		}
-		d.cache[fn] = fd
-		d.mu.Unlock()
-
-		{
-			fd.result = stdlibDescs[fn.RelString(nil)]
-			fd.result.Pure = fd.result.Pure || d.IsPure(fn)
-			fd.result.Stub = fd.result.Stub || d.IsStub(fn)
-			fd.result.Infinite = fd.result.Infinite || !terminates(fn)
-			fd.result.Ranges = vrp.BuildGraph(fn).Solve()
-			fd.result.Loops = findLoops(fn)
-			fd.result.NilError = fd.result.NilError || IsNilError(fn)
-			fd.result.ConcreteReturnTypes = concreteReturnTypes(fn)
-		}
-
-		close(fd.ready)
-	} else {
-		d.mu.Unlock()
-		<-fd.ready
-	}
-	return fd.result
-}
-
-func IsNilError(fn *ssa.Function) bool {
-	// TODO(dh): This is very simplistic, as we only look for constant
-	// nil returns. A more advanced approach would work transitively.
-	// An even more advanced approach would be context-aware and
-	// determine nil errors based on inputs (e.g. io.WriteString to a
-	// bytes.Buffer will always return nil, but an io.WriteString to
-	// an os.File might not). Similarly, an os.File opened for reading
-	// won't error on Close, but other files will.
-	res := fn.Signature.Results()
-	if res.Len() == 0 {
-		return false
-	}
-	last := res.At(res.Len() - 1)
-	if types.TypeString(last.Type(), nil) != "error" {
-		return false
-	}
-
-	if fn.Blocks == nil {
-		return false
-	}
-	for _, block := range fn.Blocks {
-		if len(block.Instrs) == 0 {
-			continue
-		}
-		ins := block.Instrs[len(block.Instrs)-1]
-		ret, ok := ins.(*ssa.Return)
-		if !ok {
-			continue
-		}
-		v := ret.Results[len(ret.Results)-1]
-		c, ok := v.(*ssa.Const)
-		if !ok {
-			return false
-		}
-		if !c.IsNil() {
-			return false
-		}
-	}
-	return true
-}
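
Worth noting in the file above: Descriptions.Get memoizes per function with a ready channel, so the first caller computes the description outside the lock while concurrent callers for the same key block until the channel is closed. A generic standalone sketch of that pattern, with illustrative names (cache, entry, compute) that are not part of the vendored package:

package main

import (
	"fmt"
	"sync"
)

// entry holds one memoized result; ready is closed once value is filled in.
type entry struct {
	ready chan struct{}
	value int
}

type cache struct {
	mu sync.Mutex
	m  map[string]*entry
}

// get returns the memoized value for key, computing it exactly once even
// when called concurrently for the same key.
func (c *cache) get(key string, compute func(string) int) int {
	c.mu.Lock()
	e := c.m[key]
	if e == nil {
		e = &entry{ready: make(chan struct{})}
		c.m[key] = e
		c.mu.Unlock()
		e.value = compute(key) // runs outside the lock
		close(e.ready)         // wake up any waiters
	} else {
		c.mu.Unlock()
		<-e.ready // wait for the first caller to finish
	}
	return e.value
}

func main() {
	c := &cache{m: map[string]*entry{}}
	fmt.Println(c.get("example", func(k string) int { return len(k) }))
}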

+ 0 - 50
vendor/honnef.co/go/tools/functions/loops.go

@@ -1,50 +0,0 @@
-package functions
-
-import "honnef.co/go/tools/ssa"
-
-type Loop map[*ssa.BasicBlock]bool
-
-func findLoops(fn *ssa.Function) []Loop {
-	if fn.Blocks == nil {
-		return nil
-	}
-	tree := fn.DomPreorder()
-	var sets []Loop
-	for _, h := range tree {
-		for _, n := range h.Preds {
-			if !h.Dominates(n) {
-				continue
-			}
-			// n is a back-edge to h
-			// h is the loop header
-			if n == h {
-				sets = append(sets, Loop{n: true})
-				continue
-			}
-			set := Loop{h: true, n: true}
-			for _, b := range allPredsBut(n, h, nil) {
-				set[b] = true
-			}
-			sets = append(sets, set)
-		}
-	}
-	return sets
-}
-
-func allPredsBut(b, but *ssa.BasicBlock, list []*ssa.BasicBlock) []*ssa.BasicBlock {
-outer:
-	for _, pred := range b.Preds {
-		if pred == but {
-			continue
-		}
-		for _, p := range list {
-			// TODO improve big-o complexity of this function
-			if pred == p {
-				continue outer
-			}
-		}
-		list = append(list, pred)
-		list = allPredsBut(pred, but, list)
-	}
-	return list
-}

+ 0 - 123
vendor/honnef.co/go/tools/functions/pure.go

@@ -1,123 +0,0 @@
-package functions
-
-import (
-	"go/token"
-	"go/types"
-
-	"honnef.co/go/tools/callgraph"
-	"honnef.co/go/tools/lint/lintdsl"
-	"honnef.co/go/tools/ssa"
-)
-
-// IsStub reports whether a function is a stub. A function is
-// considered a stub if it has no instructions or exactly one
-// instruction, which must be either returning only constant values or
-// a panic.
-func (d *Descriptions) IsStub(fn *ssa.Function) bool {
-	if len(fn.Blocks) == 0 {
-		return true
-	}
-	if len(fn.Blocks) > 1 {
-		return false
-	}
-	instrs := lintdsl.FilterDebug(fn.Blocks[0].Instrs)
-	if len(instrs) != 1 {
-		return false
-	}
-
-	switch instrs[0].(type) {
-	case *ssa.Return:
-		// Since this is the only instruction, the return value must
-		// be a constant. We consider all constants as stubs, not just
-		// the zero value. This does not, unfortunately, cover zero
-		// initialised structs, as these cause additional
-		// instructions.
-		return true
-	case *ssa.Panic:
-		return true
-	default:
-		return false
-	}
-}
-
-func (d *Descriptions) IsPure(fn *ssa.Function) bool {
-	if fn.Signature.Results().Len() == 0 {
-		// A function with no return values is empty or is doing some
-		// work we cannot see (for example because of build tags);
-		// don't consider it pure.
-		return false
-	}
-
-	for _, param := range fn.Params {
-		if _, ok := param.Type().Underlying().(*types.Basic); !ok {
-			return false
-		}
-	}
-
-	if fn.Blocks == nil {
-		return false
-	}
-	checkCall := func(common *ssa.CallCommon) bool {
-		if common.IsInvoke() {
-			return false
-		}
-		builtin, ok := common.Value.(*ssa.Builtin)
-		if !ok {
-			if common.StaticCallee() != fn {
-				if common.StaticCallee() == nil {
-					return false
-				}
-				// TODO(dh): ideally, IsPure wouldn't be responsible
-				// for avoiding infinite recursion, but
-				// FunctionDescriptions would be.
-				node := d.CallGraph.CreateNode(common.StaticCallee())
-				if callgraph.PathSearch(node, func(other *callgraph.Node) bool {
-					return other.Func == fn
-				}) != nil {
-					return false
-				}
-				if !d.Get(common.StaticCallee()).Pure {
-					return false
-				}
-			}
-		} else {
-			switch builtin.Name() {
-			case "len", "cap", "make", "new":
-			default:
-				return false
-			}
-		}
-		return true
-	}
-	for _, b := range fn.Blocks {
-		for _, ins := range b.Instrs {
-			switch ins := ins.(type) {
-			case *ssa.Call:
-				if !checkCall(ins.Common()) {
-					return false
-				}
-			case *ssa.Defer:
-				if !checkCall(&ins.Call) {
-					return false
-				}
-			case *ssa.Select:
-				return false
-			case *ssa.Send:
-				return false
-			case *ssa.Go:
-				return false
-			case *ssa.Panic:
-				return false
-			case *ssa.Store:
-				return false
-			case *ssa.FieldAddr:
-				return false
-			case *ssa.UnOp:
-				if ins.Op == token.MUL || ins.Op == token.AND {
-					return false
-				}
-			}
-		}
-	}
-	return true
-}

+ 0 - 24
vendor/honnef.co/go/tools/functions/terminates.go

@@ -1,24 +0,0 @@
-package functions
-
-import "honnef.co/go/tools/ssa"
-
-// terminates reports whether fn is supposed to return, that is, if it
-// has at least one theoretical path that returns from the function.
-// Explicit panics do not count as terminating.
-func terminates(fn *ssa.Function) bool {
-	if fn.Blocks == nil {
-		// assuming that a function terminates is the conservative
-		// choice
-		return true
-	}
-
-	for _, block := range fn.Blocks {
-		if len(block.Instrs) == 0 {
-			continue
-		}
-		if _, ok := block.Instrs[len(block.Instrs)-1].(*ssa.Return); ok {
-			return true
-		}
-	}
-	return false
-}

+ 0 - 46
vendor/honnef.co/go/tools/go/types/typeutil/callee.go

@@ -1,46 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeutil
-
-import (
-	"go/ast"
-	"go/types"
-
-	"golang.org/x/tools/go/ast/astutil"
-)
-
-// Callee returns the named target of a function call, if any:
-// a function, method, builtin, or variable.
-func Callee(info *types.Info, call *ast.CallExpr) types.Object {
-	var obj types.Object
-	switch fun := astutil.Unparen(call.Fun).(type) {
-	case *ast.Ident:
-		obj = info.Uses[fun] // type, var, builtin, or declared func
-	case *ast.SelectorExpr:
-		if sel, ok := info.Selections[fun]; ok {
-			obj = sel.Obj() // method or field
-		} else {
-			obj = info.Uses[fun.Sel] // qualified identifier?
-		}
-	}
-	if _, ok := obj.(*types.TypeName); ok {
-		return nil // T(x) is a conversion, not a call
-	}
-	return obj
-}
-
-// StaticCallee returns the target (function or method) of a static
-// function call, if any. It returns nil for calls to builtins.
-func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
-	if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
-		return f
-	}
-	return nil
-}
-
-func interfaceMethod(f *types.Func) bool {
-	recv := f.Type().(*types.Signature).Recv()
-	return recv != nil && types.IsInterface(recv.Type())
-}
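
The removed Callee/StaticCallee helpers resolve the object behind a call expression from go/types results (returning nil for conversions and interface methods). A simplified, self-contained usage sketch of the same lookup against the standard go/types API; it deliberately skips the Unparen and conversion handling the real helpers perform:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p
type T struct{}
func (T) M() {}
func helper() {}
func f() { var t T; t.M(); helper() }`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{
		Uses:       map[*ast.Ident]types.Object{},
		Selections: map[*ast.SelectorExpr]*types.Selection{},
	}
	if _, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		call, ok := n.(*ast.CallExpr)
		if !ok {
			return true
		}
		// Simplified callee lookup, mirroring the removed helper.
		var obj types.Object
		switch fun := call.Fun.(type) {
		case *ast.Ident:
			obj = info.Uses[fun] // declared function or builtin
		case *ast.SelectorExpr:
			if sel, ok := info.Selections[fun]; ok {
				obj = sel.Obj() // method or field
			} else {
				obj = info.Uses[fun.Sel] // package-qualified identifier
			}
		}
		if fn, ok := obj.(*types.Func); ok {
			fmt.Println("static callee:", fn.FullName()) // (p.T).M, then p.helper
		}
		return true
	})
}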

+ 0 - 29
vendor/honnef.co/go/tools/go/types/typeutil/identical.go

@@ -1,29 +0,0 @@
-package typeutil
-
-import (
-	"go/types"
-)
-
-// Identical reports whether x and y are identical types.
-// Unlike types.Identical, receivers of Signature types are not ignored.
-func Identical(x, y types.Type) (ret bool) {
-	if !types.Identical(x, y) {
-		return false
-	}
-	sigX, ok := x.(*types.Signature)
-	if !ok {
-		return true
-	}
-	sigY, ok := y.(*types.Signature)
-	if !ok {
-		// should be impossible
-		return true
-	}
-	if sigX.Recv() == sigY.Recv() {
-		return true
-	}
-	if sigX.Recv() == nil || sigY.Recv() == nil {
-		return false
-	}
-	return Identical(sigX.Recv().Type(), sigY.Recv().Type())
-}

+ 0 - 31
vendor/honnef.co/go/tools/go/types/typeutil/imports.go

@@ -1,31 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeutil
-
-import "go/types"
-
-// Dependencies returns all dependencies of the specified packages.
-//
-// Dependent packages appear in topological order: if package P imports
-// package Q, Q appears earlier than P in the result.
-// The algorithm follows import statements in the order they
-// appear in the source code, so the result is a total order.
-//
-func Dependencies(pkgs ...*types.Package) []*types.Package {
-	var result []*types.Package
-	seen := make(map[*types.Package]bool)
-	var visit func(pkgs []*types.Package)
-	visit = func(pkgs []*types.Package) {
-		for _, p := range pkgs {
-			if !seen[p] {
-				seen[p] = true
-				visit(p.Imports())
-				result = append(result, p)
-			}
-		}
-	}
-	visit(pkgs)
-	return result
-}
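
Dependencies above is a post-order depth-first traversal: visiting a package's imports before appending the package itself yields a topological order in which dependencies precede dependents. A tiny standalone sketch of that traversal over a string-keyed graph (the real helper walks *types.Package values):

package main

import "fmt"

// topo returns the reachable nodes in dependency order: a node's
// dependencies always appear before the node itself.
func topo(graph map[string][]string, roots ...string) []string {
	seen := map[string]bool{}
	var order []string
	var visit func(nodes []string)
	visit = func(nodes []string) {
		for _, n := range nodes {
			if !seen[n] {
				seen[n] = true
				visit(graph[n]) // dependencies first
				order = append(order, n)
			}
		}
	}
	visit(roots)
	return order
}

func main() {
	g := map[string][]string{"app": {"lib", "util"}, "lib": {"util"}}
	fmt.Println(topo(g, "app")) // [util lib app]
}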

+ 0 - 315
vendor/honnef.co/go/tools/go/types/typeutil/map.go

@@ -1,315 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package typeutil defines various utilities for types, such as Map,
-// a mapping from types.Type to interface{} values.
-package typeutil
-
-import (
-	"bytes"
-	"fmt"
-	"go/types"
-	"reflect"
-)
-
-// Map is a hash-table-based mapping from types (types.Type) to
-// arbitrary interface{} values.  The concrete types that implement
-// the Type interface are pointers.  Since they are not canonicalized,
-// == cannot be used to check for equivalence, and thus we cannot
-// simply use a Go map.
-//
-// Just as with map[K]V, a nil *Map is a valid empty map.
-//
-// Not thread-safe.
-//
-// This fork handles Signatures correctly, respecting method receivers.
-//
-type Map struct {
-	hasher Hasher             // shared by many Maps
-	table  map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
-	length int                // number of map entries
-}
-
-// entry is an entry (key/value association) in a hash bucket.
-type entry struct {
-	key   types.Type
-	value interface{}
-}
-
-// SetHasher sets the hasher used by Map.
-//
-// All Hashers are functionally equivalent but contain internal state
-// used to cache the results of hashing previously seen types.
-//
-// A single Hasher created by MakeHasher() may be shared among many
-// Maps.  This is recommended if the instances have many keys in
-// common, as it will amortize the cost of hash computation.
-//
-// A Hasher may grow without bound as new types are seen.  Even when a
-// type is deleted from the map, the Hasher never shrinks, since other
-// types in the map may reference the deleted type indirectly.
-//
-// Hashers are not thread-safe, and read-only operations such as
-// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
-// read-lock) is required around all Map operations if a shared
-// hasher is accessed from multiple threads.
-//
-// If SetHasher is not called, the Map will create a private hasher at
-// the first call to Insert.
-//
-func (m *Map) SetHasher(hasher Hasher) {
-	m.hasher = hasher
-}
-
-// Delete removes the entry with the given key, if any.
-// It returns true if the entry was found.
-//
-func (m *Map) Delete(key types.Type) bool {
-	if m != nil && m.table != nil {
-		hash := m.hasher.Hash(key)
-		bucket := m.table[hash]
-		for i, e := range bucket {
-			if e.key != nil && Identical(key, e.key) {
-				// We can't compact the bucket as it
-				// would disturb iterators.
-				bucket[i] = entry{}
-				m.length--
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// At returns the map entry for the given key.
-// The result is nil if the entry is not present.
-//
-func (m *Map) At(key types.Type) interface{} {
-	if m != nil && m.table != nil {
-		for _, e := range m.table[m.hasher.Hash(key)] {
-			if e.key != nil && Identical(key, e.key) {
-				return e.value
-			}
-		}
-	}
-	return nil
-}
-
-// Set sets the map entry for key to val,
-// and returns the previous entry, if any.
-func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
-	if m.table != nil {
-		hash := m.hasher.Hash(key)
-		bucket := m.table[hash]
-		var hole *entry
-		for i, e := range bucket {
-			if e.key == nil {
-				hole = &bucket[i]
-			} else if Identical(key, e.key) {
-				prev = e.value
-				bucket[i].value = value
-				return
-			}
-		}
-
-		if hole != nil {
-			*hole = entry{key, value} // overwrite deleted entry
-		} else {
-			m.table[hash] = append(bucket, entry{key, value})
-		}
-	} else {
-		if m.hasher.memo == nil {
-			m.hasher = MakeHasher()
-		}
-		hash := m.hasher.Hash(key)
-		m.table = map[uint32][]entry{hash: {entry{key, value}}}
-	}
-
-	m.length++
-	return
-}
-
-// Len returns the number of map entries.
-func (m *Map) Len() int {
-	if m != nil {
-		return m.length
-	}
-	return 0
-}
-
-// Iterate calls function f on each entry in the map in unspecified order.
-//
-// If f should mutate the map, Iterate provides the same guarantees as
-// Go maps: if f deletes a map entry that Iterate has not yet reached,
-// f will not be invoked for it, but if f inserts a map entry that
-// Iterate has not yet reached, whether or not f will be invoked for
-// it is unspecified.
-//
-func (m *Map) Iterate(f func(key types.Type, value interface{})) {
-	if m != nil {
-		for _, bucket := range m.table {
-			for _, e := range bucket {
-				if e.key != nil {
-					f(e.key, e.value)
-				}
-			}
-		}
-	}
-}
-
-// Keys returns a new slice containing the set of map keys.
-// The order is unspecified.
-func (m *Map) Keys() []types.Type {
-	keys := make([]types.Type, 0, m.Len())
-	m.Iterate(func(key types.Type, _ interface{}) {
-		keys = append(keys, key)
-	})
-	return keys
-}
-
-func (m *Map) toString(values bool) string {
-	if m == nil {
-		return "{}"
-	}
-	var buf bytes.Buffer
-	fmt.Fprint(&buf, "{")
-	sep := ""
-	m.Iterate(func(key types.Type, value interface{}) {
-		fmt.Fprint(&buf, sep)
-		sep = ", "
-		fmt.Fprint(&buf, key)
-		if values {
-			fmt.Fprintf(&buf, ": %q", value)
-		}
-	})
-	fmt.Fprint(&buf, "}")
-	return buf.String()
-}
-
-// String returns a string representation of the map's entries.
-// Values are printed using fmt.Sprintf("%v", v).
-// Order is unspecified.
-//
-func (m *Map) String() string {
-	return m.toString(true)
-}
-
-// KeysString returns a string representation of the map's key set.
-// Order is unspecified.
-//
-func (m *Map) KeysString() string {
-	return m.toString(false)
-}
-
-////////////////////////////////////////////////////////////////////////
-// Hasher
-
-// A Hasher maps each type to its hash value.
-// For efficiency, a hasher uses memoization; thus its memory
-// footprint grows monotonically over time.
-// Hashers are not thread-safe.
-// Hashers have reference semantics.
-// Call MakeHasher to create a Hasher.
-type Hasher struct {
-	memo map[types.Type]uint32
-}
-
-// MakeHasher returns a new Hasher instance.
-func MakeHasher() Hasher {
-	return Hasher{make(map[types.Type]uint32)}
-}
-
-// Hash computes a hash value for the given type t such that
-// Identical(t, t') => Hash(t) == Hash(t').
-func (h Hasher) Hash(t types.Type) uint32 {
-	hash, ok := h.memo[t]
-	if !ok {
-		hash = h.hashFor(t)
-		h.memo[t] = hash
-	}
-	return hash
-}
-
-// hashString computes the Fowler–Noll–Vo hash of s.
-func hashString(s string) uint32 {
-	var h uint32
-	for i := 0; i < len(s); i++ {
-		h ^= uint32(s[i])
-		h *= 16777619
-	}
-	return h
-}
-
-// hashFor computes the hash of t.
-func (h Hasher) hashFor(t types.Type) uint32 {
-	// See Identical for rationale.
-	switch t := t.(type) {
-	case *types.Basic:
-		return uint32(t.Kind())
-
-	case *types.Array:
-		return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
-
-	case *types.Slice:
-		return 9049 + 2*h.Hash(t.Elem())
-
-	case *types.Struct:
-		var hash uint32 = 9059
-		for i, n := 0, t.NumFields(); i < n; i++ {
-			f := t.Field(i)
-			if f.Anonymous() {
-				hash += 8861
-			}
-			hash += hashString(t.Tag(i))
-			hash += hashString(f.Name()) // (ignore f.Pkg)
-			hash += h.Hash(f.Type())
-		}
-		return hash
-
-	case *types.Pointer:
-		return 9067 + 2*h.Hash(t.Elem())
-
-	case *types.Signature:
-		var hash uint32 = 9091
-		if t.Variadic() {
-			hash *= 8863
-		}
-		return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
-
-	case *types.Interface:
-		var hash uint32 = 9103
-		for i, n := 0, t.NumMethods(); i < n; i++ {
-			// See go/types.identicalMethods for rationale.
-			// Method order is not significant.
-			// Ignore m.Pkg().
-			m := t.Method(i)
-			hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
-		}
-		return hash
-
-	case *types.Map:
-		return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
-
-	case *types.Chan:
-		return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
-
-	case *types.Named:
-		// Not safe with a copying GC; objects may move.
-		return uint32(reflect.ValueOf(t.Obj()).Pointer())
-
-	case *types.Tuple:
-		return h.hashTuple(t)
-	}
-	panic(t)
-}
-
-func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
-	// See go/types.identicalTypes for rationale.
-	n := tuple.Len()
-	var hash uint32 = 9137 + 2*uint32(n)
-	for i := 0; i < n; i++ {
-		hash += 3 * h.Hash(tuple.At(i).Type())
-	}
-	return hash
-}
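
This file is a fork of golang.org/x/tools/go/types/typeutil; per the comment above, the fork's Identical additionally respects method receivers on *types.Signature keys. A short usage sketch of the upstream Map, which behaves the same way for the common case shown here (assumes golang.org/x/tools is available as a module dependency):

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map // the zero value is ready to use
	m.Set(types.NewSlice(types.Typ[types.Int]), "a slice of int")
	m.Set(types.Typ[types.String], "the string type")

	// A structurally identical but distinct *types.Slice value still hits
	// the same entry, which a plain map[types.Type]T could not guarantee,
	// since types.Type values are not canonicalized.
	fmt.Println(m.At(types.NewSlice(types.Typ[types.Int]))) // "a slice of int"
	fmt.Println(m.Len())                                    // 2
}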

+ 0 - 72
vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go

@@ -1,72 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements a cache of method sets.
-
-package typeutil
-
-import (
-	"go/types"
-	"sync"
-)
-
-// A MethodSetCache records the method set of each type T for which
-// MethodSet(T) is called so that repeat queries are fast.
-// The zero value is a ready-to-use cache instance.
-type MethodSetCache struct {
-	mu     sync.Mutex
-	named  map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
-	others map[types.Type]*types.MethodSet                            // all other types
-}
-
-// MethodSet returns the method set of type T.  It is thread-safe.
-//
-// If cache is nil, this function is equivalent to types.NewMethodSet(T).
-// Utility functions can thus expose an optional *MethodSetCache
-// parameter to clients that care about performance.
-//
-func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
-	if cache == nil {
-		return types.NewMethodSet(T)
-	}
-	cache.mu.Lock()
-	defer cache.mu.Unlock()
-
-	switch T := T.(type) {
-	case *types.Named:
-		return cache.lookupNamed(T).value
-
-	case *types.Pointer:
-		if N, ok := T.Elem().(*types.Named); ok {
-			return cache.lookupNamed(N).pointer
-		}
-	}
-
-	// all other types
-	// (The map uses pointer equivalence, not type identity.)
-	mset := cache.others[T]
-	if mset == nil {
-		mset = types.NewMethodSet(T)
-		if cache.others == nil {
-			cache.others = make(map[types.Type]*types.MethodSet)
-		}
-		cache.others[T] = mset
-	}
-	return mset
-}
-
-func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
-	if cache.named == nil {
-		cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
-	}
-	// Avoid recomputing mset(*T) for each distinct Pointer
-	// instance whose underlying type is a named type.
-	msets, ok := cache.named[named]
-	if !ok {
-		msets.value = types.NewMethodSet(named)
-		msets.pointer = types.NewMethodSet(types.NewPointer(named))
-		cache.named[named] = msets
-	}
-	return msets
-}

+ 0 - 52
vendor/honnef.co/go/tools/go/types/typeutil/ui.go

@@ -1,52 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeutil
-
-// This file defines utilities for user interfaces that display types.
-
-import "go/types"
-
-// IntuitiveMethodSet returns the intuitive method set of a type T,
-// which is the set of methods you can call on an addressable value of
-// that type.
-//
-// The result always contains MethodSet(T), and is exactly MethodSet(T)
-// for interface types and for pointer-to-concrete types.
-// For all other concrete types T, the result additionally
-// contains each method belonging to *T if there is no identically
-// named method on T itself.
-//
-// This corresponds to user intuition about method sets;
-// this function is intended only for user interfaces.
-//
-// The order of the result is as for types.MethodSet(T).
-//
-func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
-	isPointerToConcrete := func(T types.Type) bool {
-		ptr, ok := T.(*types.Pointer)
-		return ok && !types.IsInterface(ptr.Elem())
-	}
-
-	var result []*types.Selection
-	mset := msets.MethodSet(T)
-	if types.IsInterface(T) || isPointerToConcrete(T) {
-		for i, n := 0, mset.Len(); i < n; i++ {
-			result = append(result, mset.At(i))
-		}
-	} else {
-		// T is some other concrete type.
-		// Report methods of T and *T, preferring those of T.
-		pmset := msets.MethodSet(types.NewPointer(T))
-		for i, n := 0, pmset.Len(); i < n; i++ {
-			meth := pmset.At(i)
-			if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
-				meth = m
-			}
-			result = append(result, meth)
-		}
-
-	}
-	return result
-}

+ 0 - 68
vendor/honnef.co/go/tools/internal/sharedcheck/lint.go

@@ -1,68 +0,0 @@
-package sharedcheck
-
-import (
-	"go/ast"
-	"go/types"
-
-	"honnef.co/go/tools/lint"
-	. "honnef.co/go/tools/lint/lintdsl"
-	"honnef.co/go/tools/ssa"
-)
-
-func CheckRangeStringRunes(j *lint.Job) {
-	for _, ssafn := range j.Pkg.InitialFunctions {
-		fn := func(node ast.Node) bool {
-			rng, ok := node.(*ast.RangeStmt)
-			if !ok || !IsBlank(rng.Key) {
-				return true
-			}
-
-			v, _ := ssafn.ValueForExpr(rng.X)
-
-			// Check that we're converting from string to []rune
-			val, _ := v.(*ssa.Convert)
-			if val == nil {
-				return true
-			}
-			Tsrc, ok := val.X.Type().(*types.Basic)
-			if !ok || Tsrc.Kind() != types.String {
-				return true
-			}
-			Tdst, ok := val.Type().(*types.Slice)
-			if !ok {
-				return true
-			}
-			TdstElem, ok := Tdst.Elem().(*types.Basic)
-			if !ok || TdstElem.Kind() != types.Int32 {
-				return true
-			}
-
-			// Check that the result of the conversion is only used to
-			// range over
-			refs := val.Referrers()
-			if refs == nil {
-				return true
-			}
-
-			// Expect two refs: one for obtaining the length of the slice,
-			// one for accessing the elements
-			if len(FilterDebug(*refs)) != 2 {
-				// TODO(dh): right now, we check that only one place
-				// refers to our slice. This will miss cases such as
-				// ranging over the slice twice. Ideally, we'd ensure that
-				// the slice is only used for ranging over (without
-				// accessing the key), but that is harder to do because in
-				// SSA form, ranging over a slice looks like an ordinary
-				// loop with index increments and slice accesses. We'd
-				// have to look at the associated AST node to check that
-				// it's a range statement.
-				return true
-			}
-
-			j.Errorf(rng, "should range over string, not []rune(string)")
-
-			return true
-		}
-		Inspect(ssafn.Syntax(), fn)
-	}
-}

+ 0 - 28
vendor/honnef.co/go/tools/lint/LICENSE

@@ -1,28 +0,0 @@
-Copyright (c) 2013 The Go Authors. All rights reserved.
-Copyright (c) 2016 Dominik Honnef. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 0 - 44
vendor/honnef.co/go/tools/lint/generated.go

@@ -1,44 +0,0 @@
-package lint
-
-import (
-	"bufio"
-	"bytes"
-	"io"
-	"os"
-)
-
-var (
-	// used by cgo before Go 1.11
-	oldCgo = []byte("// Created by cgo - DO NOT EDIT")
-	prefix = []byte("// Code generated ")
-	suffix = []byte(" DO NOT EDIT.")
-	nl     = []byte("\n")
-	crnl   = []byte("\r\n")
-)
-
-func isGenerated(path string) bool {
-	f, err := os.Open(path)
-	if err != nil {
-		return false
-	}
-	defer f.Close()
-	br := bufio.NewReader(f)
-	for {
-		s, err := br.ReadBytes('\n')
-		if err != nil && err != io.EOF {
-			return false
-		}
-		s = bytes.TrimSuffix(s, crnl)
-		s = bytes.TrimSuffix(s, nl)
-		if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) {
-			return true
-		}
-		if bytes.Equal(s, oldCgo) {
-			return true
-		}
-		if err == io.EOF {
-			break
-		}
-	}
-	return false
-}
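
The removed helper recognizes the standard generated-file marker: a line that starts with "// Code generated " and ends with " DO NOT EDIT." (plus the pre-Go 1.11 cgo header). A minimal string-based sketch of the same check; isGeneratedSource is an illustrative name, not part of the vendored package:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// isGeneratedSource reports whether src contains a generated-code marker line.
func isGeneratedSource(src string) bool {
	sc := bufio.NewScanner(strings.NewReader(src))
	for sc.Scan() {
		line := strings.TrimRight(sc.Text(), "\r")
		if strings.HasPrefix(line, "// Code generated ") && strings.HasSuffix(line, " DO NOT EDIT.") {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isGeneratedSource("// Code generated by protoc-gen-go. DO NOT EDIT.\npackage pb\n")) // true
	fmt.Println(isGeneratedSource("package main\n"))                                                 // false
}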

+ 0 - 679
vendor/honnef.co/go/tools/lint/lint.go

@@ -1,679 +0,0 @@
-// Package lint provides the foundation for tools like staticcheck
-package lint // import "honnef.co/go/tools/lint"
-
-import (
-	"bytes"
-	"fmt"
-	"go/ast"
-	"go/token"
-	"go/types"
-	"io"
-	"os"
-	"path/filepath"
-	"runtime"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-	"unicode"
-
-	"golang.org/x/tools/go/ast/inspector"
-	"golang.org/x/tools/go/packages"
-	"honnef.co/go/tools/config"
-	"honnef.co/go/tools/ssa"
-	"honnef.co/go/tools/ssa/ssautil"
-)
-
-type Job struct {
-	Pkg       *Pkg
-	GoVersion int
-
-	check    Check
-	problems []Problem
-
-	duration time.Duration
-}
-
-type Ignore interface {
-	Match(p Problem) bool
-}
-
-type LineIgnore struct {
-	File    string
-	Line    int
-	Checks  []string
-	matched bool
-	pos     token.Pos
-}
-
-func (li *LineIgnore) Match(p Problem) bool {
-	if p.Position.Filename != li.File || p.Position.Line != li.Line {
-		return false
-	}
-	for _, c := range li.Checks {
-		if m, _ := filepath.Match(c, p.Check); m {
-			li.matched = true
-			return true
-		}
-	}
-	return false
-}
-
-func (li *LineIgnore) String() string {
-	matched := "not matched"
-	if li.matched {
-		matched = "matched"
-	}
-	return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched)
-}
-
-type FileIgnore struct {
-	File   string
-	Checks []string
-}
-
-func (fi *FileIgnore) Match(p Problem) bool {
-	if p.Position.Filename != fi.File {
-		return false
-	}
-	for _, c := range fi.Checks {
-		if m, _ := filepath.Match(c, p.Check); m {
-			return true
-		}
-	}
-	return false
-}
-
-type GlobIgnore struct {
-	Pattern string
-	Checks  []string
-}
-
-func (gi *GlobIgnore) Match(p Problem) bool {
-	if gi.Pattern != "*" {
-		pkgpath := p.Package.Types.Path()
-		if strings.HasSuffix(pkgpath, "_test") {
-			pkgpath = pkgpath[:len(pkgpath)-len("_test")]
-		}
-		name := filepath.Join(pkgpath, filepath.Base(p.Position.Filename))
-		if m, _ := filepath.Match(gi.Pattern, name); !m {
-			return false
-		}
-	}
-	for _, c := range gi.Checks {
-		if m, _ := filepath.Match(c, p.Check); m {
-			return true
-		}
-	}
-	return false
-}
-
-type Program struct {
-	SSA             *ssa.Program
-	InitialPackages []*Pkg
-	AllPackages     []*packages.Package
-	AllFunctions    []*ssa.Function
-}
-
-func (prog *Program) Fset() *token.FileSet {
-	return prog.InitialPackages[0].Fset
-}
-
-type Func func(*Job)
-
-type Severity uint8
-
-const (
-	Error Severity = iota
-	Warning
-	Ignored
-)
-
-// Problem represents a problem in some source code.
-type Problem struct {
-	Position token.Position // position in source file
-	Text     string         // the prose that describes the problem
-	Check    string
-	Package  *Pkg
-	Severity Severity
-}
-
-func (p *Problem) String() string {
-	if p.Check == "" {
-		return p.Text
-	}
-	return fmt.Sprintf("%s (%s)", p.Text, p.Check)
-}
-
-type Checker interface {
-	Name() string
-	Prefix() string
-	Init(*Program)
-	Checks() []Check
-}
-
-type Check struct {
-	Fn              Func
-	ID              string
-	FilterGenerated bool
-	Doc             string
-}
-
-// A Linter lints Go source code.
-type Linter struct {
-	Checkers      []Checker
-	Ignores       []Ignore
-	GoVersion     int
-	ReturnIgnored bool
-	Config        config.Config
-
-	MaxConcurrentJobs int
-	PrintStats        bool
-
-	automaticIgnores []Ignore
-}
-
-func (l *Linter) ignore(p Problem) bool {
-	ignored := false
-	for _, ig := range l.automaticIgnores {
-		// We cannot short-circuit these, as we want to record, for
-		// each ignore, whether it matched or not.
-		if ig.Match(p) {
-			ignored = true
-		}
-	}
-	if ignored {
-		// no need to execute other ignores if we've already had a
-		// match.
-		return true
-	}
-	for _, ig := range l.Ignores {
-		// We can short-circuit here, as we aren't tracking any
-		// information.
-		if ig.Match(p) {
-			return true
-		}
-	}
-
-	return false
-}
-
-func (j *Job) File(node Positioner) *ast.File {
-	return j.Pkg.tokenFileMap[j.Pkg.Fset.File(node.Pos())]
-}
-
-func parseDirective(s string) (cmd string, args []string) {
-	if !strings.HasPrefix(s, "//lint:") {
-		return "", nil
-	}
-	s = strings.TrimPrefix(s, "//lint:")
-	fields := strings.Split(s, " ")
-	return fields[0], fields[1:]
-}
-
-type PerfStats struct {
-	PackageLoading time.Duration
-	SSABuild       time.Duration
-	OtherInitWork  time.Duration
-	CheckerInits   map[string]time.Duration
-	Jobs           []JobStat
-}
-
-type JobStat struct {
-	Job      string
-	Duration time.Duration
-}
-
-func (stats *PerfStats) Print(w io.Writer) {
-	fmt.Fprintln(w, "Package loading:", stats.PackageLoading)
-	fmt.Fprintln(w, "SSA build:", stats.SSABuild)
-	fmt.Fprintln(w, "Other init work:", stats.OtherInitWork)
-
-	fmt.Fprintln(w, "Checker inits:")
-	for checker, d := range stats.CheckerInits {
-		fmt.Fprintf(w, "\t%s: %s\n", checker, d)
-	}
-	fmt.Fprintln(w)
-
-	fmt.Fprintln(w, "Jobs:")
-	sort.Slice(stats.Jobs, func(i, j int) bool {
-		return stats.Jobs[i].Duration < stats.Jobs[j].Duration
-	})
-	var total time.Duration
-	for _, job := range stats.Jobs {
-		fmt.Fprintf(w, "\t%s: %s\n", job.Job, job.Duration)
-		total += job.Duration
-	}
-	fmt.Fprintf(w, "\tTotal: %s\n", total)
-}
-
-func (l *Linter) Lint(initial []*packages.Package, stats *PerfStats) []Problem {
-	allPkgs := allPackages(initial)
-	t := time.Now()
-	ssaprog, _ := ssautil.Packages(allPkgs, ssa.GlobalDebug)
-	ssaprog.Build()
-	if stats != nil {
-		stats.SSABuild = time.Since(t)
-	}
-	runtime.GC()
-
-	t = time.Now()
-	pkgMap := map[*ssa.Package]*Pkg{}
-	var pkgs []*Pkg
-	for _, pkg := range initial {
-		ssapkg := ssaprog.Package(pkg.Types)
-		var cfg config.Config
-		if len(pkg.GoFiles) != 0 {
-			path := pkg.GoFiles[0]
-			dir := filepath.Dir(path)
-			var err error
-			// OPT(dh): we're rebuilding the entire config tree for
-			// each package. for example, if we check a/b/c and
-			// a/b/c/d, we'll process a, a/b, a/b/c, a, a/b, a/b/c,
-			// a/b/c/d – we should cache configs per package and only
-			// load the new levels.
-			cfg, err = config.Load(dir)
-			if err != nil {
-				// FIXME(dh): we couldn't load the config, what are we
-				// supposed to do? probably tell the user somehow
-			}
-			cfg = cfg.Merge(l.Config)
-		}
-
-		pkg := &Pkg{
-			SSA:          ssapkg,
-			Package:      pkg,
-			Config:       cfg,
-			Generated:    map[string]bool{},
-			tokenFileMap: map[*token.File]*ast.File{},
-		}
-		pkg.Inspector = inspector.New(pkg.Syntax)
-		for _, f := range pkg.Syntax {
-			tf := pkg.Fset.File(f.Pos())
-			pkg.tokenFileMap[tf] = f
-
-			path := DisplayPosition(pkg.Fset, f.Pos()).Filename
-			pkg.Generated[path] = isGenerated(path)
-		}
-		pkgMap[ssapkg] = pkg
-		pkgs = append(pkgs, pkg)
-	}
-
-	prog := &Program{
-		SSA:             ssaprog,
-		InitialPackages: pkgs,
-		AllPackages:     allPkgs,
-	}
-
-	for fn := range ssautil.AllFunctions(ssaprog) {
-		prog.AllFunctions = append(prog.AllFunctions, fn)
-		if fn.Pkg == nil {
-			continue
-		}
-		if pkg, ok := pkgMap[fn.Pkg]; ok {
-			pkg.InitialFunctions = append(pkg.InitialFunctions, fn)
-		}
-	}
-
-	var out []Problem
-	l.automaticIgnores = nil
-	for _, pkg := range initial {
-		for _, f := range pkg.Syntax {
-			found := false
-		commentLoop:
-			for _, cg := range f.Comments {
-				for _, c := range cg.List {
-					if strings.Contains(c.Text, "//lint:") {
-						found = true
-						break commentLoop
-					}
-				}
-			}
-			if !found {
-				continue
-			}
-			cm := ast.NewCommentMap(pkg.Fset, f, f.Comments)
-			for node, cgs := range cm {
-				for _, cg := range cgs {
-					for _, c := range cg.List {
-						if !strings.HasPrefix(c.Text, "//lint:") {
-							continue
-						}
-						cmd, args := parseDirective(c.Text)
-						switch cmd {
-						case "ignore", "file-ignore":
-							if len(args) < 2 {
-								// FIXME(dh): this causes duplicated warnings when using megacheck
-								p := Problem{
-									Position: DisplayPosition(prog.Fset(), c.Pos()),
-									Text:     "malformed linter directive; missing the required reason field?",
-									Check:    "",
-									Package:  nil,
-								}
-								out = append(out, p)
-								continue
-							}
-						default:
-							// unknown directive, ignore
-							continue
-						}
-						checks := strings.Split(args[0], ",")
-						pos := DisplayPosition(prog.Fset(), node.Pos())
-						var ig Ignore
-						switch cmd {
-						case "ignore":
-							ig = &LineIgnore{
-								File:   pos.Filename,
-								Line:   pos.Line,
-								Checks: checks,
-								pos:    c.Pos(),
-							}
-						case "file-ignore":
-							ig = &FileIgnore{
-								File:   pos.Filename,
-								Checks: checks,
-							}
-						}
-						l.automaticIgnores = append(l.automaticIgnores, ig)
-					}
-				}
-			}
-		}
-	}
-
-	if stats != nil {
-		stats.OtherInitWork = time.Since(t)
-	}
-
-	for _, checker := range l.Checkers {
-		t := time.Now()
-		checker.Init(prog)
-		if stats != nil {
-			stats.CheckerInits[checker.Name()] = time.Since(t)
-		}
-	}
-
-	var jobs []*Job
-	var allChecks []string
-
-	var wg sync.WaitGroup
-	for _, checker := range l.Checkers {
-		for _, check := range checker.Checks() {
-			allChecks = append(allChecks, check.ID)
-			if check.Fn == nil {
-				continue
-			}
-			for _, pkg := range pkgs {
-				j := &Job{
-					Pkg:       pkg,
-					check:     check,
-					GoVersion: l.GoVersion,
-				}
-				jobs = append(jobs, j)
-				wg.Add(1)
-				go func(check Check, j *Job) {
-					t := time.Now()
-					check.Fn(j)
-					j.duration = time.Since(t)
-					wg.Done()
-				}(check, j)
-			}
-		}
-	}
-
-	wg.Wait()
-
-	for _, j := range jobs {
-		if stats != nil {
-			stats.Jobs = append(stats.Jobs, JobStat{j.check.ID, j.duration})
-		}
-		for _, p := range j.problems {
-			if p.Package == nil {
-				panic(fmt.Sprintf("internal error: problem at position %s has nil package", p.Position))
-			}
-			allowedChecks := FilterChecks(allChecks, p.Package.Config.Checks)
-
-			if l.ignore(p) {
-				p.Severity = Ignored
-			}
-			// TODO(dh): support globs in check white/blacklist
-			// OPT(dh): this approach doesn't actually disable checks,
-			// it just discards their results. For the moment, that's
-			// fine. None of our checks are super expensive. In the
-			// future, we may want to provide opt-in expensive
-			// analysis, which shouldn't run at all. It may be easiest
-			// to implement this in the individual checks.
-			if (l.ReturnIgnored || p.Severity != Ignored) && allowedChecks[p.Check] {
-				out = append(out, p)
-			}
-		}
-	}
-
-	for _, ig := range l.automaticIgnores {
-		ig, ok := ig.(*LineIgnore)
-		if !ok {
-			continue
-		}
-		if ig.matched {
-			continue
-		}
-
-		couldveMatched := false
-		for _, pkg := range pkgs {
-			for _, f := range pkg.tokenFileMap {
-				if prog.Fset().Position(f.Pos()).Filename != ig.File {
-					continue
-				}
-				allowedChecks := FilterChecks(allChecks, pkg.Config.Checks)
-				for _, c := range ig.Checks {
-					if !allowedChecks[c] {
-						continue
-					}
-					couldveMatched = true
-					break
-				}
-				break
-			}
-		}
-
-		if !couldveMatched {
-			// The ignored checks were disabled for the containing package.
-			// Don't flag the ignore for not having matched.
-			continue
-		}
-		p := Problem{
-			Position: DisplayPosition(prog.Fset(), ig.pos),
-			Text:     "this linter directive didn't match anything; should it be removed?",
-			Check:    "",
-			Package:  nil,
-		}
-		out = append(out, p)
-	}
-
-	sort.Slice(out, func(i int, j int) bool {
-		pi, pj := out[i].Position, out[j].Position
-
-		if pi.Filename != pj.Filename {
-			return pi.Filename < pj.Filename
-		}
-		if pi.Line != pj.Line {
-			return pi.Line < pj.Line
-		}
-		if pi.Column != pj.Column {
-			return pi.Column < pj.Column
-		}
-
-		return out[i].Text < out[j].Text
-	})
-
-	if l.PrintStats && stats != nil {
-		stats.Print(os.Stderr)
-	}
-
-	if len(out) < 2 {
-		return out
-	}
-
-	uniq := make([]Problem, 0, len(out))
-	uniq = append(uniq, out[0])
-	prev := out[0]
-	for _, p := range out[1:] {
-		if prev.Position == p.Position && prev.Text == p.Text {
-			continue
-		}
-		prev = p
-		uniq = append(uniq, p)
-	}
-
-	return uniq
-}
-
-func FilterChecks(allChecks []string, checks []string) map[string]bool {
-	// OPT(dh): this entire computation could be cached per package
-	allowedChecks := map[string]bool{}
-
-	for _, check := range checks {
-		b := true
-		if len(check) > 1 && check[0] == '-' {
-			b = false
-			check = check[1:]
-		}
-		if check == "*" || check == "all" {
-			// Match all
-			for _, c := range allChecks {
-				allowedChecks[c] = b
-			}
-		} else if strings.HasSuffix(check, "*") {
-			// Glob
-			prefix := check[:len(check)-1]
-			isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1
-
-			for _, c := range allChecks {
-				idx := strings.IndexFunc(c, func(r rune) bool { return unicode.IsNumber(r) })
-				if isCat {
-					// Glob is S*, which should match S1000 but not SA1000
-					cat := c[:idx]
-					if prefix == cat {
-						allowedChecks[c] = b
-					}
-				} else {
-					// Glob is S1*
-					if strings.HasPrefix(c, prefix) {
-						allowedChecks[c] = b
-					}
-				}
-			}
-		} else {
-			// Literal check name
-			allowedChecks[check] = b
-		}
-	}
-	return allowedChecks
-}
-
-// Pkg represents a package being linted.
-type Pkg struct {
-	SSA              *ssa.Package
-	InitialFunctions []*ssa.Function
-	*packages.Package
-	Config    config.Config
-	Inspector *inspector.Inspector
-	// TODO(dh): this map should probably map from *ast.File, not string
-	Generated map[string]bool
-
-	tokenFileMap map[*token.File]*ast.File
-}
-
-type Positioner interface {
-	Pos() token.Pos
-}
-
-func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position {
-	// Only use the adjusted position if it points to another Go file.
-	// This means we'll point to the original file for cgo files, but
-	// we won't point to a YACC grammar file.
-
-	pos := fset.PositionFor(p, false)
-	adjPos := fset.PositionFor(p, true)
-
-	if filepath.Ext(adjPos.Filename) == ".go" {
-		return adjPos
-	}
-	return pos
-}
-
-func (j *Job) Errorf(n Positioner, format string, args ...interface{}) *Problem {
-	pos := DisplayPosition(j.Pkg.Fset, n.Pos())
-	if j.Pkg.Generated[pos.Filename] && j.check.FilterGenerated {
-		return nil
-	}
-	problem := Problem{
-		Position: pos,
-		Text:     fmt.Sprintf(format, args...),
-		Check:    j.check.ID,
-		Package:  j.Pkg,
-	}
-	j.problems = append(j.problems, problem)
-	return &j.problems[len(j.problems)-1]
-}
-
-func allPackages(pkgs []*packages.Package) []*packages.Package {
-	var out []*packages.Package
-	packages.Visit(
-		pkgs,
-		func(pkg *packages.Package) bool {
-			out = append(out, pkg)
-			return true
-		},
-		nil,
-	)
-	return out
-}
-
-var bufferPool = &sync.Pool{
-	New: func() interface{} {
-		buf := bytes.NewBuffer(nil)
-		buf.Grow(64)
-		return buf
-	},
-}
-
-func FuncName(f *types.Func) string {
-	buf := bufferPool.Get().(*bytes.Buffer)
-	buf.Reset()
-	if f.Type() != nil {
-		sig := f.Type().(*types.Signature)
-		if recv := sig.Recv(); recv != nil {
-			buf.WriteByte('(')
-			if _, ok := recv.Type().(*types.Interface); ok {
-				// gcimporter creates abstract methods of
-				// named interfaces using the interface type
-				// (not the named type) as the receiver.
-				// Don't print it in full.
-				buf.WriteString("interface")
-			} else {
-				types.WriteType(buf, recv.Type(), nil)
-			}
-			buf.WriteByte(')')
-			buf.WriteByte('.')
-		} else if f.Pkg() != nil {
-			writePackage(buf, f.Pkg())
-		}
-	}
-	buf.WriteString(f.Name())
-	s := buf.String()
-	bufferPool.Put(buf)
-	return s
-}
-
-func writePackage(buf *bytes.Buffer, pkg *types.Package) {
-	if pkg == nil {
-		return
-	}
-	var s string
-	s = pkg.Path()
-	if s != "" {
-		buf.WriteString(s)
-		buf.WriteByte('.')
-	}
-}
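
FilterChecks above implements the check-selection syntax used in staticcheck.conf: entries apply in order, "all" selects everything, a leading "-" disables, and a glob such as "S*" matches a whole category (S1000 but not SA1000) while "S1*" matches by literal prefix. A trimmed-down standalone sketch of those semantics; filter is an illustrative name, not the vendored function:

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// filter expands the selection list against the set of all known checks.
func filter(all, selected []string) map[string]bool {
	allowed := map[string]bool{}
	for _, check := range selected {
		enable := true
		if strings.HasPrefix(check, "-") {
			enable, check = false, check[1:]
		}
		switch {
		case check == "all" || check == "*":
			for _, c := range all {
				allowed[c] = enable
			}
		case strings.HasSuffix(check, "*"):
			prefix := strings.TrimSuffix(check, "*")
			wholeCategory := strings.IndexFunc(prefix, unicode.IsNumber) == -1
			for _, c := range all {
				idx := strings.IndexFunc(c, unicode.IsNumber)
				if wholeCategory && idx >= 0 && c[:idx] == prefix {
					allowed[c] = enable // "S*" matches S1000 but not SA1000
				} else if !wholeCategory && strings.HasPrefix(c, prefix) {
					allowed[c] = enable // "S1*" matches by literal prefix
				}
			}
		default:
			allowed[check] = enable // literal check name
		}
	}
	return allowed
}

func main() {
	all := []string{"S1000", "SA1000", "ST1003"}
	fmt.Println(filter(all, []string{"all", "-ST1003"})) // map[S1000:true SA1000:true ST1003:false]
}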

+ 0 - 361
vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go

@@ -1,361 +0,0 @@
-// Package lintdsl provides helpers for implementing static analysis
-// checks. Dot-importing this package is encouraged.
-package lintdsl
-
-import (
-	"bytes"
-	"fmt"
-	"go/ast"
-	"go/constant"
-	"go/printer"
-	"go/token"
-	"go/types"
-	"strings"
-
-	"honnef.co/go/tools/lint"
-	"honnef.co/go/tools/ssa"
-)
-
-type packager interface {
-	Package() *ssa.Package
-}
-
-func CallName(call *ssa.CallCommon) string {
-	if call.IsInvoke() {
-		return ""
-	}
-	switch v := call.Value.(type) {
-	case *ssa.Function:
-		fn, ok := v.Object().(*types.Func)
-		if !ok {
-			return ""
-		}
-		return lint.FuncName(fn)
-	case *ssa.Builtin:
-		return v.Name()
-	}
-	return ""
-}
-
-func IsCallTo(call *ssa.CallCommon, name string) bool { return CallName(call) == name }
-func IsType(T types.Type, name string) bool           { return types.TypeString(T, nil) == name }
-
-func FilterDebug(instr []ssa.Instruction) []ssa.Instruction {
-	var out []ssa.Instruction
-	for _, ins := range instr {
-		if _, ok := ins.(*ssa.DebugRef); !ok {
-			out = append(out, ins)
-		}
-	}
-	return out
-}
-
-func IsExample(fn *ssa.Function) bool {
-	if !strings.HasPrefix(fn.Name(), "Example") {
-		return false
-	}
-	f := fn.Prog.Fset.File(fn.Pos())
-	if f == nil {
-		return false
-	}
-	return strings.HasSuffix(f.Name(), "_test.go")
-}
-
-func IsPointerLike(T types.Type) bool {
-	switch T := T.Underlying().(type) {
-	case *types.Interface, *types.Chan, *types.Map, *types.Signature, *types.Pointer:
-		return true
-	case *types.Basic:
-		return T.Kind() == types.UnsafePointer
-	}
-	return false
-}
-
-func IsGenerated(f *ast.File) bool {
-	comments := f.Comments
-	if len(comments) > 0 {
-		comment := comments[0].Text()
-		return strings.Contains(comment, "Code generated by") ||
-			strings.Contains(comment, "DO NOT EDIT")
-	}
-	return false
-}
-
-func IsIdent(expr ast.Expr, ident string) bool {
-	id, ok := expr.(*ast.Ident)
-	return ok && id.Name == ident
-}
-
-// IsBlank returns whether id is the blank identifier "_".
-// If id == nil, the answer is false.
-func IsBlank(id ast.Expr) bool {
-	ident, _ := id.(*ast.Ident)
-	return ident != nil && ident.Name == "_"
-}
-
-func IsIntLiteral(expr ast.Expr, literal string) bool {
-	lit, ok := expr.(*ast.BasicLit)
-	return ok && lit.Kind == token.INT && lit.Value == literal
-}
-
-// Deprecated: use IsIntLiteral instead
-func IsZero(expr ast.Expr) bool {
-	return IsIntLiteral(expr, "0")
-}
-
-func IsOfType(j *lint.Job, expr ast.Expr, name string) bool {
-	return IsType(j.Pkg.TypesInfo.TypeOf(expr), name)
-}
-
-func IsInTest(j *lint.Job, node lint.Positioner) bool {
-	// FIXME(dh): this doesn't work for global variables with
-	// initializers
-	f := j.Pkg.Fset.File(node.Pos())
-	return f != nil && strings.HasSuffix(f.Name(), "_test.go")
-}
-
-func IsInMain(j *lint.Job, node lint.Positioner) bool {
-	if node, ok := node.(packager); ok {
-		return node.Package().Pkg.Name() == "main"
-	}
-	return j.Pkg.Types.Name() == "main"
-}
-
-func SelectorName(j *lint.Job, expr *ast.SelectorExpr) string {
-	info := j.Pkg.TypesInfo
-	sel := info.Selections[expr]
-	if sel == nil {
-		if x, ok := expr.X.(*ast.Ident); ok {
-			pkg, ok := info.ObjectOf(x).(*types.PkgName)
-			if !ok {
-				// This shouldn't happen
-				return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name)
-			}
-			return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name)
-		}
-		panic(fmt.Sprintf("unsupported selector: %v", expr))
-	}
-	return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name())
-}
-
-func IsNil(j *lint.Job, expr ast.Expr) bool {
-	return j.Pkg.TypesInfo.Types[expr].IsNil()
-}
-
-func BoolConst(j *lint.Job, expr ast.Expr) bool {
-	val := j.Pkg.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val()
-	return constant.BoolVal(val)
-}
-
-func IsBoolConst(j *lint.Job, expr ast.Expr) bool {
-	// We explicitly don't support typed bools because more often than
-	// not, custom bool types are used as binary enums and the
-	// explicit comparison is desired.
-
-	ident, ok := expr.(*ast.Ident)
-	if !ok {
-		return false
-	}
-	obj := j.Pkg.TypesInfo.ObjectOf(ident)
-	c, ok := obj.(*types.Const)
-	if !ok {
-		return false
-	}
-	basic, ok := c.Type().(*types.Basic)
-	if !ok {
-		return false
-	}
-	if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool {
-		return false
-	}
-	return true
-}
-
-func ExprToInt(j *lint.Job, expr ast.Expr) (int64, bool) {
-	tv := j.Pkg.TypesInfo.Types[expr]
-	if tv.Value == nil {
-		return 0, false
-	}
-	if tv.Value.Kind() != constant.Int {
-		return 0, false
-	}
-	return constant.Int64Val(tv.Value)
-}
-
-func ExprToString(j *lint.Job, expr ast.Expr) (string, bool) {
-	val := j.Pkg.TypesInfo.Types[expr].Value
-	if val == nil {
-		return "", false
-	}
-	if val.Kind() != constant.String {
-		return "", false
-	}
-	return constant.StringVal(val), true
-}
-
-// Dereference returns a pointer's element type; otherwise it returns
-// T.
-func Dereference(T types.Type) types.Type {
-	if p, ok := T.Underlying().(*types.Pointer); ok {
-		return p.Elem()
-	}
-	return T
-}
-
-// DereferenceR returns a pointer's element type; otherwise it returns
-// T. If the element type is itself a pointer, DereferenceR will be
-// applied recursively.
-func DereferenceR(T types.Type) types.Type {
-	if p, ok := T.Underlying().(*types.Pointer); ok {
-		return DereferenceR(p.Elem())
-	}
-	return T
-}
-
-func IsGoVersion(j *lint.Job, minor int) bool {
-	return j.GoVersion >= minor
-}
-
-func CallNameAST(j *lint.Job, call *ast.CallExpr) string {
-	switch fun := call.Fun.(type) {
-	case *ast.SelectorExpr:
-		fn, ok := j.Pkg.TypesInfo.ObjectOf(fun.Sel).(*types.Func)
-		if !ok {
-			return ""
-		}
-		return lint.FuncName(fn)
-	case *ast.Ident:
-		obj := j.Pkg.TypesInfo.ObjectOf(fun)
-		switch obj := obj.(type) {
-		case *types.Func:
-			return lint.FuncName(obj)
-		case *types.Builtin:
-			return obj.Name()
-		default:
-			return ""
-		}
-	default:
-		return ""
-	}
-}
-
-func IsCallToAST(j *lint.Job, node ast.Node, name string) bool {
-	call, ok := node.(*ast.CallExpr)
-	if !ok {
-		return false
-	}
-	return CallNameAST(j, call) == name
-}
-
-func IsCallToAnyAST(j *lint.Job, node ast.Node, names ...string) bool {
-	for _, name := range names {
-		if IsCallToAST(j, node, name) {
-			return true
-		}
-	}
-	return false
-}
-
-func Render(j *lint.Job, x interface{}) string {
-	var buf bytes.Buffer
-	if err := printer.Fprint(&buf, j.Pkg.Fset, x); err != nil {
-		panic(err)
-	}
-	return buf.String()
-}
-
-func RenderArgs(j *lint.Job, args []ast.Expr) string {
-	var ss []string
-	for _, arg := range args {
-		ss = append(ss, Render(j, arg))
-	}
-	return strings.Join(ss, ", ")
-}
-
-func Preamble(f *ast.File) string {
-	cutoff := f.Package
-	if f.Doc != nil {
-		cutoff = f.Doc.Pos()
-	}
-	var out []string
-	for _, cmt := range f.Comments {
-		if cmt.Pos() >= cutoff {
-			break
-		}
-		out = append(out, cmt.Text())
-	}
-	return strings.Join(out, "\n")
-}
-
-func Inspect(node ast.Node, fn func(node ast.Node) bool) {
-	if node == nil {
-		return
-	}
-	ast.Inspect(node, fn)
-}
-
-func GroupSpecs(fset *token.FileSet, specs []ast.Spec) [][]ast.Spec {
-	if len(specs) == 0 {
-		return nil
-	}
-	groups := make([][]ast.Spec, 1)
-	groups[0] = append(groups[0], specs[0])
-
-	for _, spec := range specs[1:] {
-		g := groups[len(groups)-1]
-		if fset.PositionFor(spec.Pos(), false).Line-1 !=
-			fset.PositionFor(g[len(g)-1].End(), false).Line {
-
-			groups = append(groups, nil)
-		}
-
-		groups[len(groups)-1] = append(groups[len(groups)-1], spec)
-	}
-
-	return groups
-}
-
-func IsObject(obj types.Object, name string) bool {
-	var path string
-	if pkg := obj.Pkg(); pkg != nil {
-		path = pkg.Path() + "."
-	}
-	return path+obj.Name() == name
-}
-
-type Field struct {
-	Var  *types.Var
-	Tag  string
-	Path []int
-}
-
-// FlattenFields recursively flattens T and embedded structs,
-// returning a list of fields. If multiple fields with the same name
-// exist, all will be returned.
-func FlattenFields(T *types.Struct) []Field {
-	return flattenFields(T, nil, nil)
-}
-
-func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field {
-	if seen == nil {
-		seen = map[types.Type]bool{}
-	}
-	if seen[T] {
-		return nil
-	}
-	seen[T] = true
-	var out []Field
-	for i := 0; i < T.NumFields(); i++ {
-		field := T.Field(i)
-		tag := T.Tag(i)
-		np := append(path[:len(path):len(path)], i)
-		if field.Anonymous() {
-			if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok {
-				out = append(out, flattenFields(s, np, seen)...)
-			}
-		} else {
-			out = append(out, Field{field, tag, np})
-		}
-	}
-	return out
-}

+ 0 - 128
vendor/honnef.co/go/tools/lint/lintutil/format/format.go

@@ -1,128 +0,0 @@
-// Package format provides formatters for linter problems.
-package format
-
-import (
-	"encoding/json"
-	"fmt"
-	"go/token"
-	"io"
-	"os"
-	"path/filepath"
-	"text/tabwriter"
-
-	"honnef.co/go/tools/lint"
-)
-
-func shortPath(path string) string {
-	cwd, err := os.Getwd()
-	if err != nil {
-		return path
-	}
-	if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) {
-		return rel
-	}
-	return path
-}
-
-func relativePositionString(pos token.Position) string {
-	s := shortPath(pos.Filename)
-	if pos.IsValid() {
-		if s != "" {
-			s += ":"
-		}
-		s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
-	}
-	if s == "" {
-		s = "-"
-	}
-	return s
-}
-
-type Statter interface {
-	Stats(total, errors, warnings int)
-}
-
-type Formatter interface {
-	Format(p lint.Problem)
-}
-
-type Text struct {
-	W io.Writer
-}
-
-func (o Text) Format(p lint.Problem) {
-	fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Position), p.String())
-}
-
-type JSON struct {
-	W io.Writer
-}
-
-func severity(s lint.Severity) string {
-	switch s {
-	case lint.Error:
-		return "error"
-	case lint.Warning:
-		return "warning"
-	case lint.Ignored:
-		return "ignored"
-	}
-	return ""
-}
-
-func (o JSON) Format(p lint.Problem) {
-	type location struct {
-		File   string `json:"file"`
-		Line   int    `json:"line"`
-		Column int    `json:"column"`
-	}
-	jp := struct {
-		Code     string   `json:"code"`
-		Severity string   `json:"severity,omitempty"`
-		Location location `json:"location"`
-		Message  string   `json:"message"`
-	}{
-		Code:     p.Check,
-		Severity: severity(p.Severity),
-		Location: location{
-			File:   p.Position.Filename,
-			Line:   p.Position.Line,
-			Column: p.Position.Column,
-		},
-		Message: p.Text,
-	}
-	_ = json.NewEncoder(o.W).Encode(jp)
-}
-
-type Stylish struct {
-	W io.Writer
-
-	prevFile string
-	tw       *tabwriter.Writer
-}
-
-func (o *Stylish) Format(p lint.Problem) {
-	if p.Position.Filename == "" {
-		p.Position.Filename = "-"
-	}
-
-	if p.Position.Filename != o.prevFile {
-		if o.prevFile != "" {
-			o.tw.Flush()
-			fmt.Fprintln(o.W)
-		}
-		fmt.Fprintln(o.W, p.Position.Filename)
-		o.prevFile = p.Position.Filename
-		o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0)
-	}
-	fmt.Fprintf(o.tw, "  (%d, %d)\t%s\t%s\n", p.Position.Line, p.Position.Column, p.Check, p.Text)
-}
-
-func (o *Stylish) Stats(total, errors, warnings int) {
-	if o.tw != nil {
-		o.tw.Flush()
-		fmt.Fprintln(o.W)
-	}
-	fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings)\n",
-		total, errors, warnings)
-}
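format.go above implements the "stylish" output by grouping problems per file and aligning columns with a text/tabwriter. The standalone sketch below, with made-up sample problems, shows the same tabwriter grouping technique using only the standard library; it is an illustration of the layout, not the removed code itself.

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

// problem is a minimal stand-in for the linter's Problem type, used
// here only to demonstrate the tabwriter-based "stylish" layout.
type problem struct {
	file         string
	line, column int
	check, text  string
}

func main() {
	problems := []problem{
		{"main.go", 10, 2, "S1000", "example finding in main.go"},
		{"main.go", 42, 5, "SA4006", "another example finding in main.go"},
		{"util.go", 7, 1, "ST1000", "example finding in util.go"},
	}

	prevFile := ""
	var tw *tabwriter.Writer
	for _, p := range problems {
		// Start a new block (and flush the previous one) whenever the
		// file name changes, exactly like the Stylish formatter above.
		if p.file != prevFile {
			if tw != nil {
				tw.Flush()
				fmt.Println()
			}
			fmt.Println(p.file)
			prevFile = p.file
			tw = tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0)
		}
		fmt.Fprintf(tw, "  (%d, %d)\t%s\t%s\n", p.line, p.column, p.check, p.text)
	}
	if tw != nil {
		tw.Flush()
	}
}
```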

+ 0 - 394
vendor/honnef.co/go/tools/lint/lintutil/util.go

@@ -1,394 +0,0 @@
-// Copyright (c) 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-// Package lintutil provides helpers for writing linter command lines.
-package lintutil // import "honnef.co/go/tools/lint/lintutil"
-
-import (
-	"errors"
-	"flag"
-	"fmt"
-	"go/build"
-	"go/token"
-	"log"
-	"os"
-	"regexp"
-	"runtime"
-	"runtime/debug"
-	"runtime/pprof"
-	"strconv"
-	"strings"
-	"time"
-
-	"honnef.co/go/tools/config"
-	"honnef.co/go/tools/lint"
-	"honnef.co/go/tools/lint/lintutil/format"
-	"honnef.co/go/tools/version"
-
-	"golang.org/x/tools/go/packages"
-)
-
-func usage(name string, flags *flag.FlagSet) func() {
-	return func() {
-		fmt.Fprintf(os.Stderr, "Usage of %s:\n", name)
-		fmt.Fprintf(os.Stderr, "\t%s [flags] # runs on package in current directory\n", name)
-		fmt.Fprintf(os.Stderr, "\t%s [flags] packages\n", name)
-		fmt.Fprintf(os.Stderr, "\t%s [flags] directory\n", name)
-		fmt.Fprintf(os.Stderr, "\t%s [flags] files... # must be a single package\n", name)
-		fmt.Fprintf(os.Stderr, "Flags:\n")
-		flags.PrintDefaults()
-	}
-}
-
-func parseIgnore(s string) ([]lint.Ignore, error) {
-	var out []lint.Ignore
-	if len(s) == 0 {
-		return nil, nil
-	}
-	for _, part := range strings.Fields(s) {
-		p := strings.Split(part, ":")
-		if len(p) != 2 {
-			return nil, errors.New("malformed ignore string")
-		}
-		path := p[0]
-		checks := strings.Split(p[1], ",")
-		out = append(out, &lint.GlobIgnore{Pattern: path, Checks: checks})
-	}
-	return out, nil
-}
-
-type versionFlag int
-
-func (v *versionFlag) String() string {
-	return fmt.Sprintf("1.%d", *v)
-}
-
-func (v *versionFlag) Set(s string) error {
-	if len(s) < 3 {
-		return errors.New("invalid Go version")
-	}
-	if s[0] != '1' {
-		return errors.New("invalid Go version")
-	}
-	if s[1] != '.' {
-		return errors.New("invalid Go version")
-	}
-	i, err := strconv.Atoi(s[2:])
-	*v = versionFlag(i)
-	return err
-}
-
-func (v *versionFlag) Get() interface{} {
-	return int(*v)
-}
-
-type list []string
-
-func (list *list) String() string {
-	return `"` + strings.Join(*list, ",") + `"`
-}
-
-func (list *list) Set(s string) error {
-	if s == "" {
-		*list = nil
-		return nil
-	}
-
-	*list = strings.Split(s, ",")
-	return nil
-}
-
-func FlagSet(name string) *flag.FlagSet {
-	flags := flag.NewFlagSet("", flag.ExitOnError)
-	flags.Usage = usage(name, flags)
-	flags.String("tags", "", "List of `build tags`")
-	flags.String("ignore", "", "Deprecated: use linter directives instead")
-	flags.Bool("tests", true, "Include tests")
-	flags.Bool("version", false, "Print version and exit")
-	flags.Bool("show-ignored", false, "Don't filter ignored problems")
-	flags.String("f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')")
-	flags.String("explain", "", "Print description of `check`")
-
-	flags.Int("debug.max-concurrent-jobs", 0, "Number of jobs to run concurrently")
-	flags.Bool("debug.print-stats", false, "Print debug statistics")
-	flags.String("debug.cpuprofile", "", "Write CPU profile to `file`")
-	flags.String("debug.memprofile", "", "Write memory profile to `file`")
-
-	checks := list{"inherit"}
-	fail := list{"all"}
-	flags.Var(&checks, "checks", "Comma-separated list of `checks` to enable.")
-	flags.Var(&fail, "fail", "Comma-separated list of `checks` that can cause a non-zero exit status.")
-
-	tags := build.Default.ReleaseTags
-	v := tags[len(tags)-1][2:]
-	version := new(versionFlag)
-	if err := version.Set(v); err != nil {
-		panic(fmt.Sprintf("internal error: %s", err))
-	}
-
-	flags.Var(version, "go", "Target Go `version` in the format '1.x'")
-	return flags
-}
-
-func findCheck(cs []lint.Checker, check string) (lint.Check, bool) {
-	for _, c := range cs {
-		for _, cc := range c.Checks() {
-			if cc.ID == check {
-				return cc, true
-			}
-		}
-	}
-	return lint.Check{}, false
-}
-
-func ProcessFlagSet(cs []lint.Checker, fs *flag.FlagSet) {
-	if _, ok := os.LookupEnv("GOGC"); !ok {
-		debug.SetGCPercent(50)
-	}
-
-	tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string)
-	ignore := fs.Lookup("ignore").Value.(flag.Getter).Get().(string)
-	tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool)
-	goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int)
-	formatter := fs.Lookup("f").Value.(flag.Getter).Get().(string)
-	printVersion := fs.Lookup("version").Value.(flag.Getter).Get().(bool)
-	showIgnored := fs.Lookup("show-ignored").Value.(flag.Getter).Get().(bool)
-	explain := fs.Lookup("explain").Value.(flag.Getter).Get().(string)
-
-	maxConcurrentJobs := fs.Lookup("debug.max-concurrent-jobs").Value.(flag.Getter).Get().(int)
-	printStats := fs.Lookup("debug.print-stats").Value.(flag.Getter).Get().(bool)
-	cpuProfile := fs.Lookup("debug.cpuprofile").Value.(flag.Getter).Get().(string)
-	memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string)
-
-	cfg := config.Config{}
-	cfg.Checks = *fs.Lookup("checks").Value.(*list)
-
-	exit := func(code int) {
-		if cpuProfile != "" {
-			pprof.StopCPUProfile()
-		}
-		if memProfile != "" {
-			f, err := os.Create(memProfile)
-			if err != nil {
-				panic(err)
-			}
-			runtime.GC()
-			pprof.WriteHeapProfile(f)
-		}
-		os.Exit(code)
-	}
-	if cpuProfile != "" {
-		f, err := os.Create(cpuProfile)
-		if err != nil {
-			log.Fatal(err)
-		}
-		pprof.StartCPUProfile(f)
-	}
-
-	if printVersion {
-		version.Print()
-		exit(0)
-	}
-
-	if explain != "" {
-		check, ok := findCheck(cs, explain)
-		if !ok {
-			fmt.Fprintln(os.Stderr, "Couldn't find check", explain)
-			exit(1)
-		}
-		if check.Doc == "" {
-			fmt.Fprintln(os.Stderr, explain, "has no documentation")
-			exit(1)
-		}
-		fmt.Println(check.Doc)
-		exit(0)
-	}
-
-	ps, err := Lint(cs, fs.Args(), &Options{
-		Tags:          strings.Fields(tags),
-		LintTests:     tests,
-		Ignores:       ignore,
-		GoVersion:     goVersion,
-		ReturnIgnored: showIgnored,
-		Config:        cfg,
-
-		MaxConcurrentJobs: maxConcurrentJobs,
-		PrintStats:        printStats,
-	})
-	if err != nil {
-		fmt.Fprintln(os.Stderr, err)
-		exit(1)
-	}
-
-	var f format.Formatter
-	switch formatter {
-	case "text":
-		f = format.Text{W: os.Stdout}
-	case "stylish":
-		f = &format.Stylish{W: os.Stdout}
-	case "json":
-		f = format.JSON{W: os.Stdout}
-	default:
-		fmt.Fprintf(os.Stderr, "unsupported output format %q\n", formatter)
-		exit(2)
-	}
-
-	var (
-		total    int
-		errors   int
-		warnings int
-	)
-
-	fail := *fs.Lookup("fail").Value.(*list)
-	var allChecks []string
-	for _, p := range ps {
-		allChecks = append(allChecks, p.Check)
-	}
-
-	shouldExit := lint.FilterChecks(allChecks, fail)
-
-	total = len(ps)
-	for _, p := range ps {
-		if shouldExit[p.Check] {
-			errors++
-		} else {
-			p.Severity = lint.Warning
-			warnings++
-		}
-		f.Format(p)
-	}
-	if f, ok := f.(format.Statter); ok {
-		f.Stats(total, errors, warnings)
-	}
-	if errors > 0 {
-		exit(1)
-	}
-}
-
-type Options struct {
-	Config config.Config
-
-	Tags          []string
-	LintTests     bool
-	Ignores       string
-	GoVersion     int
-	ReturnIgnored bool
-
-	MaxConcurrentJobs int
-	PrintStats        bool
-}
-
-func Lint(cs []lint.Checker, paths []string, opt *Options) ([]lint.Problem, error) {
-	stats := lint.PerfStats{
-		CheckerInits: map[string]time.Duration{},
-	}
-
-	if opt == nil {
-		opt = &Options{}
-	}
-	ignores, err := parseIgnore(opt.Ignores)
-	if err != nil {
-		return nil, err
-	}
-
-	conf := &packages.Config{
-		Mode:  packages.LoadAllSyntax,
-		Tests: opt.LintTests,
-		BuildFlags: []string{
-			"-tags=" + strings.Join(opt.Tags, " "),
-		},
-	}
-
-	t := time.Now()
-	if len(paths) == 0 {
-		paths = []string{"."}
-	}
-	pkgs, err := packages.Load(conf, paths...)
-	if err != nil {
-		return nil, err
-	}
-	stats.PackageLoading = time.Since(t)
-	runtime.GC()
-
-	var problems []lint.Problem
-	workingPkgs := make([]*packages.Package, 0, len(pkgs))
-	for _, pkg := range pkgs {
-		if pkg.IllTyped {
-			problems = append(problems, compileErrors(pkg)...)
-		} else {
-			workingPkgs = append(workingPkgs, pkg)
-		}
-	}
-
-	if len(workingPkgs) == 0 {
-		return problems, nil
-	}
-
-	l := &lint.Linter{
-		Checkers:      cs,
-		Ignores:       ignores,
-		GoVersion:     opt.GoVersion,
-		ReturnIgnored: opt.ReturnIgnored,
-		Config:        opt.Config,
-
-		MaxConcurrentJobs: opt.MaxConcurrentJobs,
-		PrintStats:        opt.PrintStats,
-	}
-	problems = append(problems, l.Lint(workingPkgs, &stats)...)
-
-	return problems, nil
-}
-
-var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?$`)
-
-func parsePos(pos string) token.Position {
-	if pos == "-" || pos == "" {
-		return token.Position{}
-	}
-	parts := posRe.FindStringSubmatch(pos)
-	if parts == nil {
-		panic(fmt.Sprintf("internal error: malformed position %q", pos))
-	}
-	file := parts[1]
-	line, _ := strconv.Atoi(parts[2])
-	col, _ := strconv.Atoi(parts[3])
-	return token.Position{
-		Filename: file,
-		Line:     line,
-		Column:   col,
-	}
-}
-
-func compileErrors(pkg *packages.Package) []lint.Problem {
-	if !pkg.IllTyped {
-		return nil
-	}
-	if len(pkg.Errors) == 0 {
-		// transitively ill-typed
-		var ps []lint.Problem
-		for _, imp := range pkg.Imports {
-			ps = append(ps, compileErrors(imp)...)
-		}
-		return ps
-	}
-	var ps []lint.Problem
-	for _, err := range pkg.Errors {
-		p := lint.Problem{
-			Position: parsePos(err.Pos),
-			Text:     err.Msg,
-			Check:    "compile",
-		}
-		ps = append(ps, p)
-	}
-	return ps
-}
-
-func ProcessArgs(name string, cs []lint.Checker, args []string) {
-	flags := FlagSet(name)
-	flags.Parse(args)
-
-	ProcessFlagSet(cs, flags)
-}
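util.go above wires up the linter's command line: a custom flag.Value that parses the target Go version, profiling hooks, and the packages.Load call. A minimal sketch of just the version-flag pattern, assuming nothing beyond the standard flag package (the flag-set name "example" and the sample arguments are arbitrary), looks like this:

```go
package main

import (
	"errors"
	"flag"
	"fmt"
	"strconv"
)

// goVersion implements flag.Value: it accepts a Go release in the form
// "1.x" and stores only the minor component, mirroring the versionFlag
// type in the removed lintutil package.
type goVersion int

func (v *goVersion) String() string { return fmt.Sprintf("1.%d", *v) }

func (v *goVersion) Set(s string) error {
	if len(s) < 3 || s[0] != '1' || s[1] != '.' {
		return errors.New("invalid Go version")
	}
	minor, err := strconv.Atoi(s[2:])
	if err != nil {
		return err
	}
	*v = goVersion(minor)
	return nil
}

func main() {
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	v := new(goVersion)
	fs.Var(v, "go", "target Go `version` in the form '1.x'")
	_ = fs.Parse([]string{"-go", "1.14"})
	fmt.Println(int(*v)) // 14
}
```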

+ 0 - 11
vendor/honnef.co/go/tools/printf/fuzz.go

@@ -1,11 +0,0 @@
-// +build gofuzz
-
-package printf
-
-func Fuzz(data []byte) int {
-	_, err := Parse(string(data))
-	if err == nil {
-		return 1
-	}
-	return 0
-}
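fuzz.go above is a go-fuzz harness for the printf directive parser. Roughly the same harness can be written with native Go fuzzing (Go 1.18+); in the sketch below, parseVerbs is a hypothetical stand-in for the removed package's Parse function and the package name is arbitrary, so this shows the harness shape rather than a drop-in replacement.

```go
package parse

import "testing"

// parseVerbs is a hypothetical placeholder for the removed Parse
// function; any parser that returns an error on malformed input fits
// the same harness shape.
func parseVerbs(s string) error { return nil }

// FuzzParse plays the role of the go-fuzz Fuzz function above: hand
// arbitrary byte slices to the parser and let the fuzzer hunt for
// panics or crashes.
func FuzzParse(f *testing.F) {
	f.Add([]byte("%d %s"))
	f.Fuzz(func(t *testing.T, data []byte) {
		_ = parseVerbs(string(data))
	})
}
```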

Some files were not shown because too many files changed in this diff.