vendor: cloud.google.com/go v0.92.0, google.golang.org/api v0.54.0

This removes a `tools.go` file from the dependency, which caused various test-only
dependencies to end up in the dependency tree; those dependencies are now gone.
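
As a rough sketch of how a bump like this is typically produced and checked (the exact
vendoring workflow used by this repository may differ; the module paths and versions are
taken from this commit, everything else is illustrative):

```bash
# Update both modules to the target versions.
go get cloud.google.com/go@v0.92.0 google.golang.org/api@v0.54.0

# Prune the module graph and refresh the vendored sources.
go mod tidy
go mod vendor

# Confirm the test-only tools previously pulled in via tools.go are gone;
# "go mod why -m" reports when the main module no longer needs a module.
go mod why -m github.com/jstemmer/go-junit-report golang.org/x/lint
```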

- cloud.google.com/go v0.92.0: https://github.com/googleapis/google-cloud-go/compare/v0.81.0...v0.92.0
- google.golang.org/api v0.54.0: https://github.com/googleapis/google-api-go-client/compare/v0.46.0...v0.54.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn 2022-02-17 23:49:31 +01:00
parent a583434ebc
commit 7df7357e08
89 changed files with 373 additions and 32567 deletions


@@ -7,7 +7,7 @@ module github.com/docker/docker
go 1.17
require (
cloud.google.com/go v0.81.0
cloud.google.com/go v0.92.0
cloud.google.com/go/logging v1.4.2
github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0
github.com/Microsoft/go-winio v0.5.1
@@ -119,7 +119,6 @@ require (
github.com/hashicorp/golang-lru v0.5.3 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.3.0 // indirect
github.com/jstemmer/go-junit-report v0.9.1 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/hashstructure v1.0.0 // indirect
github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect
@@ -143,13 +142,10 @@ require (
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.17.0 // indirect
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e // indirect
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.5 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/api v0.46.0 // indirect
google.golang.org/api v0.54.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.27.1 // indirect
labix.org/v2/mgo v0.0.0-20140701140051-000000000287 // indirect


@@ -19,8 +19,13 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.92.0 h1:gx+SLyeAiq8Et/uxOrHPuLgu3ZQ2TguaO1OE9P05QsQ=
cloud.google.com/go v0.92.0/go.mod h1:cMc7asehN84LBi1JGTHo4n8wuaGuNAZ7lR7b1YNJBrE=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -516,6 +521,7 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -527,6 +533,10 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210804190019-f964ff605595/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
@@ -627,7 +637,6 @@ github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@@ -1053,7 +1062,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -1065,7 +1073,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1135,6 +1142,8 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1237,9 +1246,12 @@ golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1339,7 +1351,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1368,8 +1381,13 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.46.0 h1:jkDWHOBIoNSD0OQpq4rtBVu+Rh325MPjXG1rakAp8JU=
google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I=
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0 h1:ECJUVngj71QI6XEm7b1sAf8BljU5inEhMbKPR8Lxhhk=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1426,8 +1444,17 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
@@ -1457,10 +1484,13 @@ google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

226 vendor/cloud.google.com/go/CHANGES.md generated vendored

@@ -1,5 +1,231 @@
# Changes
## [0.92.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.91.0...v0.92.0) (2021-08-16)
### Features
* **all:** remove testing deps ([#4580](https://www.github.com/googleapis/google-cloud-go/issues/4580)) ([15c1eb9](https://www.github.com/googleapis/google-cloud-go/commit/15c1eb9730f0b514edb911161f9c59e8d790a5ec)), refs [#4061](https://www.github.com/googleapis/google-cloud-go/issues/4061)
* **internal/detect:** add helper to detect projectID from env ([#4582](https://www.github.com/googleapis/google-cloud-go/issues/4582)) ([cc65d94](https://www.github.com/googleapis/google-cloud-go/commit/cc65d945688ac446602bce6ef86a935714dfe2f8)), refs [#1294](https://www.github.com/googleapis/google-cloud-go/issues/1294)
* **spannertest:** Add validation of duplicated column names ([#4611](https://www.github.com/googleapis/google-cloud-go/issues/4611)) ([84f86a6](https://www.github.com/googleapis/google-cloud-go/commit/84f86a605c809ab36dd3cb4b3ab1df15a5302083))
## [0.91.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.90.0...v0.91.0) (2021-08-11)
### Features
* **.github:** support dynamic submodule detection ([#4537](https://www.github.com/googleapis/google-cloud-go/issues/4537)) ([4374b90](https://www.github.com/googleapis/google-cloud-go/commit/4374b907e9f166da6bd23a8ef94399872b00afd6))
* **dialogflow/cx:** add advanced settings for agent level feat: add rollout config, state and failure reason for experiment feat: add insights export settings for security setting feat: add language code for streaming recognition result and flow versions for query parameters docs: deprecate legacy logging settings ([ed73554](https://www.github.com/googleapis/google-cloud-go/commit/ed735541dc57d0681d84b46853393eac5f7ccec3))
* **dialogflow/cx:** add advanced settings for agent level feat: add rollout config, state and failure reason for experiment feat: add insights export settings for security setting feat: add language code for streaming recognition result and flow versions for query parameters docs: deprecate legacy logging settings ([ed73554](https://www.github.com/googleapis/google-cloud-go/commit/ed735541dc57d0681d84b46853393eac5f7ccec3))
* **dialogflow/cx:** added support for DLP templates; expose `Locations` service to get/list avaliable locations of Dialogflow products ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
* **dialogflow/cx:** added support for DLP templates; expose `Locations` service to get/list avaliable locations of Dialogflow products docs: reorder some fields ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
* **dialogflow:** expose `Locations` service to get/list avaliable locations of Dialogflow products; fixed some API annotations ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
* **kms:** add support for HMAC, Variable Key Destruction, and GenerateRandom ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
* **speech:** add total_billed_time response field ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
* **video/transcoder:** Add video cropping feature feat: Add video padding feature feat: Add ttl_after_completion_days field to Job docs: Update proto documentation docs: Indicate v1beta1 deprecation ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
### Bug Fixes
* **functions:** Updating behavior of source_upload_url during Get/List function calls ([381a494](https://www.github.com/googleapis/google-cloud-go/commit/381a494c29da388977b0bdda2177058328cc4afe))
## [0.90.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.89.0...v0.90.0) (2021-08-03)
### ⚠ BREAKING CHANGES
* **compute:** add pagination and an Operation wrapper (#4542)
### Features
* **compute:** add pagination and an Operation wrapper ([#4542](https://www.github.com/googleapis/google-cloud-go/issues/4542)) ([36f4649](https://www.github.com/googleapis/google-cloud-go/commit/36f46494111f6d16d103fb208d49616576dbf91e))
* **internal/godocfx:** add status to packages and TOCs ([#4547](https://www.github.com/googleapis/google-cloud-go/issues/4547)) ([c6de69c](https://www.github.com/googleapis/google-cloud-go/commit/c6de69c710561bb2a40eff05417df4b9798c258a))
* **internal/godocfx:** mark status of deprecated items ([#4525](https://www.github.com/googleapis/google-cloud-go/issues/4525)) ([d571c6f](https://www.github.com/googleapis/google-cloud-go/commit/d571c6f4337ec9c4807c230cd77f53b6e7db6437))
### Bug Fixes
* **internal/carver:** don't tag commits ([#4518](https://www.github.com/googleapis/google-cloud-go/issues/4518)) ([c355eb8](https://www.github.com/googleapis/google-cloud-go/commit/c355eb8ecb0bb1af0ccf55e6262ca8c0d5c7e352))
## [0.89.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.88.0...v0.89.0) (2021-07-29)
### Features
* **assuredworkloads:** Add EU Regions And Support compliance regime ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
* **datacatalog:** Added support for BigQuery connections entries feat: Added support for BigQuery routines entries feat: Added usage_signal field feat: Added labels field feat: Added ReplaceTaxonomy in Policy Tag Manager Serialization API feat: Added support for public tag templates feat: Added support for rich text tags docs: Documentation improvements ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
* **datafusion:** start generating apiv1 ([e55a016](https://www.github.com/googleapis/google-cloud-go/commit/e55a01667afaf36ff70807d061ecafb61827ba97))
* **iap:** start generating apiv1 ([e55a016](https://www.github.com/googleapis/google-cloud-go/commit/e55a01667afaf36ff70807d061ecafb61827ba97))
* **internal/carver:** add tooling to help carve out sub-modules ([#4417](https://www.github.com/googleapis/google-cloud-go/issues/4417)) ([a7e28f2](https://www.github.com/googleapis/google-cloud-go/commit/a7e28f2557469562ae57e5174b41bdf8fce62b63))
* **networkconnectivity:** Add files for Network Connectivity v1 API. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
* **retail:** Add restricted Retail Search features for Retail API v2. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
* **secretmanager:** In Secret Manager, users can now use filter to customize the output of ListSecrets/ListSecretVersions calls ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
* **securitycenter:** add finding_class and indicator fields in Finding ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
* **speech:** add total_billed_time response field. fix!: phrase_set_id is required field in CreatePhraseSetRequest. fix!: custom_class_id is required field in CreateCustomClassRequest. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
* **storagetransfer:** start generating apiv1 ([#4505](https://www.github.com/googleapis/google-cloud-go/issues/4505)) ([f2d531d](https://www.github.com/googleapis/google-cloud-go/commit/f2d531d2b519efa58e0f23a178bbebe675c203c3))
### Bug Fixes
* **internal/gapicgen:** exec Stdout already set ([#4509](https://www.github.com/googleapis/google-cloud-go/issues/4509)) ([41246e9](https://www.github.com/googleapis/google-cloud-go/commit/41246e900aaaea92a9f956e92956c40c86f4cb3a))
* **internal/gapicgen:** tidy all after dep bump ([#4515](https://www.github.com/googleapis/google-cloud-go/issues/4515)) ([9401be5](https://www.github.com/googleapis/google-cloud-go/commit/9401be509c570c3c55694375065c84139e961857)), refs [#4434](https://www.github.com/googleapis/google-cloud-go/issues/4434)
## [0.88.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.87.0...v0.88.0) (2021-07-22)
### ⚠ BREAKING CHANGES
* **cloudbuild/apiv1:** Proto had a prior definitions of WorkerPool resources which were never supported. This change replaces those resources with definitions that are currently supported.
### Features
* **cloudbuild/apiv1:** add a WorkerPools API ([19ea3f8](https://www.github.com/googleapis/google-cloud-go/commit/19ea3f830212582bfee21d9e09f0034f9ce76547))
* **cloudbuild/apiv1:** Implementation of Build Failure Info: - Added message FailureInfo field ([19ea3f8](https://www.github.com/googleapis/google-cloud-go/commit/19ea3f830212582bfee21d9e09f0034f9ce76547))
* **osconfig/agentendpoint:** OSConfig AgentEndpoint: add basic os info to RegisterAgentRequest, add WindowsApplication type to Inventory ([8936bc3](https://www.github.com/googleapis/google-cloud-go/commit/8936bc3f2d0fb2f6514f6e019fa247b8f41bd43c))
* **resourcesettings:** Publish Cloud ResourceSettings v1 API ([43ad3cb](https://www.github.com/googleapis/google-cloud-go/commit/43ad3cb7be981fff9dc5dcf4510f1cd7bea99957))
### Bug Fixes
* **internal/godocfx:** set exit code, print cmd output, no go get ... ([#4445](https://www.github.com/googleapis/google-cloud-go/issues/4445)) ([cc70f77](https://www.github.com/googleapis/google-cloud-go/commit/cc70f77ac279a62e24e1b07f6e53fd126b7286b0))
* **internal:** detect module for properly generating docs URLs ([#4460](https://www.github.com/googleapis/google-cloud-go/issues/4460)) ([1eaba8b](https://www.github.com/googleapis/google-cloud-go/commit/1eaba8bd694f7552a8e3e09b4f164de8b6ca23f0)), refs [#4447](https://www.github.com/googleapis/google-cloud-go/issues/4447)
* **kms:** Updating WORKSPACE files to use the newest version of the Typescript generator. ([8936bc3](https://www.github.com/googleapis/google-cloud-go/commit/8936bc3f2d0fb2f6514f6e019fa247b8f41bd43c))
## [0.87.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.86.0...v0.87.0) (2021-07-13)
### Features
* **container:** allow updating security group on existing clusters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c))
* **monitoring/dashboard:** added validation only mode when writing dashboards feat: added alert chart widget ([652d7c2](https://www.github.com/googleapis/google-cloud-go/commit/652d7c277da2f6774729064ab65d557875c81567))
* **networkmanagment:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06))
* **secretmanager:** Tune Secret Manager auto retry parameters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c))
* **video/transcoder:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06))
### Bug Fixes
* **compute:** properly generate PUT requests ([#4426](https://www.github.com/googleapis/google-cloud-go/issues/4426)) ([a7491a5](https://www.github.com/googleapis/google-cloud-go/commit/a7491a533e4ad75eb6d5f89718d4dafb0c5b4167))
* **internal:** fix relative pathing for generator ([#4397](https://www.github.com/googleapis/google-cloud-go/issues/4397)) ([25e0eae](https://www.github.com/googleapis/google-cloud-go/commit/25e0eaecf9feb1caa97988c5398ac58f6ca17391))
### Miscellaneous Chores
* **all:** fix release version ([#4427](https://www.github.com/googleapis/google-cloud-go/issues/4427)) ([2c0d267](https://www.github.com/googleapis/google-cloud-go/commit/2c0d2673ccab7281b6432215ee8279f9efd04a15))
## [0.86.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.85.0...v0.86.0) (2021-07-01)
### Features
* **bigquery managedwriter:** schema conversion support ([#4357](https://www.github.com/googleapis/google-cloud-go/issues/4357)) ([f2b20f4](https://www.github.com/googleapis/google-cloud-go/commit/f2b20f493e2ed5a883ce42fa65695c03c574feb5))
## [0.85.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.84.0...v0.85.0) (2021-06-30)
### Features
* **dataflow:** start generating apiv1beta3 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273))
* **datastream:** start generating apiv1alpha1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273))
* **dialogflow:** added Automated agent reply type and allow cancellation flag for partial response feature. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0))
* **documentai:** update document.proto, add the processor management methods. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0))
* **eventarc:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273))
* **gkehub:** added v1alpha messages and client for gkehub ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437))
* **internal/godocfx:** add support for other modules ([#4290](https://www.github.com/googleapis/google-cloud-go/issues/4290)) ([d52bae6](https://www.github.com/googleapis/google-cloud-go/commit/d52bae6cd77474174192c46236d309bf967dfa00))
* **internal/godocfx:** different metadata for different modules ([#4297](https://www.github.com/googleapis/google-cloud-go/issues/4297)) ([598f5b9](https://www.github.com/googleapis/google-cloud-go/commit/598f5b93778b2e2e75265ae54484dd54477433f5))
* **internal:** add force option for regen ([#4310](https://www.github.com/googleapis/google-cloud-go/issues/4310)) ([de654eb](https://www.github.com/googleapis/google-cloud-go/commit/de654ebfcf23a53b4d1fee0aa48c73999a55c1a6))
* **servicecontrol:** Added the gRPC service config for the Service Controller v1 API docs: Updated some comments. ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437))
* **workflows/executions:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273))
### Bug Fixes
* **internal:** add autogenerated header to snippets ([#4261](https://www.github.com/googleapis/google-cloud-go/issues/4261)) ([2220787](https://www.github.com/googleapis/google-cloud-go/commit/222078722c37c3fdadec7bbbe0bcf81edd105f1a)), refs [#4260](https://www.github.com/googleapis/google-cloud-go/issues/4260)
* **internal:** fix googleapis-disco regen ([#4354](https://www.github.com/googleapis/google-cloud-go/issues/4354)) ([aeea1ce](https://www.github.com/googleapis/google-cloud-go/commit/aeea1ce1e5dff3acdfe208932327b52c49851b41))
* **kms:** replace IAMPolicy mixin in service config. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0))
* **security/privateca:** Fixed casing of the Ruby namespace ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0))
## [0.84.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.83.0...v0.84.0) (2021-06-09)
### Features
* **aiplatform:** start generating apiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c))
* **apigeeconnect:** start generating abiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c))
* **dialogflow/cx:** support sentiment analysis in bot testing ([7a57aac](https://www.github.com/googleapis/google-cloud-go/commit/7a57aac996f2bae20ee6ddbd02ad9e56e380099b))
* **dialogflow/cx:** support sentiment analysis in bot testing ([6ad2306](https://www.github.com/googleapis/google-cloud-go/commit/6ad2306f64710ce16059b464342dbc6a98d2d9c2))
* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse. ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c))
* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse. ([18375e5](https://www.github.com/googleapis/google-cloud-go/commit/18375e50e8f16e63506129b8927a7b62f85e407b))
* **gkeconnect/gateway:** start generating apiv1beta1 ([#4235](https://www.github.com/googleapis/google-cloud-go/issues/4235)) ([1c3e968](https://www.github.com/googleapis/google-cloud-go/commit/1c3e9689d78670a231a3660db00fd4fd8f5c6345))
* **lifesciences:** strat generating apiv2beta ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c))
* **tpu:** start generating apiv1 ([#4199](https://www.github.com/googleapis/google-cloud-go/issues/4199)) ([cac48ea](https://www.github.com/googleapis/google-cloud-go/commit/cac48eab960cd34cc20732f6a1aeb93c540a036b))
### Bug Fixes
* **bttest:** fix race condition in SampleRowKeys ([#4207](https://www.github.com/googleapis/google-cloud-go/issues/4207)) ([5711fb1](https://www.github.com/googleapis/google-cloud-go/commit/5711fb10d25c458807598d736a232bb2210a047a))
* **documentai:** Fix Ruby gem title of documentai v1 (package not currently published) ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c))
## [0.83.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.82.0...v0.83.0) (2021-06-02)
### Features
* **dialogflow:** added a field in the query result to indicate whether slot filling is cancelled. ([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3))
* **essentialcontacts:** start generating apiv1 ([#4118](https://www.github.com/googleapis/google-cloud-go/issues/4118)) ([fe14afc](https://www.github.com/googleapis/google-cloud-go/commit/fe14afcf74e09089b22c4f5221cbe37046570fda))
* **gsuiteaddons:** start generating apiv1 ([#4082](https://www.github.com/googleapis/google-cloud-go/issues/4082)) ([6de5c99](https://www.github.com/googleapis/google-cloud-go/commit/6de5c99173c4eeaf777af18c47522ca15637d232))
* **osconfig:** OSConfig: add ExecResourceOutput and per step error message. ([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3))
* **osconfig:** start generating apiv1alpha ([#4119](https://www.github.com/googleapis/google-cloud-go/issues/4119)) ([8ad471f](https://www.github.com/googleapis/google-cloud-go/commit/8ad471f26087ec076460df6dcf27769ffe1b8834))
* **privatecatalog:** start generating apiv1beta1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076))
* **serviceusage:** start generating apiv1 ([#4120](https://www.github.com/googleapis/google-cloud-go/issues/4120)) ([e4531f9](https://www.github.com/googleapis/google-cloud-go/commit/e4531f93cfeb6388280bb253ef6eb231aba37098))
* **shell:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076))
* **vpcaccess:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076))
## [0.82.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.81.0...v0.82.0) (2021-05-17)
### Features
* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links. ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab))
* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links. ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab))
* **cloudbuild/apiv1:** Add fields for Pub/Sub triggers ([8b4adbf](https://www.github.com/googleapis/google-cloud-go/commit/8b4adbf9815e1ec229dfbcfb9189d3ea63112e1b))
* **cloudbuild/apiv1:** Implementation of Source Manifests: - Added message StorageSourceManifest as an option for the Source message - Added StorageSourceManifest field to the SourceProvenance message ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea))
* **clouddms:** start generating apiv1 ([#4081](https://www.github.com/googleapis/google-cloud-go/issues/4081)) ([29df85c](https://www.github.com/googleapis/google-cloud-go/commit/29df85c40ab64d59e389a980c9ce550077839763))
* **dataproc:** update the Dataproc V1 API client library ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea))
* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea))
* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
* **dialogflow:** added more Environment RPCs feat: added Versions service feat: added Fulfillment service feat: added TextToSpeechSettings. feat: added location in some resource patterns. ([4f73dc1](https://www.github.com/googleapis/google-cloud-go/commit/4f73dc19c2e05ad6133a8eac3d62ddb522314540))
* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06))
* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. ([07fdcd1](https://www.github.com/googleapis/google-cloud-go/commit/07fdcd12499eac26f9b5fae01d6c1282c3e02b7c))
* **internal/gapicgen:** only update relevant gapic files ([#4066](https://www.github.com/googleapis/google-cloud-go/issues/4066)) ([5948bee](https://www.github.com/googleapis/google-cloud-go/commit/5948beedbadd491601bdee6a006cf685e94a85f4))
* **internal/gensnippets:** add license header and region tags ([#3924](https://www.github.com/googleapis/google-cloud-go/issues/3924)) ([e9ff7a0](https://www.github.com/googleapis/google-cloud-go/commit/e9ff7a0f9bb1cc67f5d0de47934811960429e72c))
* **internal/gensnippets:** initial commit ([#3922](https://www.github.com/googleapis/google-cloud-go/issues/3922)) ([3fabef0](https://www.github.com/googleapis/google-cloud-go/commit/3fabef032388713f732ab4dbfc51624cdca0f481))
* **internal:** auto-generate snippets ([#3949](https://www.github.com/googleapis/google-cloud-go/issues/3949)) ([b70e0fc](https://www.github.com/googleapis/google-cloud-go/commit/b70e0fccdc86813e0d97ff63b585822d4deafb38))
* **internal:** generate region tags for snippets ([#3962](https://www.github.com/googleapis/google-cloud-go/issues/3962)) ([ef2b90e](https://www.github.com/googleapis/google-cloud-go/commit/ef2b90ea6d47e27744c98a1a9ae0c487c5051808))
* **metastore:** start generateing apiv1 ([#4083](https://www.github.com/googleapis/google-cloud-go/issues/4083)) ([661610a](https://www.github.com/googleapis/google-cloud-go/commit/661610afa6a9113534884cafb138109536724310))
* **security/privateca:** start generating apiv1 ([#4023](https://www.github.com/googleapis/google-cloud-go/issues/4023)) ([08aa83a](https://www.github.com/googleapis/google-cloud-go/commit/08aa83a5371bb6485bc3b19b3ed5300f807ce69f))
* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd))
* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd))
* **speech:** add webm opus support. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06))
* **speech:** Support for spoken punctuation and spoken emojis. ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
### Bug Fixes
* **binaryauthorization:** add Java options to Binaryauthorization protos ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
* **internal/gapicgen:** filter out internal directory changes ([#4085](https://www.github.com/googleapis/google-cloud-go/issues/4085)) ([01473f6](https://www.github.com/googleapis/google-cloud-go/commit/01473f6d8db26c6e18969ace7f9e87c66e94ad9e))
* **internal/gapicgen:** use correct region tags for gensnippets ([#4022](https://www.github.com/googleapis/google-cloud-go/issues/4022)) ([8ccd689](https://www.github.com/googleapis/google-cloud-go/commit/8ccd689cab08f016008ca06a939a4828817d4a25))
* **internal/gensnippets:** run goimports ([#3931](https://www.github.com/googleapis/google-cloud-go/issues/3931)) ([10050f0](https://www.github.com/googleapis/google-cloud-go/commit/10050f05c20c226547d87c08168fa4bc551395c5))
* **internal:** append a new line to comply with go fmt ([#4028](https://www.github.com/googleapis/google-cloud-go/issues/4028)) ([a297278](https://www.github.com/googleapis/google-cloud-go/commit/a2972783c4af806199d1c67c9f63ad9677f20f34))
* **internal:** make sure formatting is run on snippets ([#4039](https://www.github.com/googleapis/google-cloud-go/issues/4039)) ([130dfc5](https://www.github.com/googleapis/google-cloud-go/commit/130dfc535396e98fc009585b0457e3bc48ead941)), refs [#4037](https://www.github.com/googleapis/google-cloud-go/issues/4037)
* **metastore:** increase metastore lro polling timeouts ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab))
### Miscellaneous Chores
* **all:** fix release version ([#4040](https://www.github.com/googleapis/google-cloud-go/issues/4040)) ([4c991a9](https://www.github.com/googleapis/google-cloud-go/commit/4c991a928665d9be93691decce0c653f430688b7))
## [0.81.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.80.0...v0.81.0) (2021-04-02)


@@ -69,7 +69,7 @@ these projects as "general project" and "Firestore project".
After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount)
for each project. Ensure the project-level **Owner**
[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to
[IAM role](https://console.cloud.google.com/iam-admin/iam/project) role is added to
each service account. During the creation of the service account, you should
download the JSON credential file for use later.
@@ -136,6 +136,9 @@ As part of the setup that follows, the following variables will be configured:
- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
in the form
"projects/P/locations/L/keyRings/R". The creation of this is described below.
- `GCLOUD_TESTS_BIGTABLE_KEYRING`: The full name of the keyring for the bigtable tests,
in the form
"projects/P/locations/L/keyRings/R". The creation of this is described below. Expected to be single region.
- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.
Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
@@ -172,6 +175,7 @@ $ gcloud beta spanner instances create go-integration-test --config regional-us-
$ export MY_KEYRING=some-keyring-name
$ export MY_LOCATION=global
$ export MY_SINGLE_LOCATION=us-central1
# Creates a KMS keyring, in the same location as the default location for your
# project's buckets.
$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION
@@ -182,6 +186,26 @@ $ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --pu
$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
# Authorizes Google Cloud Storage to encrypt and decrypt using key1.
$ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
# Create KMS Key in one region for Bigtable
$ gcloud kms keyrings create $MY_KEYRING --location $MY_SINGLE_LOCATION
$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_SINGLE_LOCATION --purpose encryption
# Sets the GCLOUD_TESTS_BIGTABLE_KEYRING environment variable.
$ export GCLOUD_TESTS_BIGTABLE_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_SINGLE_LOCATION/keyRings/$MY_KEYRING
# Create a service agent, https://cloud.google.com/bigtable/docs/use-cmek#gcloud:
$ gcloud beta services identity create \
--service=bigtableadmin.googleapis.com \
--project $GCLOUD_TESTS_GOLANG_PROJECT_ID
# Note the service agent email for the agent created.
$ export SERVICE_AGENT_EMAIL=<service agent email, from last step>
# Authorizes Google Cloud Bigtable to encrypt and decrypt using key1
$ gcloud kms keys add-iam-policy-binding key1 \
--keyring $MY_KEYRING \
--location $MY_SINGLE_LOCATION \
--role roles/cloudkms.cryptoKeyEncrypterDecrypter \
--member "serviceAccount:$SERVICE_AGENT_EMAIL" \
--project $GCLOUD_TESTS_GOLANG_PROJECT_ID
```
It may be useful to add exports to your shell initialization for future use.
@@ -295,8 +319,8 @@ Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by opening an issue
or contacting one or more of the project maintainers.
This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org), version 1.2.0,
available at [https://contributor-covenant.org/version/1/2/0/](https://contributor-covenant.org/version/1/2/0/)
[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
[indvcla]: https://developers.google.com/open-source/cla/individual


@@ -12,7 +12,7 @@ To install the packages on your system, *do not clone the repo*. Instead:
1. Change to your project directory:
```
```bash
cd /my/cloud/project
```
1. Get the package you want to use. Some products have their own module, so it's
@@ -35,6 +35,7 @@ make backwards-incompatible changes.
| [Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`](https://pkg.go.dev/cloud.google.com/go/bigtable) |
| [Cloudbuild][cloud-build] | stable | [`cloud.google.com/go/cloudbuild/apiv1`](https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1) |
| [Cloudtasks][cloud-tasks] | stable | [`cloud.google.com/go/cloudtasks/apiv2`](https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2) |
| [Compute Engine][cloud-compute] | alpha | [`cloud.google.com/go/compute/apiv1`](https://pkg.go.dev/cloud.google.com/go/compute/apiv1) |
| [Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`](https://pkg.go.dev/cloud.google.com/go/container/apiv1) |
| [ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1`](https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1) |
| [Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`](https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1) |
@@ -54,7 +55,7 @@ make backwards-incompatible changes.
| [Monitoring][cloud-monitoring] | stable | [`cloud.google.com/go/monitoring/apiv3`](https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3) |
| [OS Login][cloud-oslogin] | stable | [`cloud.google.com/go/oslogin/apiv1`](https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1) |
| [Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`](https://pkg.go.dev/cloud.google.com/go/pubsub) |
| [Pub/Sub Lite][cloud-pubsublite] | beta | [`cloud.google.com/go/pubsublite`](https://pkg.go.dev/cloud.google.com/go/pubsublite) |
| [Pub/Sub Lite][cloud-pubsublite] | stable | [`cloud.google.com/go/pubsublite`](https://pkg.go.dev/cloud.google.com/go/pubsublite) |
| [Phishing Protection][cloud-phishingprotection] | alpha | [`cloud.google.com/go/phishingprotection/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1) |
| [reCAPTCHA Enterprise][cloud-recaptcha] | alpha | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1) |
| [Recommender][cloud-recommender] | beta | [`cloud.google.com/go/recommender/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1) |
@@ -137,6 +138,7 @@ for more information.
[cloud-build]: https://cloud.google.com/cloud-build/
[cloud-bigquery]: https://cloud.google.com/bigquery/
[cloud-bigtable]: https://cloud.google.com/bigtable/
[cloud-compute]: https://cloud.google.com/compute
[cloud-container]: https://cloud.google.com/containers/
[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis
[cloud-dataproc]: https://cloud.google.com/dataproc/

7 vendor/cloud.google.com/go/SECURITY.md generated vendored Normal file

@@ -0,0 +1,7 @@
# Security Policy
To report a security issue, please use [g.co/vulnz](https://g.co/vulnz).
The Google Security Team will respond within 5 working days of your report on g.co/vulnz.
We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue.

31 vendor/cloud.google.com/go/tools.go generated vendored

@@ -1,31 +0,0 @@
// +build tools
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This package exists to cause `go mod` and `go get` to believe these tools
// are dependencies, even though they are not runtime dependencies of any
// package (these are tools used by our CI builds). This means they will appear
// in our `go.mod` file, but will not be a part of the build. Also, since the
// build target is something non-existent, these should not be included in any
// binaries.
package cloud
import (
_ "github.com/golang/protobuf/protoc-gen-go"
_ "github.com/jstemmer/go-junit-report"
_ "golang.org/x/lint/golint"
_ "golang.org/x/tools/cmd/goimports"
)


@@ -1,398 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gengogrpc contains the gRPC code generator.
package gengogrpc
import (
"fmt"
"strconv"
"strings"
"google.golang.org/protobuf/compiler/protogen"
"google.golang.org/protobuf/types/descriptorpb"
)
const (
contextPackage = protogen.GoImportPath("context")
grpcPackage = protogen.GoImportPath("google.golang.org/grpc")
codesPackage = protogen.GoImportPath("google.golang.org/grpc/codes")
statusPackage = protogen.GoImportPath("google.golang.org/grpc/status")
)
// GenerateFile generates a _grpc.pb.go file containing gRPC service definitions.
func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile {
if len(file.Services) == 0 {
return nil
}
filename := file.GeneratedFilenamePrefix + "_grpc.pb.go"
g := gen.NewGeneratedFile(filename, file.GoImportPath)
g.P("// Code generated by protoc-gen-go-grpc. DO NOT EDIT.")
g.P()
g.P("package ", file.GoPackageName)
g.P()
GenerateFileContent(gen, file, g)
return g
}
// GenerateFileContent generates the gRPC service definitions, excluding the package statement.
func GenerateFileContent(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile) {
if len(file.Services) == 0 {
return
}
// TODO: Remove this. We don't need to include these references any more.
g.P("// Reference imports to suppress errors if they are not otherwise used.")
g.P("var _ ", contextPackage.Ident("Context"))
g.P("var _ ", grpcPackage.Ident("ClientConnInterface"))
g.P()
g.P("// This is a compile-time assertion to ensure that this generated file")
g.P("// is compatible with the grpc package it is being compiled against.")
g.P("const _ = ", grpcPackage.Ident("SupportPackageIsVersion6"))
g.P()
for _, service := range file.Services {
genService(gen, file, g, service)
}
}
func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) {
clientName := service.GoName + "Client"
g.P("// ", clientName, " is the client API for ", service.GoName, " service.")
g.P("//")
g.P("// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.")
// Client interface.
if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() {
g.P("//")
g.P(deprecationComment)
}
g.Annotate(clientName, service.Location)
g.P("type ", clientName, " interface {")
for _, method := range service.Methods {
g.Annotate(clientName+"."+method.GoName, method.Location)
if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() {
g.P(deprecationComment)
}
g.P(method.Comments.Leading,
clientSignature(g, method))
}
g.P("}")
g.P()
// Client structure.
g.P("type ", unexport(clientName), " struct {")
g.P("cc ", grpcPackage.Ident("ClientConnInterface"))
g.P("}")
g.P()
// NewClient factory.
if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() {
g.P(deprecationComment)
}
g.P("func New", clientName, " (cc ", grpcPackage.Ident("ClientConnInterface"), ") ", clientName, " {")
g.P("return &", unexport(clientName), "{cc}")
g.P("}")
g.P()
var methodIndex, streamIndex int
// Client method implementations.
for _, method := range service.Methods {
if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() {
// Unary RPC method
genClientMethod(gen, file, g, method, methodIndex)
methodIndex++
} else {
// Streaming RPC method
genClientMethod(gen, file, g, method, streamIndex)
streamIndex++
}
}
// Server interface.
serverType := service.GoName + "Server"
g.P("// ", serverType, " is the server API for ", service.GoName, " service.")
if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() {
g.P("//")
g.P(deprecationComment)
}
g.Annotate(serverType, service.Location)
g.P("type ", serverType, " interface {")
for _, method := range service.Methods {
g.Annotate(serverType+"."+method.GoName, method.Location)
if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() {
g.P(deprecationComment)
}
g.P(method.Comments.Leading,
serverSignature(g, method))
}
g.P("}")
g.P()
// Server Unimplemented struct for forward compatibility.
g.P("// Unimplemented", serverType, " can be embedded to have forward compatible implementations.")
g.P("type Unimplemented", serverType, " struct {")
g.P("}")
g.P()
for _, method := range service.Methods {
nilArg := ""
if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() {
nilArg = "nil,"
}
g.P("func (*Unimplemented", serverType, ") ", serverSignature(g, method), "{")
g.P("return ", nilArg, statusPackage.Ident("Errorf"), "(", codesPackage.Ident("Unimplemented"), `, "method `, method.GoName, ` not implemented")`)
g.P("}")
}
g.P()
// Server registration.
if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() {
g.P(deprecationComment)
}
serviceDescVar := "_" + service.GoName + "_serviceDesc"
g.P("func Register", service.GoName, "Server(s *", grpcPackage.Ident("Server"), ", srv ", serverType, ") {")
g.P("s.RegisterService(&", serviceDescVar, `, srv)`)
g.P("}")
g.P()
// Server handler implementations.
var handlerNames []string
for _, method := range service.Methods {
hname := genServerMethod(gen, file, g, method)
handlerNames = append(handlerNames, hname)
}
// Service descriptor.
g.P("var ", serviceDescVar, " = ", grpcPackage.Ident("ServiceDesc"), " {")
g.P("ServiceName: ", strconv.Quote(string(service.Desc.FullName())), ",")
g.P("HandlerType: (*", serverType, ")(nil),")
g.P("Methods: []", grpcPackage.Ident("MethodDesc"), "{")
for i, method := range service.Methods {
if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() {
continue
}
g.P("{")
g.P("MethodName: ", strconv.Quote(string(method.Desc.Name())), ",")
g.P("Handler: ", handlerNames[i], ",")
g.P("},")
}
g.P("},")
g.P("Streams: []", grpcPackage.Ident("StreamDesc"), "{")
for i, method := range service.Methods {
if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() {
continue
}
g.P("{")
g.P("StreamName: ", strconv.Quote(string(method.Desc.Name())), ",")
g.P("Handler: ", handlerNames[i], ",")
if method.Desc.IsStreamingServer() {
g.P("ServerStreams: true,")
}
if method.Desc.IsStreamingClient() {
g.P("ClientStreams: true,")
}
g.P("},")
}
g.P("},")
g.P("Metadata: \"", file.Desc.Path(), "\",")
g.P("}")
g.P()
}
func clientSignature(g *protogen.GeneratedFile, method *protogen.Method) string {
s := method.GoName + "(ctx " + g.QualifiedGoIdent(contextPackage.Ident("Context"))
if !method.Desc.IsStreamingClient() {
s += ", in *" + g.QualifiedGoIdent(method.Input.GoIdent)
}
s += ", opts ..." + g.QualifiedGoIdent(grpcPackage.Ident("CallOption")) + ") ("
if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() {
s += "*" + g.QualifiedGoIdent(method.Output.GoIdent)
} else {
s += method.Parent.GoName + "_" + method.GoName + "Client"
}
s += ", error)"
return s
}
func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method, index int) {
service := method.Parent
sname := fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name())
if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() {
g.P(deprecationComment)
}
g.P("func (c *", unexport(service.GoName), "Client) ", clientSignature(g, method), "{")
if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() {
g.P("out := new(", method.Output.GoIdent, ")")
g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`)
g.P("if err != nil { return nil, err }")
g.P("return out, nil")
g.P("}")
g.P()
return
}
streamType := unexport(service.GoName) + method.GoName + "Client"
serviceDescVar := "_" + service.GoName + "_serviceDesc"
g.P("stream, err := c.cc.NewStream(ctx, &", serviceDescVar, ".Streams[", index, `], "`, sname, `", opts...)`)
g.P("if err != nil { return nil, err }")
g.P("x := &", streamType, "{stream}")
if !method.Desc.IsStreamingClient() {
g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }")
g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
}
g.P("return x, nil")
g.P("}")
g.P()
genSend := method.Desc.IsStreamingClient()
genRecv := method.Desc.IsStreamingServer()
genCloseAndRecv := !method.Desc.IsStreamingServer()
// Stream auxiliary types and methods.
g.P("type ", service.GoName, "_", method.GoName, "Client interface {")
if genSend {
g.P("Send(*", method.Input.GoIdent, ") error")
}
if genRecv {
g.P("Recv() (*", method.Output.GoIdent, ", error)")
}
if genCloseAndRecv {
g.P("CloseAndRecv() (*", method.Output.GoIdent, ", error)")
}
g.P(grpcPackage.Ident("ClientStream"))
g.P("}")
g.P()
g.P("type ", streamType, " struct {")
g.P(grpcPackage.Ident("ClientStream"))
g.P("}")
g.P()
if genSend {
g.P("func (x *", streamType, ") Send(m *", method.Input.GoIdent, ") error {")
g.P("return x.ClientStream.SendMsg(m)")
g.P("}")
g.P()
}
if genRecv {
g.P("func (x *", streamType, ") Recv() (*", method.Output.GoIdent, ", error) {")
g.P("m := new(", method.Output.GoIdent, ")")
g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
g.P("return m, nil")
g.P("}")
g.P()
}
if genCloseAndRecv {
g.P("func (x *", streamType, ") CloseAndRecv() (*", method.Output.GoIdent, ", error) {")
g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
g.P("m := new(", method.Output.GoIdent, ")")
g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
g.P("return m, nil")
g.P("}")
g.P()
}
}
func serverSignature(g *protogen.GeneratedFile, method *protogen.Method) string {
var reqArgs []string
ret := "error"
if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() {
reqArgs = append(reqArgs, g.QualifiedGoIdent(contextPackage.Ident("Context")))
ret = "(*" + g.QualifiedGoIdent(method.Output.GoIdent) + ", error)"
}
if !method.Desc.IsStreamingClient() {
reqArgs = append(reqArgs, "*"+g.QualifiedGoIdent(method.Input.GoIdent))
}
if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() {
reqArgs = append(reqArgs, method.Parent.GoName+"_"+method.GoName+"Server")
}
return method.GoName + "(" + strings.Join(reqArgs, ", ") + ") " + ret
}
func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method) string {
service := method.Parent
hname := fmt.Sprintf("_%s_%s_Handler", service.GoName, method.GoName)
if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() {
g.P("func ", hname, "(srv interface{}, ctx ", contextPackage.Ident("Context"), ", dec func(interface{}) error, interceptor ", grpcPackage.Ident("UnaryServerInterceptor"), ") (interface{}, error) {")
g.P("in := new(", method.Input.GoIdent, ")")
g.P("if err := dec(in); err != nil { return nil, err }")
g.P("if interceptor == nil { return srv.(", service.GoName, "Server).", method.GoName, "(ctx, in) }")
g.P("info := &", grpcPackage.Ident("UnaryServerInfo"), "{")
g.P("Server: srv,")
g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.GoName)), ",")
g.P("}")
g.P("handler := func(ctx ", contextPackage.Ident("Context"), ", req interface{}) (interface{}, error) {")
g.P("return srv.(", service.GoName, "Server).", method.GoName, "(ctx, req.(*", method.Input.GoIdent, "))")
g.P("}")
g.P("return interceptor(ctx, in, info, handler)")
g.P("}")
g.P()
return hname
}
streamType := unexport(service.GoName) + method.GoName + "Server"
g.P("func ", hname, "(srv interface{}, stream ", grpcPackage.Ident("ServerStream"), ") error {")
if !method.Desc.IsStreamingClient() {
g.P("m := new(", method.Input.GoIdent, ")")
g.P("if err := stream.RecvMsg(m); err != nil { return err }")
g.P("return srv.(", service.GoName, "Server).", method.GoName, "(m, &", streamType, "{stream})")
} else {
g.P("return srv.(", service.GoName, "Server).", method.GoName, "(&", streamType, "{stream})")
}
g.P("}")
g.P()
genSend := method.Desc.IsStreamingServer()
genSendAndClose := !method.Desc.IsStreamingServer()
genRecv := method.Desc.IsStreamingClient()
// Stream auxiliary types and methods.
g.P("type ", service.GoName, "_", method.GoName, "Server interface {")
if genSend {
g.P("Send(*", method.Output.GoIdent, ") error")
}
if genSendAndClose {
g.P("SendAndClose(*", method.Output.GoIdent, ") error")
}
if genRecv {
g.P("Recv() (*", method.Input.GoIdent, ", error)")
}
g.P(grpcPackage.Ident("ServerStream"))
g.P("}")
g.P()
g.P("type ", streamType, " struct {")
g.P(grpcPackage.Ident("ServerStream"))
g.P("}")
g.P()
if genSend {
g.P("func (x *", streamType, ") Send(m *", method.Output.GoIdent, ") error {")
g.P("return x.ServerStream.SendMsg(m)")
g.P("}")
g.P()
}
if genSendAndClose {
g.P("func (x *", streamType, ") SendAndClose(m *", method.Output.GoIdent, ") error {")
g.P("return x.ServerStream.SendMsg(m)")
g.P("}")
g.P()
}
if genRecv {
g.P("func (x *", streamType, ") Recv() (*", method.Input.GoIdent, ", error) {")
g.P("m := new(", method.Input.GoIdent, ")")
g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }")
g.P("return m, nil")
g.P("}")
g.P()
}
return hname
}
const deprecationComment = "// Deprecated: Do not use."
func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] }
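
For orientation, the following is a minimal sketch of the kind of unary client code the generator above emits (compare the "Client interface", "Client structure", "NewClient factory", and genClientMethod sections), assuming a hypothetical Greeter service with a single SayHello RPC; the package, service, and message names are illustrative only and not part of this change:

```go
package helloworldpb

import (
	"context"

	"google.golang.org/grpc"
)

// Placeholder message types; the real generator derives these from the .proto file.
type HelloRequest struct{}
type HelloReply struct{}

// GreeterClient corresponds to the generated "Client interface".
type GreeterClient interface {
	SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error)
}

// greeterClient corresponds to the generated "Client structure".
type greeterClient struct {
	cc grpc.ClientConnInterface
}

// NewGreeterClient mirrors the generated "NewClient factory".
func NewGreeterClient(cc grpc.ClientConnInterface) GreeterClient {
	return &greeterClient{cc}
}

// SayHello mirrors the unary method body produced by genClientMethod:
// allocate the response, invoke the full method name, and return the result.
func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) {
	out := new(HelloReply)
	err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
```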


@ -1,74 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate
// Go code. Install it by building this program and making it accessible within
// your PATH with the name:
// protoc-gen-go
//
// The 'go' suffix becomes part of the argument for the protocol compiler,
// such that it can be invoked as:
// protoc --go_out=paths=source_relative:. path/to/file.proto
//
// This generates Go bindings for the protocol buffer defined by file.proto.
// With that input, the output will be written to:
// path/to/file.pb.go
//
// See the README and documentation for protocol buffers to learn more:
// https://developers.google.com/protocol-buffers/
package main
import (
"flag"
"fmt"
"strings"
"github.com/golang/protobuf/internal/gengogrpc"
gengo "google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo"
"google.golang.org/protobuf/compiler/protogen"
)
func main() {
var (
flags flag.FlagSet
plugins = flags.String("plugins", "", "list of plugins to enable (supported values: grpc)")
importPrefix = flags.String("import_prefix", "", "prefix to prepend to import paths")
)
importRewriteFunc := func(importPath protogen.GoImportPath) protogen.GoImportPath {
switch importPath {
case "context", "fmt", "math":
return importPath
}
if *importPrefix != "" {
return protogen.GoImportPath(*importPrefix) + importPath
}
return importPath
}
protogen.Options{
ParamFunc: flags.Set,
ImportRewriteFunc: importRewriteFunc,
}.Run(func(gen *protogen.Plugin) error {
grpc := false
for _, plugin := range strings.Split(*plugins, ",") {
switch plugin {
case "grpc":
grpc = true
case "":
default:
return fmt.Errorf("protoc-gen-go: unknown plugin %q", plugin)
}
}
for _, f := range gen.Files {
if !f.Generate {
continue
}
g := gengo.GenerateFile(gen, f)
if grpc {
gengogrpc.GenerateFileContent(gen, f, g)
}
}
gen.SupportedFeatures = gengo.SupportedFeatures
return nil
})
}


@ -1 +0,0 @@
go-junit-report


@ -1,16 +0,0 @@
language: go
go:
- tip
- "1.13.x"
- "1.12.x"
- "1.11.x"
- "1.10.x"
- "1.9.x"
- "1.8.x"
- "1.7.x"
- "1.6.x"
- "1.5.x"
- "1.4.x"
- "1.3.x"
- "1.2.x"


@ -1,20 +0,0 @@
Copyright (c) 2012 Joel Stemmer
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -1,49 +0,0 @@
# go-junit-report
Converts `go test` output to an xml report, suitable for applications that
expect junit xml reports (e.g. [Jenkins](http://jenkins-ci.org)).
[![Build Status][travis-badge]][travis-link]
[![Report Card][report-badge]][report-link]
## Installation
Go version 1.2 or higher is required. Install or update using the `go get`
command:
```bash
go get -u github.com/jstemmer/go-junit-report
```
## Usage
go-junit-report reads the `go test` verbose output from standard in and writes
junit compatible XML to standard out.
```bash
go test -v 2>&1 | go-junit-report > report.xml
```
Note that it can also parse benchmark output produced with the `-bench` flag:
```bash
go test -v -bench . -count 5 2>&1 | go-junit-report > report.xml
```
## Contribution
Create an issue and discuss the fix or feature, then fork the package.
Clone the fork to github.com/jstemmer/go-junit-report. This is necessary because go import uses this path.
Fix or implement the feature. Test it and then commit the change.
Reference the issue number and describe the change in the commit message.
Create a pull request. It can then be merged by the owner or an administrator.
### Run Tests
```bash
go test
```
[travis-badge]: https://travis-ci.org/jstemmer/go-junit-report.svg
[travis-link]: https://travis-ci.org/jstemmer/go-junit-report
[report-badge]: https://goreportcard.com/badge/github.com/jstemmer/go-junit-report
[report-link]: https://goreportcard.com/report/github.com/jstemmer/go-junit-report


@ -1,182 +0,0 @@
package formatter
import (
"bufio"
"encoding/xml"
"fmt"
"io"
"runtime"
"strings"
"time"
"github.com/jstemmer/go-junit-report/parser"
)
// JUnitTestSuites is a collection of JUnit test suites.
type JUnitTestSuites struct {
XMLName xml.Name `xml:"testsuites"`
Suites []JUnitTestSuite `xml:"testsuite"`
}
// JUnitTestSuite is a single JUnit test suite which may contain many
// testcases.
type JUnitTestSuite struct {
XMLName xml.Name `xml:"testsuite"`
Tests int `xml:"tests,attr"`
Failures int `xml:"failures,attr"`
Time string `xml:"time,attr"`
Name string `xml:"name,attr"`
Properties []JUnitProperty `xml:"properties>property,omitempty"`
TestCases []JUnitTestCase `xml:"testcase"`
}
// JUnitTestCase is a single test case with its result.
type JUnitTestCase struct {
XMLName xml.Name `xml:"testcase"`
Classname string `xml:"classname,attr"`
Name string `xml:"name,attr"`
Time string `xml:"time,attr"`
SkipMessage *JUnitSkipMessage `xml:"skipped,omitempty"`
Failure *JUnitFailure `xml:"failure,omitempty"`
}
// JUnitSkipMessage contains the reason why a testcase was skipped.
type JUnitSkipMessage struct {
Message string `xml:"message,attr"`
}
// JUnitProperty represents a key/value pair used to define properties.
type JUnitProperty struct {
Name string `xml:"name,attr"`
Value string `xml:"value,attr"`
}
// JUnitFailure contains data related to a failed test.
type JUnitFailure struct {
Message string `xml:"message,attr"`
Type string `xml:"type,attr"`
Contents string `xml:",chardata"`
}
// JUnitReportXML writes a JUnit xml representation of the given report to w
// in the format described at http://windyroad.org/dl/Open%20Source/JUnit.xsd
func JUnitReportXML(report *parser.Report, noXMLHeader bool, goVersion string, w io.Writer) error {
suites := JUnitTestSuites{}
// convert Report to JUnit test suites
for _, pkg := range report.Packages {
pkg.Benchmarks = mergeBenchmarks(pkg.Benchmarks)
ts := JUnitTestSuite{
Tests: len(pkg.Tests) + len(pkg.Benchmarks),
Failures: 0,
Time: formatTime(pkg.Duration),
Name: pkg.Name,
Properties: []JUnitProperty{},
TestCases: []JUnitTestCase{},
}
classname := pkg.Name
if idx := strings.LastIndex(classname, "/"); idx > -1 && idx < len(pkg.Name) {
classname = pkg.Name[idx+1:]
}
// properties
if goVersion == "" {
// if goVersion was not specified as a flag, fall back to version reported by runtime
goVersion = runtime.Version()
}
ts.Properties = append(ts.Properties, JUnitProperty{"go.version", goVersion})
if pkg.CoveragePct != "" {
ts.Properties = append(ts.Properties, JUnitProperty{"coverage.statements.pct", pkg.CoveragePct})
}
// individual test cases
for _, test := range pkg.Tests {
testCase := JUnitTestCase{
Classname: classname,
Name: test.Name,
Time: formatTime(test.Duration),
Failure: nil,
}
if test.Result == parser.FAIL {
ts.Failures++
testCase.Failure = &JUnitFailure{
Message: "Failed",
Type: "",
Contents: strings.Join(test.Output, "\n"),
}
}
if test.Result == parser.SKIP {
testCase.SkipMessage = &JUnitSkipMessage{strings.Join(test.Output, "\n")}
}
ts.TestCases = append(ts.TestCases, testCase)
}
// individual benchmarks
for _, benchmark := range pkg.Benchmarks {
benchmarkCase := JUnitTestCase{
Classname: classname,
Name: benchmark.Name,
Time: formatBenchmarkTime(benchmark.Duration),
}
ts.TestCases = append(ts.TestCases, benchmarkCase)
}
suites.Suites = append(suites.Suites, ts)
}
// to xml
bytes, err := xml.MarshalIndent(suites, "", "\t")
if err != nil {
return err
}
writer := bufio.NewWriter(w)
if !noXMLHeader {
writer.WriteString(xml.Header)
}
writer.Write(bytes)
writer.WriteByte('\n')
writer.Flush()
return nil
}
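// mergeBenchmarks merges repeated runs of the same benchmark (for example from
// `go test -bench . -count N`) into a single entry, averaging duration, B/op,
// and allocs/op across the runs.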
func mergeBenchmarks(benchmarks []*parser.Benchmark) []*parser.Benchmark {
var merged []*parser.Benchmark
benchmap := make(map[string][]*parser.Benchmark)
for _, bm := range benchmarks {
if _, ok := benchmap[bm.Name]; !ok {
merged = append(merged, &parser.Benchmark{Name: bm.Name})
}
benchmap[bm.Name] = append(benchmap[bm.Name], bm)
}
for _, bm := range merged {
for _, b := range benchmap[bm.Name] {
bm.Allocs += b.Allocs
bm.Bytes += b.Bytes
bm.Duration += b.Duration
}
n := len(benchmap[bm.Name])
bm.Allocs /= n
bm.Bytes /= n
bm.Duration /= time.Duration(n)
}
return merged
}
func formatTime(d time.Duration) string {
return fmt.Sprintf("%.3f", d.Seconds())
}
func formatBenchmarkTime(d time.Duration) string {
return fmt.Sprintf("%.9f", d.Seconds())
}


@ -1,45 +0,0 @@
package main
import (
"flag"
"fmt"
"os"
"github.com/jstemmer/go-junit-report/formatter"
"github.com/jstemmer/go-junit-report/parser"
)
var (
noXMLHeader = flag.Bool("no-xml-header", false, "do not print xml header")
packageName = flag.String("package-name", "", "specify a package name (compiled tests have no package name in output)")
goVersionFlag = flag.String("go-version", "", "specify the value to use for the go.version property in the generated XML")
setExitCode = flag.Bool("set-exit-code", false, "set exit code to 1 if tests failed")
)
func main() {
flag.Parse()
if flag.NArg() != 0 {
fmt.Fprintf(os.Stderr, "%s does not accept positional arguments\n", os.Args[0])
flag.Usage()
os.Exit(1)
}
// Read input
report, err := parser.Parse(os.Stdin, *packageName)
if err != nil {
fmt.Printf("Error reading input: %s\n", err)
os.Exit(1)
}
// Write xml
err = formatter.JUnitReportXML(report, *noXMLHeader, *goVersionFlag, os.Stdout)
if err != nil {
fmt.Printf("Error writing XML: %s\n", err)
os.Exit(1)
}
if *setExitCode && report.Failures() > 0 {
os.Exit(1)
}
}


@ -1,319 +0,0 @@
package parser
import (
"bufio"
"io"
"regexp"
"strconv"
"strings"
"time"
)
// Result represents a test result.
type Result int
// Test result constants
const (
PASS Result = iota
FAIL
SKIP
)
// Report is a collection of package tests.
type Report struct {
Packages []Package
}
// Package contains the test results of a single package.
type Package struct {
Name string
Duration time.Duration
Tests []*Test
Benchmarks []*Benchmark
CoveragePct string
// Time is deprecated, use Duration instead.
Time int // in milliseconds
}
// Test contains the results of a single test.
type Test struct {
Name string
Duration time.Duration
Result Result
Output []string
SubtestIndent string
// Time is deprecated, use Duration instead.
Time int // in milliseconds
}
// Benchmark contains the results of a single benchmark.
type Benchmark struct {
Name string
Duration time.Duration
// number of B/op
Bytes int
// number of allocs/op
Allocs int
}
var (
regexStatus = regexp.MustCompile(`--- (PASS|FAIL|SKIP): (.+) \((\d+\.\d+)(?: seconds|s)\)`)
regexIndent = regexp.MustCompile(`^([ \t]+)---`)
regexCoverage = regexp.MustCompile(`^coverage:\s+(\d+\.\d+)%\s+of\s+statements(?:\sin\s.+)?$`)
regexResult = regexp.MustCompile(`^(ok|FAIL)\s+([^ ]+)\s+(?:(\d+\.\d+)s|\(cached\)|(\[\w+ failed]))(?:\s+coverage:\s+(\d+\.\d+)%\sof\sstatements(?:\sin\s.+)?)?$`)
// regexBenchmark captures 3-5 groups: benchmark name, number of times ran, ns/op (with or without decimal), B/op (optional), and allocs/op (optional).
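// For example (illustrative), it matches: "BenchmarkParse-8   2000000   512 ns/op   128 B/op   2 allocs/op".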
regexBenchmark = regexp.MustCompile(`^(Benchmark[^ -]+)(?:-\d+\s+|\s+)(\d+)\s+(\d+|\d+\.\d+)\sns/op(?:\s+(\d+)\sB/op)?(?:\s+(\d+)\sallocs/op)?`)
regexOutput = regexp.MustCompile(`( )*\t(.*)`)
regexSummary = regexp.MustCompile(`^(PASS|FAIL|SKIP)$`)
regexPackageWithTest = regexp.MustCompile(`^# ([^\[\]]+) \[[^\]]+\]$`)
)
// Parse parses go test output from reader r and returns a report with the
// results. An optional pkgName can be given, which is used in case a package
// result line is missing.
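// A typical (illustrative) input fragment:
//
//   === RUN   TestFoo
//   --- PASS: TestFoo (0.01s)
//   PASS
//   ok      example.com/pkg 0.015s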
func Parse(r io.Reader, pkgName string) (*Report, error) {
reader := bufio.NewReader(r)
report := &Report{make([]Package, 0)}
// keep track of tests we find
var tests []*Test
// keep track of benchmarks we find
var benchmarks []*Benchmark
// sum of tests' time, used if the current test has no result line (when it is a compiled test)
var testsTime time.Duration
// current test
var cur string
// coverage percentage report for current package
var coveragePct string
// stores mapping between package name and output of build failures
var packageCaptures = map[string][]string{}
// the name of the package whose build failure output is being captured
var capturedPackage string
// capture any non-test output
var buffers = map[string][]string{}
// parse lines
for {
l, _, err := reader.ReadLine()
if err != nil && err == io.EOF {
break
} else if err != nil {
return nil, err
}
line := string(l)
if strings.HasPrefix(line, "=== RUN ") {
// new test
cur = strings.TrimSpace(line[8:])
tests = append(tests, &Test{
Name: cur,
Result: FAIL,
Output: make([]string, 0),
})
// clear the current build package, so output lines won't be added to that build
capturedPackage = ""
} else if matches := regexBenchmark.FindStringSubmatch(line); len(matches) == 6 {
bytes, _ := strconv.Atoi(matches[4])
allocs, _ := strconv.Atoi(matches[5])
benchmarks = append(benchmarks, &Benchmark{
Name: matches[1],
Duration: parseNanoseconds(matches[3]),
Bytes: bytes,
Allocs: allocs,
})
} else if strings.HasPrefix(line, "=== PAUSE ") {
continue
} else if strings.HasPrefix(line, "=== CONT ") {
cur = strings.TrimSpace(line[8:])
continue
} else if matches := regexResult.FindStringSubmatch(line); len(matches) == 6 {
if matches[5] != "" {
coveragePct = matches[5]
}
if strings.HasSuffix(matches[4], "failed]") {
// the build of the package failed; inject a dummy test into the package
// that indicates the failure and contains the failure description.
tests = append(tests, &Test{
Name: matches[4],
Result: FAIL,
Output: packageCaptures[matches[2]],
})
} else if matches[1] == "FAIL" && !containsFailures(tests) && len(buffers[cur]) > 0 {
// This package didn't have any failing tests, but still it
// failed with some output. Create a dummy test with the
// output.
tests = append(tests, &Test{
Name: "Failure",
Result: FAIL,
Output: buffers[cur],
})
buffers[cur] = buffers[cur][0:0]
}
// all tests in this package are finished
report.Packages = append(report.Packages, Package{
Name: matches[2],
Duration: parseSeconds(matches[3]),
Tests: tests,
Benchmarks: benchmarks,
CoveragePct: coveragePct,
Time: int(parseSeconds(matches[3]) / time.Millisecond), // deprecated
})
buffers[cur] = buffers[cur][0:0]
tests = make([]*Test, 0)
benchmarks = make([]*Benchmark, 0)
coveragePct = ""
cur = ""
testsTime = 0
} else if matches := regexStatus.FindStringSubmatch(line); len(matches) == 4 {
cur = matches[2]
test := findTest(tests, cur)
if test == nil {
continue
}
// test status
if matches[1] == "PASS" {
test.Result = PASS
} else if matches[1] == "SKIP" {
test.Result = SKIP
} else {
test.Result = FAIL
}
if matches := regexIndent.FindStringSubmatch(line); len(matches) == 2 {
test.SubtestIndent = matches[1]
}
test.Output = buffers[cur]
test.Name = matches[2]
test.Duration = parseSeconds(matches[3])
testsTime += test.Duration
test.Time = int(test.Duration / time.Millisecond) // deprecated
} else if matches := regexCoverage.FindStringSubmatch(line); len(matches) == 2 {
coveragePct = matches[1]
} else if matches := regexOutput.FindStringSubmatch(line); capturedPackage == "" && len(matches) == 3 {
// Sub-tests start with one or more series of 4-space indents, followed by a hard tab,
// followed by the test output
// Top-level tests start with a hard tab.
test := findTest(tests, cur)
if test == nil {
continue
}
test.Output = append(test.Output, matches[2])
} else if strings.HasPrefix(line, "# ") {
// indicates a capture of build output of a package. set the current build package.
packageWithTestBinary := regexPackageWithTest.FindStringSubmatch(line)
if packageWithTestBinary != nil {
// Sometimes, the text after "# " shows the name of the test binary
// ("<package>.test") in addition to the package
// e.g.: "# package/name [package/name.test]"
capturedPackage = packageWithTestBinary[1]
} else {
capturedPackage = line[2:]
}
} else if capturedPackage != "" {
// current line is build failure capture for the current built package
packageCaptures[capturedPackage] = append(packageCaptures[capturedPackage], line)
} else if regexSummary.MatchString(line) {
// unset current test name so any additional output after the
// summary is captured separately.
cur = ""
} else {
// buffer anything else that we didn't recognize
buffers[cur] = append(buffers[cur], line)
// if we have a current test, also append to its output
test := findTest(tests, cur)
if test != nil {
if strings.HasPrefix(line, test.SubtestIndent+" ") {
test.Output = append(test.Output, strings.TrimPrefix(line, test.SubtestIndent+" "))
}
}
}
}
if len(tests) > 0 {
// no result line found
report.Packages = append(report.Packages, Package{
Name: pkgName,
Duration: testsTime,
Time: int(testsTime / time.Millisecond),
Tests: tests,
Benchmarks: benchmarks,
CoveragePct: coveragePct,
})
}
return report, nil
}
func parseSeconds(t string) time.Duration {
if t == "" {
return time.Duration(0)
}
// ignore error
d, _ := time.ParseDuration(t + "s")
return d
}
func parseNanoseconds(t string) time.Duration {
// note: if input < 1 ns precision, result will be 0s.
if t == "" {
return time.Duration(0)
}
// ignore error
d, _ := time.ParseDuration(t + "ns")
return d
}
func findTest(tests []*Test, name string) *Test {
for i := len(tests) - 1; i >= 0; i-- {
if tests[i].Name == name {
return tests[i]
}
}
return nil
}
func containsFailures(tests []*Test) bool {
for _, test := range tests {
if test.Result == FAIL {
return true
}
}
return false
}
// Failures counts the number of failed tests in this report
func (r *Report) Failures() int {
count := 0
for _, p := range r.Packages {
for _, t := range p.Tests {
if t.Result == FAIL {
count++
}
}
}
return count
}

vendor/golang.org/x/lint/.travis.yml generated vendored

@ -1,19 +0,0 @@
sudo: false
language: go
go:
- 1.10.x
- 1.11.x
- master
go_import_path: golang.org/x/lint
install:
- go get -t -v ./...
script:
- go test -v -race ./...
matrix:
allow_failures:
- go: master
fast_finish: true


@ -1,15 +0,0 @@
# Contributing to Golint
## Before filing an issue:
### Are you having trouble building golint?
Check you have the latest version of its dependencies. Run
```
go get -u golang.org/x/lint/golint
```
If you still have problems, consider searching for existing issues before filing a new issue.
## Before sending a pull request:
Have you understood the purpose of golint? Make sure to carefully read `README`.

vendor/golang.org/x/lint/LICENSE generated vendored

@ -1,27 +0,0 @@
Copyright (c) 2013 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/golang.org/x/lint/README.md generated vendored

@ -1,93 +0,0 @@
**NOTE:** Golint is [deprecated and frozen](https://github.com/golang/go/issues/38968).
There's no drop-in replacement for it, but tools such as [Staticcheck](https://staticcheck.io/)
and `go vet` should be used instead.
Golint is a linter for Go source code.
[![Go Reference](https://pkg.go.dev/badge/golang.org/x/lint.svg)](https://pkg.go.dev/golang.org/x/lint)
[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint)
## Installation
Golint requires a
[supported release of Go](https://golang.org/doc/devel/release.html#policy).
go get -u golang.org/x/lint/golint
To find out where `golint` was installed you can run `go list -f {{.Target}} golang.org/x/lint/golint`. For `golint` to be used globally add that directory to the `$PATH` environment setting.
## Usage
Invoke `golint` with one or more filenames, directories, or packages named
by its import path. Golint uses the same
[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as
the `go` command and therefore
also supports relative import paths like `./...`. Additionally the `...`
wildcard can be used as suffix on relative and absolute file paths to recurse
into them.
The output of this tool is a list of suggestions in Vim quickfix format,
which is accepted by lots of different editors.
## Purpose
Golint differs from gofmt. Gofmt reformats Go source code, whereas
golint prints out style mistakes.
Golint differs from govet. Govet is concerned with correctness, whereas
golint is concerned with coding style. Golint is in use at Google, and it
seeks to match the accepted style of the open source Go project.
The suggestions made by golint are exactly that: suggestions.
Golint is not perfect, and has both false positives and false negatives.
Do not treat its output as a gold standard. We will not be adding pragmas
or other knobs to suppress specific warnings, so do not expect or require
code to be completely "lint-free".
In short, this tool is not, and will never be, trustworthy enough for its
suggestions to be enforced automatically, for example as part of a build process.
Golint makes suggestions for many of the mechanically checkable items listed in
[Effective Go](https://golang.org/doc/effective_go.html) and the
[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments).
## Scope
Golint is meant to carry out the stylistic conventions put forth in
[Effective Go](https://golang.org/doc/effective_go.html) and
[CodeReviewComments](https://golang.org/wiki/CodeReviewComments).
Changes that are not aligned with those documents will not be considered.
## Contributions
Contributions to this project are welcome provided they are [in scope](#scope),
though please send mail before starting work on anything major.
Contributors retain their copyright, so we need you to fill out
[a short form](https://developers.google.com/open-source/cla/individual)
before we can accept your contribution.
## Vim
Add this to your ~/.vimrc:
set rtp+=$GOPATH/src/golang.org/x/lint/misc/vim
If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.
Running `:Lint` will run golint on the current file and populate the quickfix list.
Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w`
autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow
## Emacs
Add this to your `.emacs` file:
(add-to-list 'load-path (concat (getenv "GOPATH") "/src/golang.org/x/lint/misc/emacs/"))
(require 'golint)
If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.
Running M-x golint will run golint on the current file.
For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html).


@ -1,159 +0,0 @@
// Copyright (c) 2013 The Go Authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd.
// golint lints the Go source files named on its command line.
package main
import (
"flag"
"fmt"
"go/build"
"io/ioutil"
"os"
"path/filepath"
"strings"
"golang.org/x/lint"
)
var (
minConfidence = flag.Float64("min_confidence", 0.8, "minimum confidence of a problem to print it")
setExitStatus = flag.Bool("set_exit_status", false, "set exit status to 1 if any issues are found")
suggestions int
)
func usage() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
fmt.Fprintf(os.Stderr, "\tgolint [flags] # runs on package in current directory\n")
fmt.Fprintf(os.Stderr, "\tgolint [flags] [packages]\n")
fmt.Fprintf(os.Stderr, "\tgolint [flags] [directories] # where a '/...' suffix includes all sub-directories\n")
fmt.Fprintf(os.Stderr, "\tgolint [flags] [files] # all must belong to a single package\n")
fmt.Fprintf(os.Stderr, "Flags:\n")
flag.PrintDefaults()
}
func main() {
flag.Usage = usage
flag.Parse()
if flag.NArg() == 0 {
lintDir(".")
} else {
// dirsRun, filesRun, and pkgsRun indicate whether golint is applied to
// directory, file or package targets. The distinction affects which
// checks are run. It is not valid to mix target types.
var dirsRun, filesRun, pkgsRun int
var args []string
for _, arg := range flag.Args() {
if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) {
dirsRun = 1
for _, dirname := range allPackagesInFS(arg) {
args = append(args, dirname)
}
} else if isDir(arg) {
dirsRun = 1
args = append(args, arg)
} else if exists(arg) {
filesRun = 1
args = append(args, arg)
} else {
pkgsRun = 1
args = append(args, arg)
}
}
if dirsRun+filesRun+pkgsRun != 1 {
usage()
os.Exit(2)
}
switch {
case dirsRun == 1:
for _, dir := range args {
lintDir(dir)
}
case filesRun == 1:
lintFiles(args...)
case pkgsRun == 1:
for _, pkg := range importPaths(args) {
lintPackage(pkg)
}
}
}
if *setExitStatus && suggestions > 0 {
fmt.Fprintf(os.Stderr, "Found %d lint suggestions; failing.\n", suggestions)
os.Exit(1)
}
}
func isDir(filename string) bool {
fi, err := os.Stat(filename)
return err == nil && fi.IsDir()
}
func exists(filename string) bool {
_, err := os.Stat(filename)
return err == nil
}
func lintFiles(filenames ...string) {
files := make(map[string][]byte)
for _, filename := range filenames {
src, err := ioutil.ReadFile(filename)
if err != nil {
fmt.Fprintln(os.Stderr, err)
continue
}
files[filename] = src
}
l := new(lint.Linter)
ps, err := l.LintFiles(files)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
for _, p := range ps {
if p.Confidence >= *minConfidence {
fmt.Printf("%v: %s\n", p.Position, p.Text)
suggestions++
}
}
}
func lintDir(dirname string) {
pkg, err := build.ImportDir(dirname, 0)
lintImportedPackage(pkg, err)
}
func lintPackage(pkgname string) {
pkg, err := build.Import(pkgname, ".", 0)
lintImportedPackage(pkg, err)
}
func lintImportedPackage(pkg *build.Package, err error) {
if err != nil {
if _, nogo := err.(*build.NoGoError); nogo {
// Don't complain if the failure is due to no Go source files.
return
}
fmt.Fprintln(os.Stderr, err)
return
}
var files []string
files = append(files, pkg.GoFiles...)
files = append(files, pkg.CgoFiles...)
files = append(files, pkg.TestGoFiles...)
if pkg.Dir != "." {
for i, f := range files {
files[i] = filepath.Join(pkg.Dir, f)
}
}
// TODO(dsymonds): Do foo_test too (pkg.XTestGoFiles)
lintFiles(files...)
}


@ -1,309 +0,0 @@
package main
/*
This file holds a direct copy of the import path matching code of
https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be
replaced when https://golang.org/issue/8768 is resolved.
It has been updated to follow upstream changes in a few ways.
*/
import (
"fmt"
"go/build"
"log"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
)
var (
buildContext = build.Default
goroot = filepath.Clean(runtime.GOROOT())
gorootSrc = filepath.Join(goroot, "src")
)
// importPathsNoDotExpansion returns the import paths to use for the given
// command line, but it does no ... expansion.
func importPathsNoDotExpansion(args []string) []string {
if len(args) == 0 {
return []string{"."}
}
var out []string
for _, a := range args {
// Arguments are supposed to be import paths, but
// as a courtesy to Windows developers, rewrite \ to /
// in command-line arguments. Handles .\... and so on.
if filepath.Separator == '\\' {
a = strings.Replace(a, `\`, `/`, -1)
}
// Put argument in canonical form, but preserve leading ./.
if strings.HasPrefix(a, "./") {
a = "./" + path.Clean(a)
if a == "./." {
a = "."
}
} else {
a = path.Clean(a)
}
if a == "all" || a == "std" {
out = append(out, allPackages(a)...)
continue
}
out = append(out, a)
}
return out
}
// importPaths returns the import paths to use for the given command line.
func importPaths(args []string) []string {
args = importPathsNoDotExpansion(args)
var out []string
for _, a := range args {
if strings.Contains(a, "...") {
if build.IsLocalImport(a) {
out = append(out, allPackagesInFS(a)...)
} else {
out = append(out, allPackages(a)...)
}
continue
}
out = append(out, a)
}
return out
}
// matchPattern(pattern)(name) reports whether
// name matches pattern. Pattern is a limited glob
// pattern in which '...' means 'any string' and there
// is no other special syntax.
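// For example, matchPattern("net/...")(name) reports true for "net" and
// "net/http", and false for "network".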
func matchPattern(pattern string) func(name string) bool {
re := regexp.QuoteMeta(pattern)
re = strings.Replace(re, `\.\.\.`, `.*`, -1)
// Special case: foo/... matches foo too.
if strings.HasSuffix(re, `/.*`) {
re = re[:len(re)-len(`/.*`)] + `(/.*)?`
}
reg := regexp.MustCompile(`^` + re + `$`)
return func(name string) bool {
return reg.MatchString(name)
}
}
// hasPathPrefix reports whether the path s begins with the
// elements in prefix.
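// For example, hasPathPrefix("golang.org/x/lint", "golang.org/x") is true,
// while hasPathPrefix("golang.org/xyz", "golang.org/x") is false.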
func hasPathPrefix(s, prefix string) bool {
switch {
default:
return false
case len(s) == len(prefix):
return s == prefix
case len(s) > len(prefix):
if prefix != "" && prefix[len(prefix)-1] == '/' {
return strings.HasPrefix(s, prefix)
}
return s[len(prefix)] == '/' && s[:len(prefix)] == prefix
}
}
// treeCanMatchPattern(pattern)(name) reports whether
// name or children of name can possibly match pattern.
// Pattern is the same limited glob accepted by matchPattern.
func treeCanMatchPattern(pattern string) func(name string) bool {
wildCard := false
if i := strings.Index(pattern, "..."); i >= 0 {
wildCard = true
pattern = pattern[:i]
}
return func(name string) bool {
return len(name) <= len(pattern) && hasPathPrefix(pattern, name) ||
wildCard && strings.HasPrefix(name, pattern)
}
}
// allPackages returns all the packages that can be found
// under the $GOPATH directories and $GOROOT matching pattern.
// The pattern is either "all" (all packages), "std" (standard packages)
// or a path including "...".
func allPackages(pattern string) []string {
pkgs := matchPackages(pattern)
if len(pkgs) == 0 {
fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
}
return pkgs
}
func matchPackages(pattern string) []string {
match := func(string) bool { return true }
treeCanMatch := func(string) bool { return true }
if pattern != "all" && pattern != "std" {
match = matchPattern(pattern)
treeCanMatch = treeCanMatchPattern(pattern)
}
have := map[string]bool{
"builtin": true, // ignore pseudo-package that exists only for documentation
}
if !buildContext.CgoEnabled {
have["runtime/cgo"] = true // ignore during walk
}
var pkgs []string
// Commands
cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator)
filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error {
if err != nil || !fi.IsDir() || path == cmd {
return nil
}
name := path[len(cmd):]
if !treeCanMatch(name) {
return filepath.SkipDir
}
// Commands are all in cmd/, not in subdirectories.
if strings.Contains(name, string(filepath.Separator)) {
return filepath.SkipDir
}
// We use, e.g., cmd/gofmt as the pseudo import path for gofmt.
name = "cmd/" + name
if have[name] {
return nil
}
have[name] = true
if !match(name) {
return nil
}
_, err = buildContext.ImportDir(path, 0)
if err != nil {
if _, noGo := err.(*build.NoGoError); !noGo {
log.Print(err)
}
return nil
}
pkgs = append(pkgs, name)
return nil
})
for _, src := range buildContext.SrcDirs() {
if (pattern == "std" || pattern == "cmd") && src != gorootSrc {
continue
}
src = filepath.Clean(src) + string(filepath.Separator)
root := src
if pattern == "cmd" {
root += "cmd" + string(filepath.Separator)
}
filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
if err != nil || !fi.IsDir() || path == src {
return nil
}
// Avoid .foo, _foo, and testdata directory trees.
_, elem := filepath.Split(path)
if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
return filepath.SkipDir
}
name := filepath.ToSlash(path[len(src):])
if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") {
// The name "std" is only the standard library.
// If the name is cmd, it's the root of the command tree.
return filepath.SkipDir
}
if !treeCanMatch(name) {
return filepath.SkipDir
}
if have[name] {
return nil
}
have[name] = true
if !match(name) {
return nil
}
_, err = buildContext.ImportDir(path, 0)
if err != nil {
if _, noGo := err.(*build.NoGoError); noGo {
return nil
}
}
pkgs = append(pkgs, name)
return nil
})
}
return pkgs
}
// allPackagesInFS is like allPackages but is passed a pattern
// beginning ./ or ../, meaning it should scan the tree rooted
// at the given directory. There are ... in the pattern too.
func allPackagesInFS(pattern string) []string {
pkgs := matchPackagesInFS(pattern)
if len(pkgs) == 0 {
fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
}
return pkgs
}
func matchPackagesInFS(pattern string) []string {
// Find directory to begin the scan.
// Could be smarter but this one optimization
// is enough for now, since ... is usually at the
// end of a path.
i := strings.Index(pattern, "...")
dir, _ := path.Split(pattern[:i])
// pattern begins with ./ or ../.
// path.Clean will discard the ./ but not the ../.
// We need to preserve the ./ for pattern matching
// and in the returned import paths.
prefix := ""
if strings.HasPrefix(pattern, "./") {
prefix = "./"
}
match := matchPattern(pattern)
var pkgs []string
filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
if err != nil || !fi.IsDir() {
return nil
}
if path == dir {
// filepath.Walk starts at dir and recurses. For the recursive case,
// the path is the result of filepath.Join, which calls filepath.Clean.
// The initial case is not Cleaned, though, so we do this explicitly.
//
// This converts a path like "./io/" to "io". Without this step, running
// "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io
// package, because prepending the prefix "./" to the unclean path would
// result in "././io", and match("././io") returns false.
path = filepath.Clean(path)
}
// Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..".
_, elem := filepath.Split(path)
dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".."
if dot || strings.HasPrefix(elem, "_") || elem == "testdata" {
return filepath.SkipDir
}
name := prefix + filepath.ToSlash(path)
if !match(name) {
return nil
}
if _, err = build.ImportDir(path, 0); err != nil {
if _, noGo := err.(*build.NoGoError); !noGo {
log.Print(err)
}
return nil
}
pkgs = append(pkgs, name)
return nil
})
return pkgs
}


@ -1,13 +0,0 @@
// Copyright (c) 2018 The Go Authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd.
// +build go1.12
// Require use of the correct import path only for Go 1.12+ users, so
// any breakages coincide with people updating their CI configs or
// whatnot.
package main // import "golang.org/x/lint/golint"

vendor/golang.org/x/lint/lint.go generated vendored
File diff suppressed because it is too large

vendor/golang.org/x/mod/LICENSE generated vendored

@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/golang.org/x/mod/PATENTS generated vendored

@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.


@ -1,822 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package module defines the module.Version type along with support code.
//
// The module.Version type is a simple Path, Version pair:
//
// type Version struct {
// Path string
// Version string
// }
//
// There are no restrictions imposed directly by use of this structure,
// but additional checking functions, most notably Check, verify that
// a particular path, version pair is valid.
//
// Escaped Paths
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the escaped form be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces)
// in web server logs, and so on. Instead, we want a safe escaped form that
// leaves most paths unaltered.
//
// The safe escaped form is to replace every uppercase letter
// with an exclamation mark followed by the letter's lowercase equivalent.
//
// For example,
//
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the escaped form is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to escape a literal !.
//
// Unicode Restrictions
//
// Today, paths are disallowed from using Unicode.
//
// Although paths are currently disallowed from using Unicode,
// we would like at some point to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention for escaping them in the file system.
// But there are at least two subtle considerations.
//
// First, note that not all case-fold equivalent distinct runes
// form an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
// are three distinct runes that case-fold to each other.
// When we do add Unicode letters, we must not assume that upper/lower
// are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would escape as "!!k", or perhaps as "(212A)".
//
// Second, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.
package module
// IMPORTANT NOTE
//
// This file essentially defines the set of valid import paths for the go command.
// There are many subtle considerations, including Unicode ambiguity,
// security, network, and file system representations.
//
// This file also defines the set of valid module path and version combinations,
// another topic with many subtle considerations.
//
// Changes to the semantics in this file require approval from rsc.
import (
"fmt"
"path"
"sort"
"strings"
"unicode"
"unicode/utf8"
"golang.org/x/mod/semver"
errors "golang.org/x/xerrors"
)
// A Version (for clients, a module.Version) is defined by a module path and version pair.
// These are stored in their plain (unescaped) form.
type Version struct {
// Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2".
Path string
// Version is usually a semantic version in canonical form.
// There are three exceptions to this general rule.
// First, the top-level target of a build has no specific version
// and uses Version = "".
// Second, during MVS calculations the version "none" is used
// to represent the decision to take no version of a given module.
// Third, filesystem paths found in "replace" directives are
// represented by a path with an empty version.
Version string `json:",omitempty"`
}
// String returns a representation of the Version suitable for logging
// (Path@Version, or just Path if Version is empty).
func (m Version) String() string {
if m.Version == "" {
return m.Path
}
return m.Path + "@" + m.Version
}
// A ModuleError indicates an error specific to a module.
type ModuleError struct {
Path string
Version string
Err error
}
// VersionError returns a ModuleError derived from a Version and error,
// or err itself if it is already such an error.
func VersionError(v Version, err error) error {
var mErr *ModuleError
if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version {
return err
}
return &ModuleError{
Path: v.Path,
Version: v.Version,
Err: err,
}
}
func (e *ModuleError) Error() string {
if v, ok := e.Err.(*InvalidVersionError); ok {
return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err)
}
if e.Version != "" {
return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err)
}
return fmt.Sprintf("module %s: %v", e.Path, e.Err)
}
func (e *ModuleError) Unwrap() error { return e.Err }
// An InvalidVersionError indicates an error specific to a version, with the
// module path unknown or specified externally.
//
// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError
// must not wrap a ModuleError.
type InvalidVersionError struct {
Version string
Pseudo bool
Err error
}
// noun returns either "version" or "pseudo-version", depending on whether
// e.Version is a pseudo-version.
func (e *InvalidVersionError) noun() string {
if e.Pseudo {
return "pseudo-version"
}
return "version"
}
func (e *InvalidVersionError) Error() string {
return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err)
}
func (e *InvalidVersionError) Unwrap() error { return e.Err }
// Check checks that a given module path, version pair is valid.
// In addition to the path being a valid module path
// and the version being a valid semantic version,
// the two must correspond.
// For example, the path "yaml/v2" only corresponds to
// semantic versions beginning with "v2.".
func Check(path, version string) error {
if err := CheckPath(path); err != nil {
return err
}
if !semver.IsValid(version) {
return &ModuleError{
Path: path,
Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")},
}
}
_, pathMajor, _ := SplitPathVersion(path)
if err := CheckPathMajor(version, pathMajor); err != nil {
return &ModuleError{Path: path, Err: err}
}
return nil
}
// firstPathOK reports whether r can appear in the first element of a module path.
// The first element of the path must be an LDH domain name, at least for now.
// To avoid case ambiguity, the domain name must be entirely lower case.
func firstPathOK(r rune) bool {
return r == '-' || r == '.' ||
'0' <= r && r <= '9' ||
'a' <= r && r <= 'z'
}
// modPathOK reports whether r can appear in a module path element.
// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~.
//
// This matches what "go get" has historically recognized in import paths,
// and avoids confusing sequences like '%20' or '+' that would change meaning
// if used in a URL.
//
// TODO(rsc): We would like to allow Unicode letters, but that requires additional
// care in the safe encoding (see "escaped paths" above).
func modPathOK(r rune) bool {
if r < utf8.RuneSelf {
return r == '-' || r == '.' || r == '_' || r == '~' ||
'0' <= r && r <= '9' ||
'A' <= r && r <= 'Z' ||
'a' <= r && r <= 'z'
}
return false
}
// importPathOK reports whether r can appear in a package import path element.
//
// Import paths are intermediate between module paths and file paths: we
// disallow characters that would be confusing or ambiguous as arguments to
// 'go get' (such as '@' and ' '), but allow certain characters that are
// otherwise-unambiguous on the command line and historically used for some
// binary names (such as '++' as a suffix for compiler binaries and wrappers).
func importPathOK(r rune) bool {
return modPathOK(r) || r == '+'
}
// fileNameOK reports whether r can appear in a file name.
// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
// If we expand the set of allowed characters here, we have to
// work harder at detecting potential case-folding and normalization collisions.
// See note about "escaped paths" above.
func fileNameOK(r rune) bool {
if r < utf8.RuneSelf {
// Entire set of ASCII punctuation, from which we remove characters:
// ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
// We disallow some shell special characters: " ' * < > ? ` |
// (Note that some of those are disallowed by the Windows file system as well.)
// We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
// We allow spaces (U+0020) in file names.
const allowed = "!#$%&()+,-.=@[]^_{}~ "
if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
return true
}
for i := 0; i < len(allowed); i++ {
if rune(allowed[i]) == r {
return true
}
}
return false
}
// It may be OK to add more ASCII punctuation here, but only carefully.
// For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
return unicode.IsLetter(r)
}
// CheckPath checks that a module path is valid.
// A valid module path is a valid import path, as checked by CheckImportPath,
// with three additional constraints.
// First, the leading path element (up to the first slash, if any),
// by convention a domain name, must contain only lower-case ASCII letters,
// ASCII digits, dots (U+002E), and dashes (U+002D);
// it must contain at least one dot and cannot start with a dash.
// Second, for a final path element of the form /vN, where N looks numeric
// (ASCII digits and dots), N must not begin with a leading zero, must not
// contain any dots, and the element must not be /v1. For paths beginning with "gopkg.in/",
// this second requirement is replaced by a requirement that the path
// follow the gopkg.in server's conventions.
// Third, no path element may begin with a dot.
func CheckPath(path string) error {
if err := checkPath(path, modulePath); err != nil {
return fmt.Errorf("malformed module path %q: %v", path, err)
}
i := strings.Index(path, "/")
if i < 0 {
i = len(path)
}
if i == 0 {
return fmt.Errorf("malformed module path %q: leading slash", path)
}
if !strings.Contains(path[:i], ".") {
return fmt.Errorf("malformed module path %q: missing dot in first path element", path)
}
if path[0] == '-' {
return fmt.Errorf("malformed module path %q: leading dash in first path element", path)
}
for _, r := range path[:i] {
if !firstPathOK(r) {
return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r)
}
}
if _, _, ok := SplitPathVersion(path); !ok {
return fmt.Errorf("malformed module path %q: invalid version", path)
}
return nil
}
// CheckImportPath checks that an import path is valid.
//
// A valid import path consists of one or more valid path elements
// separated by slashes (U+002F). (It must not begin with nor end in a slash.)
//
// A valid path element is a non-empty string made up of
// ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~.
// It must not end with a dot (U+002E), nor contain two dots in a row.
//
// The element prefix up to the first dot must not be a reserved file name
// on Windows, regardless of case (CON, com1, NuL, and so on). The element
// must not have a suffix of a tilde followed by one or more ASCII digits
// (to exclude path elements that look like Windows short-names).
//
// CheckImportPath may be less restrictive in the future, but see the
// top-level package documentation for additional information about
// subtleties of Unicode.
func CheckImportPath(path string) error {
if err := checkPath(path, importPath); err != nil {
return fmt.Errorf("malformed import path %q: %v", path, err)
}
return nil
}
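To make the difference between the module-path and import-path checks concrete, a small sketch (same assumed import path as above):
package main

import (
    "fmt"

    "golang.org/x/mod/module"
)

func main() {
    // Import paths need no dotted first element, but module paths do.
    fmt.Println(module.CheckImportPath("strings"))        // <nil>
    fmt.Println(module.CheckPath("strings"))              // ...missing dot in first path element
    fmt.Println(module.CheckPath("example.com/hello/v2")) // <nil>
    fmt.Println(module.CheckImportPath("a//b"))           // ...double slash
}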
// pathKind indicates what kind of path we're checking. Module paths,
// import paths, and file paths have different restrictions.
type pathKind int
const (
modulePath pathKind = iota
importPath
filePath
)
// checkPath checks that a general path is valid.
// It returns an error describing why, without mentioning the path itself.
// Because these checks apply to module, import, and file paths alike,
// the caller is expected to add the "malformed ___ path %q: " prefix.
// kind selects which character set and element rules apply (see pathKind).
func checkPath(path string, kind pathKind) error {
if !utf8.ValidString(path) {
return fmt.Errorf("invalid UTF-8")
}
if path == "" {
return fmt.Errorf("empty string")
}
if path[0] == '-' {
return fmt.Errorf("leading dash")
}
if strings.Contains(path, "//") {
return fmt.Errorf("double slash")
}
if path[len(path)-1] == '/' {
return fmt.Errorf("trailing slash")
}
elemStart := 0
for i, r := range path {
if r == '/' {
if err := checkElem(path[elemStart:i], kind); err != nil {
return err
}
elemStart = i + 1
}
}
if err := checkElem(path[elemStart:], kind); err != nil {
return err
}
return nil
}
// checkElem checks whether an individual path element is valid.
func checkElem(elem string, kind pathKind) error {
if elem == "" {
return fmt.Errorf("empty path element")
}
if strings.Count(elem, ".") == len(elem) {
return fmt.Errorf("invalid path element %q", elem)
}
if elem[0] == '.' && kind == modulePath {
return fmt.Errorf("leading dot in path element")
}
if elem[len(elem)-1] == '.' {
return fmt.Errorf("trailing dot in path element")
}
for _, r := range elem {
ok := false
switch kind {
case modulePath:
ok = modPathOK(r)
case importPath:
ok = importPathOK(r)
case filePath:
ok = fileNameOK(r)
default:
panic(fmt.Sprintf("internal error: invalid kind %v", kind))
}
if !ok {
return fmt.Errorf("invalid char %q", r)
}
}
// Windows disallows a bunch of path elements, sadly.
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
short := elem
if i := strings.Index(short, "."); i >= 0 {
short = short[:i]
}
for _, bad := range badWindowsNames {
if strings.EqualFold(bad, short) {
return fmt.Errorf("%q disallowed as path element component on Windows", short)
}
}
if kind == filePath {
// don't check for Windows short-names in file names. They're
// only an issue for import paths.
return nil
}
// Reject path components that look like Windows short-names.
// Those usually end in a tilde followed by one or more ASCII digits.
if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 {
suffix := short[tilde+1:]
suffixIsDigits := true
for _, r := range suffix {
if r < '0' || r > '9' {
suffixIsDigits = false
break
}
}
if suffixIsDigits {
return fmt.Errorf("trailing tilde and digits in path element")
}
}
return nil
}
// CheckFilePath checks that a slash-separated file path is valid.
// The definition of a valid file path is the same as the definition
// of a valid import path except that the set of allowed characters is larger:
// all Unicode letters, ASCII digits, the ASCII space character (U+0020),
// and the ASCII punctuation characters
// “!#$%&()+,-.=@[]^_{}~”.
// (The excluded punctuation characters, " * < > ? ` ' | / \ and :,
// have special meanings in certain shells or operating systems.)
//
// CheckFilePath may be less restrictive in the future, but see the
// top-level package documentation for additional information about
// subtleties of Unicode.
func CheckFilePath(path string) error {
if err := checkPath(path, filePath); err != nil {
return fmt.Errorf("malformed file path %q: %v", path, err)
}
return nil
}
// badWindowsNames are the reserved file path elements on Windows.
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
var badWindowsNames = []string{
"CON",
"PRN",
"AUX",
"NUL",
"COM1",
"COM2",
"COM3",
"COM4",
"COM5",
"COM6",
"COM7",
"COM8",
"COM9",
"LPT1",
"LPT2",
"LPT3",
"LPT4",
"LPT5",
"LPT6",
"LPT7",
"LPT8",
"LPT9",
}
// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
// and pathMajor is either empty or "/vN" for N >= 2.
// As a special case, gopkg.in paths are recognized directly;
// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
// SplitPathVersion returns with ok = false when presented with
// a path whose last path element does not satisfy the constraints
// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
if strings.HasPrefix(path, "gopkg.in/") {
return splitGopkgIn(path)
}
i := len(path)
dot := false
for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
if path[i-1] == '.' {
dot = true
}
i--
}
if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' {
return path, "", true
}
prefix, pathMajor = path[:i-2], path[i-2:]
if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
return path, "", false
}
return prefix, pathMajor, true
}
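A short sketch of SplitPathVersion on regular and gopkg.in paths (the commented tuples are the returned values):
package main

import (
    "fmt"

    "golang.org/x/mod/module"
)

func main() {
    fmt.Println(module.SplitPathVersion("example.com/foo/v3")) // "example.com/foo", "/v3", true
    fmt.Println(module.SplitPathVersion("example.com/foo"))    // "example.com/foo", "", true
    fmt.Println(module.SplitPathVersion("example.com/foo/v1")) // "example.com/foo/v1", "", false
    fmt.Println(module.SplitPathVersion("gopkg.in/yaml.v2"))   // "gopkg.in/yaml", ".v2", true
}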
// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths.
func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
if !strings.HasPrefix(path, "gopkg.in/") {
return path, "", false
}
i := len(path)
if strings.HasSuffix(path, "-unstable") {
i -= len("-unstable")
}
for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') {
i--
}
if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' {
// All gopkg.in paths must end in vN for some N.
return path, "", false
}
prefix, pathMajor = path[:i-2], path[i-2:]
if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" {
return path, "", false
}
return prefix, pathMajor, true
}
// MatchPathMajor reports whether the semantic version v
// matches the path major version pathMajor.
//
// MatchPathMajor returns true if and only if CheckPathMajor returns nil.
func MatchPathMajor(v, pathMajor string) bool {
return CheckPathMajor(v, pathMajor) == nil
}
// CheckPathMajor returns a non-nil error if the semantic version v
// does not match the path major version pathMajor.
func CheckPathMajor(v, pathMajor string) error {
// TODO(jayconrod): return errors or panic for invalid inputs. This function
// (and others) was covered by integration tests for cmd/go, and surrounding
// code protected against invalid inputs like non-canonical versions.
if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
}
if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
// Allow an old bug in pseudo-versions that generated v0.0.0- pseudo-versions for gopkg.in .v1 paths.
// For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
return nil
}
m := semver.Major(v)
if pathMajor == "" {
if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" {
return nil
}
pathMajor = "v0 or v1"
} else if pathMajor[0] == '/' || pathMajor[0] == '.' {
if m == pathMajor[1:] {
return nil
}
pathMajor = pathMajor[1:]
}
return &InvalidVersionError{
Version: v,
Err: fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)),
}
}
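And a brief sketch of CheckPathMajor, including the "+incompatible" escape hatch:
package main

import (
    "fmt"

    "golang.org/x/mod/module"
)

func main() {
    fmt.Println(module.CheckPathMajor("v1.5.2", ""))              // <nil>: v0/v1 need no suffix
    fmt.Println(module.CheckPathMajor("v2.0.1", ""))              // ...should be v0 or v1, not v2
    fmt.Println(module.CheckPathMajor("v2.0.1", "/v2"))           // <nil>
    fmt.Println(module.CheckPathMajor("v3.1.0+incompatible", "")) // <nil>: +incompatible bypasses the rule
}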
// PathMajorPrefix returns the major-version tag prefix implied by pathMajor.
// An empty PathMajorPrefix allows either v0 or v1.
//
// Note that MatchPathMajor may accept some versions that do not actually begin
// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1'
// pathMajor, even though that pathMajor implies 'v1' tagging.
func PathMajorPrefix(pathMajor string) string {
if pathMajor == "" {
return ""
}
if pathMajor[0] != '/' && pathMajor[0] != '.' {
panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator")
}
if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
}
m := pathMajor[1:]
if m != semver.Major(m) {
panic("pathMajor suffix " + pathMajor + "passed to PathMajorPrefix is not a valid major version")
}
return m
}
// CanonicalVersion returns the canonical form of the version string v.
// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
func CanonicalVersion(v string) string {
cv := semver.Canonical(v)
if semver.Build(v) == "+incompatible" {
cv += "+incompatible"
}
return cv
}
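A quick sketch of how CanonicalVersion treats ordinary build metadata versus "+incompatible":
package main

import (
    "fmt"

    "golang.org/x/mod/module"
)

func main() {
    fmt.Println(module.CanonicalVersion("v1.2"))                // v1.2.0
    fmt.Println(module.CanonicalVersion("v1.3.0+meta"))         // v1.3.0 (metadata dropped)
    fmt.Println(module.CanonicalVersion("v2.0.1+incompatible")) // v2.0.1+incompatible (suffix kept)
}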
// Sort sorts the list by Path, breaking ties by comparing Version fields.
// The Version fields are interpreted as semantic versions (using semver.Compare)
// optionally followed by a tie-breaking suffix introduced by a slash character,
// like in "v0.0.1/go.mod".
func Sort(list []Version) {
sort.Slice(list, func(i, j int) bool {
mi := list[i]
mj := list[j]
if mi.Path != mj.Path {
return mi.Path < mj.Path
}
// To help go.sum formatting, allow version/file.
// Compare semver prefix by semver rules,
// file by string order.
vi := mi.Version
vj := mj.Version
var fi, fj string
if k := strings.Index(vi, "/"); k >= 0 {
vi, fi = vi[:k], vi[k:]
}
if k := strings.Index(vj, "/"); k >= 0 {
vj, fj = vj[:k], vj[k:]
}
if vi != vj {
return semver.Compare(vi, vj) < 0
}
return fi < fj
})
}
// EscapePath returns the escaped form of the given module path.
// It fails if the module path is invalid.
func EscapePath(path string) (escaped string, err error) {
if err := CheckPath(path); err != nil {
return "", err
}
return escapeString(path)
}
// EscapeVersion returns the escaped form of the given module version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func EscapeVersion(v string) (escaped string, err error) {
if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") {
return "", &InvalidVersionError{
Version: v,
Err: fmt.Errorf("disallowed version string"),
}
}
return escapeString(v)
}
func escapeString(s string) (escaped string, err error) {
haveUpper := false
for _, r := range s {
if r == '!' || r >= utf8.RuneSelf {
// This should be disallowed by CheckPath, but diagnose anyway.
// The correctness of the escaping loop below depends on it.
return "", fmt.Errorf("internal error: inconsistency in EscapePath")
}
if 'A' <= r && r <= 'Z' {
haveUpper = true
}
}
if !haveUpper {
return s, nil
}
var buf []byte
for _, r := range s {
if 'A' <= r && r <= 'Z' {
buf = append(buf, '!', byte(r+'a'-'A'))
} else {
buf = append(buf, byte(r))
}
}
return string(buf), nil
}
// UnescapePath returns the module path for the given escaped path.
// It fails if the escaped path is invalid or describes an invalid path.
func UnescapePath(escaped string) (path string, err error) {
path, ok := unescapeString(escaped)
if !ok {
return "", fmt.Errorf("invalid escaped module path %q", escaped)
}
if err := CheckPath(path); err != nil {
return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err)
}
return path, nil
}
// UnescapeVersion returns the version string for the given escaped version.
// It fails if the escaped form is invalid or describes an invalid version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func UnescapeVersion(escaped string) (v string, err error) {
v, ok := unescapeString(escaped)
if !ok {
return "", fmt.Errorf("invalid escaped version %q", escaped)
}
if err := checkElem(v, filePath); err != nil {
return "", fmt.Errorf("invalid escaped version %q: %v", v, err)
}
return v, nil
}
func unescapeString(escaped string) (string, bool) {
var buf []byte
bang := false
for _, r := range escaped {
if r >= utf8.RuneSelf {
return "", false
}
if bang {
bang = false
if r < 'a' || 'z' < r {
return "", false
}
buf = append(buf, byte(r+'A'-'a'))
continue
}
if r == '!' {
bang = true
continue
}
if 'A' <= r && r <= 'Z' {
return "", false
}
buf = append(buf, byte(r))
}
if bang {
return "", false
}
return string(buf), true
}
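This '!'-based case escaping is what yields the all-lowercase names seen on disk in the module cache; a minimal sketch:
package main

import (
    "fmt"

    "golang.org/x/mod/module"
)

func main() {
    esc, _ := module.EscapePath("github.com/Azure/azure-sdk-for-go")
    fmt.Println(esc) // github.com/!azure/azure-sdk-for-go

    path, _ := module.UnescapePath("github.com/!azure/azure-sdk-for-go")
    fmt.Println(path) // github.com/Azure/azure-sdk-for-go

    ver, _ := module.EscapeVersion("v1.2.3-RC1")
    fmt.Println(ver) // v1.2.3-!r!c1
}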
// MatchPrefixPatterns reports whether any path prefix of target matches one of
// the glob patterns (as defined by path.Match) in the comma-separated globs
// list. This implements the algorithm used when matching a module path to the
// GOPRIVATE environment variable, as described by 'go help module-private'.
//
// It ignores any empty or malformed patterns in the list.
func MatchPrefixPatterns(globs, target string) bool {
for globs != "" {
// Extract next non-empty glob in comma-separated list.
var glob string
if i := strings.Index(globs, ","); i >= 0 {
glob, globs = globs[:i], globs[i+1:]
} else {
glob, globs = globs, ""
}
if glob == "" {
continue
}
// A glob with N+1 path elements (N slashes) needs to be matched
// against the first N+1 path elements of target,
// which end just before the N+1'th slash.
n := strings.Count(glob, "/")
prefix := target
// Walk target, counting slashes, truncating at the N+1'th slash.
for i := 0; i < len(target); i++ {
if target[i] == '/' {
if n == 0 {
prefix = target[:i]
break
}
n--
}
}
if n > 0 {
// Not enough prefix elements.
continue
}
matched, _ := path.Match(glob, prefix)
if matched {
return true
}
}
return false
}
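A minimal sketch of this GOPRIVATE-style prefix matching:
package main

import (
    "fmt"

    "golang.org/x/mod/module"
)

func main() {
    globs := "*.corp.example.com,rsc.io/private"
    fmt.Println(module.MatchPrefixPatterns(globs, "git.corp.example.com/foo")) // true
    fmt.Println(module.MatchPrefixPatterns(globs, "rsc.io/private/quux"))      // true
    fmt.Println(module.MatchPrefixPatterns(globs, "github.com/corp/foo"))      // false
}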

View file

@ -1,391 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package semver implements comparison of semantic version strings.
// In this package, semantic version strings must begin with a leading "v",
// as in "v1.0.0".
//
// The general form of a semantic version string accepted by this package is
//
// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
//
// where square brackets indicate optional parts of the syntax;
// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
// using only alphanumeric characters and hyphens; and
// all-numeric PRERELEASE identifiers must not have leading zeros.
//
// This package follows Semantic Versioning 2.0.0 (see semver.org)
// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
package semver
// parsed returns the parsed form of a semantic version string.
type parsed struct {
major string
minor string
patch string
short string
prerelease string
build string
err string
}
// IsValid reports whether v is a valid semantic version string.
func IsValid(v string) bool {
_, ok := parse(v)
return ok
}
// Canonical returns the canonical formatting of the semantic version v.
// It fills in any missing .MINOR or .PATCH and discards build metadata.
// Two semantic versions compare equal only if their canonical formattings
// are identical strings.
// The canonical invalid semantic version is the empty string.
func Canonical(v string) string {
p, ok := parse(v)
if !ok {
return ""
}
if p.build != "" {
return v[:len(v)-len(p.build)]
}
if p.short != "" {
return v + p.short
}
return v
}
// Major returns the major version prefix of the semantic version v.
// For example, Major("v2.1.0") == "v2".
// If v is an invalid semantic version string, Major returns the empty string.
func Major(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return v[:1+len(pv.major)]
}
// MajorMinor returns the major.minor version prefix of the semantic version v.
// For example, MajorMinor("v2.1.0") == "v2.1".
// If v is an invalid semantic version string, MajorMinor returns the empty string.
func MajorMinor(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
i := 1 + len(pv.major)
if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
return v[:j]
}
return v[:i] + "." + pv.minor
}
// Prerelease returns the prerelease suffix of the semantic version v.
// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
// If v is an invalid semantic version string, Prerelease returns the empty string.
func Prerelease(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return pv.prerelease
}
// Build returns the build suffix of the semantic version v.
// For example, Build("v2.1.0+meta") == "+meta".
// If v is an invalid semantic version string, Build returns the empty string.
func Build(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return pv.build
}
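A short sketch of the accessors above (assuming the package's canonical golang.org/x/mod/semver import path):
package main

import (
    "fmt"

    "golang.org/x/mod/semver"
)

func main() {
    v := "v2.1.0-pre+meta"
    fmt.Println(semver.IsValid(v))    // true
    fmt.Println(semver.Canonical(v))  // v2.1.0-pre
    fmt.Println(semver.Major(v))      // v2
    fmt.Println(semver.MajorMinor(v)) // v2.1
    fmt.Println(semver.Prerelease(v)) // -pre
    fmt.Println(semver.Build(v))      // +meta
}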
// Compare returns an integer comparing two versions according to
// semantic version precedence.
// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
//
// An invalid semantic version string is considered less than a valid one.
// All invalid semantic version strings compare equal to each other.
func Compare(v, w string) int {
pv, ok1 := parse(v)
pw, ok2 := parse(w)
if !ok1 && !ok2 {
return 0
}
if !ok1 {
return -1
}
if !ok2 {
return +1
}
if c := compareInt(pv.major, pw.major); c != 0 {
return c
}
if c := compareInt(pv.minor, pw.minor); c != 0 {
return c
}
if c := compareInt(pv.patch, pw.patch); c != 0 {
return c
}
return comparePrerelease(pv.prerelease, pw.prerelease)
}
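A small sketch of Compare's ordering, covering prereleases, build metadata, and invalid inputs:
package main

import (
    "fmt"

    "golang.org/x/mod/semver"
)

func main() {
    fmt.Println(semver.Compare("v1.0.0-alpha", "v1.0.0"))          // -1: prereleases sort first
    fmt.Println(semver.Compare("v1.0.0-beta.2", "v1.0.0-beta.11")) // -1: numeric identifiers compare numerically
    fmt.Println(semver.Compare("v1.2.0+build.1", "v1.2.0"))        // 0: build metadata is ignored
    fmt.Println(semver.Compare("bogus", "v0.0.1"))                 // -1: invalid sorts below valid
}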
// Max canonicalizes its arguments and then returns the version string
// that compares greater.
//
// Deprecated: use Compare instead. In most cases, returning a canonicalized
// version is not expected or desired.
func Max(v, w string) string {
v = Canonical(v)
w = Canonical(w)
if Compare(v, w) > 0 {
return v
}
return w
}
func parse(v string) (p parsed, ok bool) {
if v == "" || v[0] != 'v' {
p.err = "missing v prefix"
return
}
p.major, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad major version"
return
}
if v == "" {
p.minor = "0"
p.patch = "0"
p.short = ".0.0"
return
}
if v[0] != '.' {
p.err = "bad minor prefix"
ok = false
return
}
p.minor, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad minor version"
return
}
if v == "" {
p.patch = "0"
p.short = ".0"
return
}
if v[0] != '.' {
p.err = "bad patch prefix"
ok = false
return
}
p.patch, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad patch version"
return
}
if len(v) > 0 && v[0] == '-' {
p.prerelease, v, ok = parsePrerelease(v)
if !ok {
p.err = "bad prerelease"
return
}
}
if len(v) > 0 && v[0] == '+' {
p.build, v, ok = parseBuild(v)
if !ok {
p.err = "bad build"
return
}
}
if v != "" {
p.err = "junk on end"
ok = false
return
}
ok = true
return
}
func parseInt(v string) (t, rest string, ok bool) {
if v == "" {
return
}
if v[0] < '0' || '9' < v[0] {
return
}
i := 1
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
if v[0] == '0' && i != 1 {
return
}
return v[:i], v[i:], true
}
func parsePrerelease(v string) (t, rest string, ok bool) {
// "A pre-release version MAY be denoted by appending a hyphen and
// a series of dot separated identifiers immediately following the patch version.
// Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
// Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
if v == "" || v[0] != '-' {
return
}
i := 1
start := 1
for i < len(v) && v[i] != '+' {
if !isIdentChar(v[i]) && v[i] != '.' {
return
}
if v[i] == '.' {
if start == i || isBadNum(v[start:i]) {
return
}
start = i + 1
}
i++
}
if start == i || isBadNum(v[start:i]) {
return
}
return v[:i], v[i:], true
}
func parseBuild(v string) (t, rest string, ok bool) {
if v == "" || v[0] != '+' {
return
}
i := 1
start := 1
for i < len(v) {
if !isIdentChar(v[i]) && v[i] != '.' {
return
}
if v[i] == '.' {
if start == i {
return
}
start = i + 1
}
i++
}
if start == i {
return
}
return v[:i], v[i:], true
}
func isIdentChar(c byte) bool {
return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
}
func isBadNum(v string) bool {
i := 0
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
return i == len(v) && i > 1 && v[0] == '0'
}
func isNum(v string) bool {
i := 0
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
return i == len(v)
}
func compareInt(x, y string) int {
if x == y {
return 0
}
if len(x) < len(y) {
return -1
}
if len(x) > len(y) {
return +1
}
if x < y {
return -1
} else {
return +1
}
}
func comparePrerelease(x, y string) int {
// "When major, minor, and patch are equal, a pre-release version has
// lower precedence than a normal version.
// Example: 1.0.0-alpha < 1.0.0.
// Precedence for two pre-release versions with the same major, minor,
// and patch version MUST be determined by comparing each dot separated
// identifier from left to right until a difference is found as follows:
// identifiers consisting of only digits are compared numerically and
// identifiers with letters or hyphens are compared lexically in ASCII
// sort order. Numeric identifiers always have lower precedence than
// non-numeric identifiers. A larger set of pre-release fields has a
// higher precedence than a smaller set, if all of the preceding
// identifiers are equal.
// Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
// 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
if x == y {
return 0
}
if x == "" {
return +1
}
if y == "" {
return -1
}
for x != "" && y != "" {
x = x[1:] // skip - or .
y = y[1:] // skip - or .
var dx, dy string
dx, x = nextIdent(x)
dy, y = nextIdent(y)
if dx != dy {
ix := isNum(dx)
iy := isNum(dy)
if ix != iy {
if ix {
return -1
} else {
return +1
}
}
if ix {
if len(dx) < len(dy) {
return -1
}
if len(dx) > len(dy) {
return +1
}
}
if dx < dy {
return -1
} else {
return +1
}
}
}
if x == "" {
return -1
} else {
return +1
}
}
func nextIdent(x string) (dx, rest string) {
i := 0
for i < len(x) && x[i] != '.' {
i++
}
return x[:i], x[i:]
}

3
vendor/golang.org/x/tools/AUTHORS generated vendored
View file

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

View file

@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/tools/LICENSE generated vendored
View file

@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/tools/PATENTS generated vendored
View file

@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

View file

@ -1,47 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Command goimports updates your Go import lines,
adding missing ones and removing unreferenced ones.
$ go get golang.org/x/tools/cmd/goimports
In addition to fixing imports, goimports also formats
your code in the same style as gofmt so it can be used
as a replacement for your editor's gofmt-on-save hook.
For emacs, make sure you have the latest go-mode.el:
https://github.com/dominikh/go-mode.el
Then in your .emacs file:
(setq gofmt-command "goimports")
(add-hook 'before-save-hook 'gofmt-before-save)
For vim, set "gofmt_command" to "goimports":
https://golang.org/change/39c724dd7f252
https://golang.org/wiki/IDEsAndTextEditorPlugins
etc
For GoSublime, follow the steps described here:
http://michaelwhatcott.com/gosublime-goimports/
For other editors, you probably know what to do.
To exclude directories in your $GOPATH from being scanned for Go
files, goimports respects a configuration file at
$GOPATH/src/.goimportsignore which may contain blank lines, comment
lines (beginning with '#'), or lines naming a directory relative to
the configuration file to ignore when scanning. No globbing or regex
patterns are allowed. Use the "-v" verbose flag to verify it's
working and see what goimports is doing.
File bugs or feature requests at:
https://golang.org/issues/new?title=x/tools/cmd/goimports:+
Happy hacking!
*/
package main // import "golang.org/x/tools/cmd/goimports"

View file

@ -1,380 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bufio"
"bytes"
"errors"
"flag"
"fmt"
"go/scanner"
exec "golang.org/x/sys/execabs"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"runtime/pprof"
"strings"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/imports"
)
var (
// main operation modes
list = flag.Bool("l", false, "list files whose formatting differs from goimport's")
write = flag.Bool("w", false, "write result to (source) file instead of stdout")
doDiff = flag.Bool("d", false, "display diffs instead of rewriting files")
srcdir = flag.String("srcdir", "", "choose imports as if source code is from `dir`. When operating on a single file, dir may instead be the complete file name.")
verbose bool // verbose logging
cpuProfile = flag.String("cpuprofile", "", "CPU profile output")
memProfile = flag.String("memprofile", "", "memory profile output")
memProfileRate = flag.Int("memrate", 0, "if > 0, sets runtime.MemProfileRate")
options = &imports.Options{
TabWidth: 8,
TabIndent: true,
Comments: true,
Fragment: true,
Env: &imports.ProcessEnv{
GocmdRunner: &gocommand.Runner{},
},
}
exitCode = 0
)
func init() {
flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)")
flag.StringVar(&options.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list")
flag.BoolVar(&options.FormatOnly, "format-only", false, "if true, don't fix imports and only format. In this mode, goimports is effectively gofmt, with the addition that imports are grouped into sections.")
}
func report(err error) {
scanner.PrintError(os.Stderr, err)
exitCode = 2
}
func usage() {
fmt.Fprintf(os.Stderr, "usage: goimports [flags] [path ...]\n")
flag.PrintDefaults()
os.Exit(2)
}
func isGoFile(f os.FileInfo) bool {
// ignore non-Go files
name := f.Name()
return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
}
// argumentType is which mode goimports was invoked as.
type argumentType int
const (
// fromStdin means the user is piping their source into goimports.
fromStdin argumentType = iota
// singleArg is the common case from editors, when goimports is run on
// a single file.
singleArg
// multipleArg is when the user ran "goimports file1.go file2.go"
// or ran goimports on a directory tree.
multipleArg
)
func processFile(filename string, in io.Reader, out io.Writer, argType argumentType) error {
opt := options
if argType == fromStdin {
nopt := *options
nopt.Fragment = true
opt = &nopt
}
if in == nil {
f, err := os.Open(filename)
if err != nil {
return err
}
defer f.Close()
in = f
}
src, err := ioutil.ReadAll(in)
if err != nil {
return err
}
target := filename
if *srcdir != "" {
// Determine whether the provided -srcdir is a directory or file
// and then use it to override the target.
//
// See https://github.com/dominikh/go-mode.el/issues/146
if isFile(*srcdir) {
if argType == multipleArg {
return errors.New("-srcdir value can't be a file when passing multiple arguments or when walking directories")
}
target = *srcdir
} else if argType == singleArg && strings.HasSuffix(*srcdir, ".go") && !isDir(*srcdir) {
// For a file which doesn't exist on disk yet, but might shortly.
// e.g. user in editor opens $DIR/newfile.go and newfile.go doesn't yet exist on disk.
// The goimports on-save hook writes the buffer to a temp file
// first and runs goimports before the actual save to newfile.go.
// The editor's buffer is named "newfile.go" so that is passed to goimports as:
// goimports -srcdir=/gopath/src/pkg/newfile.go /tmp/gofmtXXXXXXXX.go
// and then the editor reloads the result from the tmp file and writes
// it to newfile.go.
target = *srcdir
} else {
// Pretend that file is from *srcdir in order to decide
// visible imports correctly.
target = filepath.Join(*srcdir, filepath.Base(filename))
}
}
res, err := imports.Process(target, src, opt)
if err != nil {
return err
}
if !bytes.Equal(src, res) {
// formatting has changed
if *list {
fmt.Fprintln(out, filename)
}
if *write {
if argType == fromStdin {
// filename is "<standard input>"
return errors.New("can't use -w on stdin")
}
// On Windows, we need to re-set the permissions from the file. See golang/go#38225.
var perms os.FileMode
if fi, err := os.Stat(filename); err == nil {
perms = fi.Mode() & os.ModePerm
}
err = ioutil.WriteFile(filename, res, perms)
if err != nil {
return err
}
}
if *doDiff {
if argType == fromStdin {
filename = "stdin.go" // because <standard input>.orig looks silly
}
data, err := diff(src, res, filename)
if err != nil {
return fmt.Errorf("computing diff: %s", err)
}
fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename))
out.Write(data)
}
}
if !*list && !*write && !*doDiff {
_, err = out.Write(res)
}
return err
}
func visitFile(path string, f os.FileInfo, err error) error {
if err == nil && isGoFile(f) {
err = processFile(path, nil, os.Stdout, multipleArg)
}
if err != nil {
report(err)
}
return nil
}
func walkDir(path string) {
filepath.Walk(path, visitFile)
}
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
// call gofmtMain in a separate function
// so that it can use defer and have them
// run before the exit.
gofmtMain()
os.Exit(exitCode)
}
// parseFlags parses command line flags and returns the paths to process.
// It's a var so that custom implementations can replace it in other files.
var parseFlags = func() []string {
flag.BoolVar(&verbose, "v", false, "verbose logging")
flag.Parse()
return flag.Args()
}
func bufferedFileWriter(dest string) (w io.Writer, close func()) {
f, err := os.Create(dest)
if err != nil {
log.Fatal(err)
}
bw := bufio.NewWriter(f)
return bw, func() {
if err := bw.Flush(); err != nil {
log.Fatalf("error flushing %v: %v", dest, err)
}
if err := f.Close(); err != nil {
log.Fatal(err)
}
}
}
func gofmtMain() {
flag.Usage = usage
paths := parseFlags()
if *cpuProfile != "" {
bw, flush := bufferedFileWriter(*cpuProfile)
pprof.StartCPUProfile(bw)
defer flush()
defer pprof.StopCPUProfile()
}
// doTrace is a conditionally compiled wrapper around runtime/trace. It is
// used to allow goimports to compile under gccgo, which does not support
// runtime/trace. See https://golang.org/issue/15544.
defer doTrace()()
if *memProfileRate > 0 {
runtime.MemProfileRate = *memProfileRate
bw, flush := bufferedFileWriter(*memProfile)
defer func() {
runtime.GC() // materialize all statistics
if err := pprof.WriteHeapProfile(bw); err != nil {
log.Fatal(err)
}
flush()
}()
}
if verbose {
log.SetFlags(log.LstdFlags | log.Lmicroseconds)
options.Env.Logf = log.Printf
}
if options.TabWidth < 0 {
fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", options.TabWidth)
exitCode = 2
return
}
if len(paths) == 0 {
if err := processFile("<standard input>", os.Stdin, os.Stdout, fromStdin); err != nil {
report(err)
}
return
}
argType := singleArg
if len(paths) > 1 {
argType = multipleArg
}
for _, path := range paths {
switch dir, err := os.Stat(path); {
case err != nil:
report(err)
case dir.IsDir():
walkDir(path)
default:
if err := processFile(path, nil, os.Stdout, argType); err != nil {
report(err)
}
}
}
}
func writeTempFile(dir, prefix string, data []byte) (string, error) {
file, err := ioutil.TempFile(dir, prefix)
if err != nil {
return "", err
}
_, err = file.Write(data)
if err1 := file.Close(); err == nil {
err = err1
}
if err != nil {
os.Remove(file.Name())
return "", err
}
return file.Name(), nil
}
func diff(b1, b2 []byte, filename string) (data []byte, err error) {
f1, err := writeTempFile("", "gofmt", b1)
if err != nil {
return
}
defer os.Remove(f1)
f2, err := writeTempFile("", "gofmt", b2)
if err != nil {
return
}
defer os.Remove(f2)
cmd := "diff"
if runtime.GOOS == "plan9" {
cmd = "/bin/ape/diff"
}
data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput()
if len(data) > 0 {
// diff exits with a non-zero status when the files don't match.
// Ignore that failure as long as we get output.
return replaceTempFilename(data, filename)
}
return
}
// replaceTempFilename replaces the temporary filenames in diff with the actual one.
//
// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500
// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500
// ...
// ->
// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500
// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500
// ...
func replaceTempFilename(diff []byte, filename string) ([]byte, error) {
bs := bytes.SplitN(diff, []byte{'\n'}, 3)
if len(bs) < 3 {
return nil, fmt.Errorf("got unexpected diff for %s", filename)
}
// Preserve timestamps.
var t0, t1 []byte
if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 {
t0 = bs[0][i:]
}
if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 {
t1 = bs[1][i:]
}
// Always print filepath with slash separator.
f := filepath.ToSlash(filename)
bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0))
bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1))
return bytes.Join(bs, []byte{'\n'}), nil
}
// isFile reports whether name is a file.
func isFile(name string) bool {
fi, err := os.Stat(name)
return err == nil && fi.Mode().IsRegular()
}
// isDir reports whether name is a directory.
func isDir(name string) bool {
fi, err := os.Stat(name)
return err == nil && fi.IsDir()
}

View file

@ -1,27 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
// +build gc
package main
import (
"flag"
"runtime/trace"
)
var traceProfile = flag.String("trace", "", "trace profile output")
func doTrace() func() {
if *traceProfile != "" {
bw, flush := bufferedFileWriter(*traceProfile)
trace.Start(bw)
return func() {
flush()
trace.Stop()
}
}
return func() {}
}

View file

@ -1,12 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !gc
// +build !gc
package main
func doTrace() func() {
return func() {}
}

View file

@ -1,627 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package astutil
// This file defines utilities for working with source positions.
import (
"fmt"
"go/ast"
"go/token"
"sort"
)
// PathEnclosingInterval returns the node that encloses the source
// interval [start, end), and all its ancestors up to the AST root.
//
// The definition of "enclosing" used by this function considers
// additional whitespace abutting a node to be enclosed by it.
// In this example:
//
// z := x + y // add them
// <-A->
// <----B----->
//
// the ast.BinaryExpr(+) node is considered to enclose interval B
// even though its [Pos()..End()) is actually only interval A.
// This behaviour makes user interfaces more tolerant of imperfect
// input.
//
// This function treats tokens as nodes, though they are not included
// in the result. e.g. PathEnclosingInterval("+") returns the
// enclosing ast.BinaryExpr("x + y").
//
// If start==end, the 1-char interval following start is used instead.
//
// The 'exact' result is true if the interval contains only path[0]
// and perhaps some adjacent whitespace. It is false if the interval
// overlaps multiple children of path[0], or if it contains only
// interior whitespace of path[0].
// In this example:
//
// z := x + y // add them
// <--C--> <---E-->
// ^
// D
//
// intervals C, D and E are inexact. C is contained by the
// z-assignment statement, because it spans three of its children (:=,
// x, +). So too is the 1-char interval D, because it contains only
// interior whitespace of the assignment. E is considered interior
// whitespace of the BlockStmt containing the assignment.
//
// Precondition: [start, end) both lie within the same file as root.
// TODO(adonovan): return (nil, false) in this case and remove precond.
// Requires FileSet; see loader.tokenFileContainsPos.
//
// Postcondition: path is never nil; it always contains at least 'root'.
//
func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
// fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging
// Precondition: node.[Pos..End) and adjoining whitespace contain [start, end).
var visit func(node ast.Node) bool
visit = func(node ast.Node) bool {
path = append(path, node)
nodePos := node.Pos()
nodeEnd := node.End()
// fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging
// Intersect [start, end) with interval of node.
if start < nodePos {
start = nodePos
}
if end > nodeEnd {
end = nodeEnd
}
// Find sole child that contains [start, end).
children := childrenOf(node)
l := len(children)
for i, child := range children {
// [childPos, childEnd) is unaugmented interval of child.
childPos := child.Pos()
childEnd := child.End()
// [augPos, augEnd) is whitespace-augmented interval of child.
augPos := childPos
augEnd := childEnd
if i > 0 {
augPos = children[i-1].End() // start of preceding whitespace
}
if i < l-1 {
nextChildPos := children[i+1].Pos()
// Does [start, end) lie between child and next child?
if start >= augEnd && end <= nextChildPos {
return false // inexact match
}
augEnd = nextChildPos // end of following whitespace
}
// fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
// i, augPos, augEnd, start, end) // debugging
// Does augmented child strictly contain [start, end)?
if augPos <= start && end <= augEnd {
_, isToken := child.(tokenNode)
return isToken || visit(child)
}
// Does [start, end) overlap multiple children?
// i.e. left-augmented child contains start
// but LR-augmented child does not contain end.
if start < childEnd && end > augEnd {
break
}
}
// No single child contained [start, end),
// so node is the result. Is it exact?
// (It's tempting to put this condition before the
// child loop, but it gives the wrong result in the
// case where a node (e.g. ExprStmt) and its sole
// child have equal intervals.)
if start == nodePos && end == nodeEnd {
return true // exact match
}
return false // inexact: overlaps multiple children
}
if start > end {
start, end = end, start
}
if start < root.End() && end > root.Pos() {
if start == end {
end = start + 1 // empty interval => interval of size 1
}
exact = visit(root)
// Reverse the path:
for i, l := 0, len(path); i < l/2; i++ {
path[i], path[l-1-i] = path[l-1-i], path[i]
}
} else {
// Selection lies within whitespace preceding the
// first (or following the last) declaration in the file.
// The result nonetheless always includes the ast.File.
path = append(path, root)
}
return
}
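A rough sketch of typical PathEnclosingInterval usage (the package's public import path is golang.org/x/tools/go/ast/astutil):
package main

import (
    "fmt"
    "go/ast"
    "go/parser"
    "go/token"

    "golang.org/x/tools/go/ast/astutil"
)

func main() {
    fset := token.NewFileSet()
    src := "package p\n\nfunc add(x, y int) int { return x + y }\n"
    f, err := parser.ParseFile(fset, "p.go", src, 0)
    if err != nil {
        panic(err)
    }
    // Find the "x + y" expression, then ask which nodes enclose it.
    var bin *ast.BinaryExpr
    ast.Inspect(f, func(n ast.Node) bool {
        if b, ok := n.(*ast.BinaryExpr); ok {
            bin = b
        }
        return true
    })
    path, exact := astutil.PathEnclosingInterval(f, bin.Pos(), bin.End())
    for _, n := range path {
        fmt.Printf("%T\n", n) // *ast.BinaryExpr, *ast.ReturnStmt, *ast.BlockStmt, *ast.FuncDecl, *ast.File
    }
    fmt.Println("exact:", exact) // true
}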
// tokenNode is a dummy implementation of ast.Node for a single token.
// They are used transiently by PathEnclosingInterval but never escape
// this package.
//
type tokenNode struct {
pos token.Pos
end token.Pos
}
func (n tokenNode) Pos() token.Pos {
return n.pos
}
func (n tokenNode) End() token.Pos {
return n.end
}
func tok(pos token.Pos, len int) ast.Node {
return tokenNode{pos, pos + token.Pos(len)}
}
// childrenOf returns the direct non-nil children of ast.Node n.
// It may include fake ast.Node implementations for bare tokens.
// It is not safe to call (e.g.) ast.Walk on such nodes.
//
func childrenOf(n ast.Node) []ast.Node {
var children []ast.Node
// First add nodes for all true subtrees.
ast.Inspect(n, func(node ast.Node) bool {
if node == n { // push n
return true // recur
}
if node != nil { // push child
children = append(children, node)
}
return false // no recursion
})
// Then add fake Nodes for bare tokens.
switch n := n.(type) {
case *ast.ArrayType:
children = append(children,
tok(n.Lbrack, len("[")),
tok(n.Elt.End(), len("]")))
case *ast.AssignStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.BasicLit:
children = append(children,
tok(n.ValuePos, len(n.Value)))
case *ast.BinaryExpr:
children = append(children, tok(n.OpPos, len(n.Op.String())))
case *ast.BlockStmt:
children = append(children,
tok(n.Lbrace, len("{")),
tok(n.Rbrace, len("}")))
case *ast.BranchStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.CallExpr:
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
if n.Ellipsis != 0 {
children = append(children, tok(n.Ellipsis, len("...")))
}
case *ast.CaseClause:
if n.List == nil {
children = append(children,
tok(n.Case, len("default")))
} else {
children = append(children,
tok(n.Case, len("case")))
}
children = append(children, tok(n.Colon, len(":")))
case *ast.ChanType:
switch n.Dir {
case ast.RECV:
children = append(children, tok(n.Begin, len("<-chan")))
case ast.SEND:
children = append(children, tok(n.Begin, len("chan<-")))
case ast.RECV | ast.SEND:
children = append(children, tok(n.Begin, len("chan")))
}
case *ast.CommClause:
if n.Comm == nil {
children = append(children,
tok(n.Case, len("default")))
} else {
children = append(children,
tok(n.Case, len("case")))
}
children = append(children, tok(n.Colon, len(":")))
case *ast.Comment:
// nop
case *ast.CommentGroup:
// nop
case *ast.CompositeLit:
children = append(children,
tok(n.Lbrace, len("{")),
tok(n.Rbrace, len("{")))
case *ast.DeclStmt:
// nop
case *ast.DeferStmt:
children = append(children,
tok(n.Defer, len("defer")))
case *ast.Ellipsis:
children = append(children,
tok(n.Ellipsis, len("...")))
case *ast.EmptyStmt:
// nop
case *ast.ExprStmt:
// nop
case *ast.Field:
// TODO(adonovan): Field.{Doc,Comment,Tag}?
case *ast.FieldList:
children = append(children,
tok(n.Opening, len("(")),
tok(n.Closing, len(")")))
case *ast.File:
// TODO test: Doc
children = append(children,
tok(n.Package, len("package")))
case *ast.ForStmt:
children = append(children,
tok(n.For, len("for")))
case *ast.FuncDecl:
// TODO(adonovan): FuncDecl.Comment?
// Uniquely, FuncDecl breaks the invariant that
// preorder traversal yields tokens in lexical order:
// in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
//
// As a workaround, we inline the case for FuncType
// here and order things correctly.
//
children = nil // discard ast.Walk(FuncDecl) info subtrees
children = append(children, tok(n.Type.Func, len("func")))
if n.Recv != nil {
children = append(children, n.Recv)
}
children = append(children, n.Name)
if n.Type.Params != nil {
children = append(children, n.Type.Params)
}
if n.Type.Results != nil {
children = append(children, n.Type.Results)
}
if n.Body != nil {
children = append(children, n.Body)
}
case *ast.FuncLit:
// nop
case *ast.FuncType:
if n.Func != 0 {
children = append(children,
tok(n.Func, len("func")))
}
case *ast.GenDecl:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
if n.Lparen != 0 {
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
}
case *ast.GoStmt:
children = append(children,
tok(n.Go, len("go")))
case *ast.Ident:
children = append(children,
tok(n.NamePos, len(n.Name)))
case *ast.IfStmt:
children = append(children,
tok(n.If, len("if")))
case *ast.ImportSpec:
// TODO(adonovan): ImportSpec.{Doc,EndPos}?
case *ast.IncDecStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.IndexExpr:
children = append(children,
tok(n.Lbrack, len("{")),
tok(n.Rbrack, len("}")))
case *ast.InterfaceType:
children = append(children,
tok(n.Interface, len("interface")))
case *ast.KeyValueExpr:
children = append(children,
tok(n.Colon, len(":")))
case *ast.LabeledStmt:
children = append(children,
tok(n.Colon, len(":")))
case *ast.MapType:
children = append(children,
tok(n.Map, len("map")))
case *ast.ParenExpr:
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
case *ast.RangeStmt:
children = append(children,
tok(n.For, len("for")),
tok(n.TokPos, len(n.Tok.String())))
case *ast.ReturnStmt:
children = append(children,
tok(n.Return, len("return")))
case *ast.SelectStmt:
children = append(children,
tok(n.Select, len("select")))
case *ast.SelectorExpr:
// nop
case *ast.SendStmt:
children = append(children,
tok(n.Arrow, len("<-")))
case *ast.SliceExpr:
children = append(children,
tok(n.Lbrack, len("[")),
tok(n.Rbrack, len("]")))
case *ast.StarExpr:
children = append(children, tok(n.Star, len("*")))
case *ast.StructType:
children = append(children, tok(n.Struct, len("struct")))
case *ast.SwitchStmt:
children = append(children, tok(n.Switch, len("switch")))
case *ast.TypeAssertExpr:
children = append(children,
tok(n.Lparen-1, len(".")),
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
case *ast.TypeSpec:
// TODO(adonovan): TypeSpec.{Doc,Comment}?
case *ast.TypeSwitchStmt:
children = append(children, tok(n.Switch, len("switch")))
case *ast.UnaryExpr:
children = append(children, tok(n.OpPos, len(n.Op.String())))
case *ast.ValueSpec:
// TODO(adonovan): ValueSpec.{Doc,Comment}?
case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
// nop
}
// TODO(adonovan): opt: merge the logic of ast.Inspect() into
// the switch above so we can make interleaved callbacks for
// both Nodes and Tokens in the right order and avoid the need
// to sort.
sort.Sort(byPos(children))
return children
}
type byPos []ast.Node
func (sl byPos) Len() int {
return len(sl)
}
func (sl byPos) Less(i, j int) bool {
return sl[i].Pos() < sl[j].Pos()
}
func (sl byPos) Swap(i, j int) {
sl[i], sl[j] = sl[j], sl[i]
}
// NodeDescription returns a description of the concrete type of n suitable
// for a user interface.
//
// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident,
// StarExpr) we could be much more specific given the path to the AST
// root. Perhaps we should do that.
//
func NodeDescription(n ast.Node) string {
switch n := n.(type) {
case *ast.ArrayType:
return "array type"
case *ast.AssignStmt:
return "assignment"
case *ast.BadDecl:
return "bad declaration"
case *ast.BadExpr:
return "bad expression"
case *ast.BadStmt:
return "bad statement"
case *ast.BasicLit:
return "basic literal"
case *ast.BinaryExpr:
return fmt.Sprintf("binary %s operation", n.Op)
case *ast.BlockStmt:
return "block"
case *ast.BranchStmt:
switch n.Tok {
case token.BREAK:
return "break statement"
case token.CONTINUE:
return "continue statement"
case token.GOTO:
return "goto statement"
case token.FALLTHROUGH:
return "fall-through statement"
}
case *ast.CallExpr:
if len(n.Args) == 1 && !n.Ellipsis.IsValid() {
return "function call (or conversion)"
}
return "function call"
case *ast.CaseClause:
return "case clause"
case *ast.ChanType:
return "channel type"
case *ast.CommClause:
return "communication clause"
case *ast.Comment:
return "comment"
case *ast.CommentGroup:
return "comment group"
case *ast.CompositeLit:
return "composite literal"
case *ast.DeclStmt:
return NodeDescription(n.Decl) + " statement"
case *ast.DeferStmt:
return "defer statement"
case *ast.Ellipsis:
return "ellipsis"
case *ast.EmptyStmt:
return "empty statement"
case *ast.ExprStmt:
return "expression statement"
case *ast.Field:
// Can be any of these:
// struct {x, y int} -- struct field(s)
// struct {T} -- anon struct field
// interface {I} -- interface embedding
// interface {f()} -- interface method
// func (A) func(B) C -- receiver, param(s), result(s)
return "field/method/parameter"
case *ast.FieldList:
return "field/method/parameter list"
case *ast.File:
return "source file"
case *ast.ForStmt:
return "for loop"
case *ast.FuncDecl:
return "function declaration"
case *ast.FuncLit:
return "function literal"
case *ast.FuncType:
return "function type"
case *ast.GenDecl:
switch n.Tok {
case token.IMPORT:
return "import declaration"
case token.CONST:
return "constant declaration"
case token.TYPE:
return "type declaration"
case token.VAR:
return "variable declaration"
}
case *ast.GoStmt:
return "go statement"
case *ast.Ident:
return "identifier"
case *ast.IfStmt:
return "if statement"
case *ast.ImportSpec:
return "import specification"
case *ast.IncDecStmt:
if n.Tok == token.INC {
return "increment statement"
}
return "decrement statement"
case *ast.IndexExpr:
return "index expression"
case *ast.InterfaceType:
return "interface type"
case *ast.KeyValueExpr:
return "key/value association"
case *ast.LabeledStmt:
return "statement label"
case *ast.MapType:
return "map type"
case *ast.Package:
return "package"
case *ast.ParenExpr:
return "parenthesized " + NodeDescription(n.X)
case *ast.RangeStmt:
return "range loop"
case *ast.ReturnStmt:
return "return statement"
case *ast.SelectStmt:
return "select statement"
case *ast.SelectorExpr:
return "selector"
case *ast.SendStmt:
return "channel send"
case *ast.SliceExpr:
return "slice expression"
case *ast.StarExpr:
return "*-operation" // load/store expr or pointer type
case *ast.StructType:
return "struct type"
case *ast.SwitchStmt:
return "switch statement"
case *ast.TypeAssertExpr:
return "type assertion"
case *ast.TypeSpec:
return "type specification"
case *ast.TypeSwitchStmt:
return "type switch"
case *ast.UnaryExpr:
return fmt.Sprintf("unary %s operation", n.Op)
case *ast.ValueSpec:
return "value specification"
}
panic(fmt.Sprintf("unexpected node type: %T", n))
}

View file

@ -1,482 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package astutil contains common utilities for working with the Go AST.
package astutil // import "golang.org/x/tools/go/ast/astutil"
import (
"fmt"
"go/ast"
"go/token"
"strconv"
"strings"
)
// AddImport adds the import path to the file f, if absent.
func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) {
return AddNamedImport(fset, f, "", path)
}
// AddNamedImport adds the import with the given name and path to the file f, if absent.
// If name is not empty, it is used to rename the import.
//
// For example, calling
// AddNamedImport(fset, f, "pathpkg", "path")
// adds
// import pathpkg "path"
func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) {
if imports(f, name, path) {
return false
}
newImport := &ast.ImportSpec{
Path: &ast.BasicLit{
Kind: token.STRING,
Value: strconv.Quote(path),
},
}
if name != "" {
newImport.Name = &ast.Ident{Name: name}
}
// Find an import decl to add to.
// The goal is to find an existing import
// whose import path has the longest shared
// prefix with path.
var (
bestMatch = -1 // length of longest shared prefix
lastImport = -1 // index in f.Decls of the file's final import decl
impDecl *ast.GenDecl // import decl containing the best match
impIndex = -1 // spec index in impDecl containing the best match
isThirdPartyPath = isThirdParty(path)
)
for i, decl := range f.Decls {
gen, ok := decl.(*ast.GenDecl)
if ok && gen.Tok == token.IMPORT {
lastImport = i
// Do not add to import "C", to avoid disrupting the
// association with its doc comment, breaking cgo.
if declImports(gen, "C") {
continue
}
// Match an empty import decl if that's all that is available.
if len(gen.Specs) == 0 && bestMatch == -1 {
impDecl = gen
}
// Compute longest shared prefix with imports in this group and find best
// matched import spec.
// 1. Always prefer import spec with longest shared prefix.
// 2. While match length is 0,
// - for stdlib package: prefer first import spec.
// - for third party package: prefer first third party import spec.
// We cannot use last import spec as best match for third party package
// because grouped imports are usually placed last by goimports -local
// flag.
// See issue #19190.
seenAnyThirdParty := false
for j, spec := range gen.Specs {
impspec := spec.(*ast.ImportSpec)
p := importPath(impspec)
n := matchLen(p, path)
if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) {
bestMatch = n
impDecl = gen
impIndex = j
}
seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p)
}
}
}
// If no import decl found, add one after the last import.
if impDecl == nil {
impDecl = &ast.GenDecl{
Tok: token.IMPORT,
}
if lastImport >= 0 {
impDecl.TokPos = f.Decls[lastImport].End()
} else {
// There are no existing imports.
// Our new import, preceded by a blank line, goes after the package declaration
// and after the comment, if any, that starts on the same line as the
// package declaration.
impDecl.TokPos = f.Package
file := fset.File(f.Package)
pkgLine := file.Line(f.Package)
for _, c := range f.Comments {
if file.Line(c.Pos()) > pkgLine {
break
}
// +2 for a blank line
impDecl.TokPos = c.End() + 2
}
}
f.Decls = append(f.Decls, nil)
copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
f.Decls[lastImport+1] = impDecl
}
// Insert new import at insertAt.
insertAt := 0
if impIndex >= 0 {
// insert after the found import
insertAt = impIndex + 1
}
impDecl.Specs = append(impDecl.Specs, nil)
copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
impDecl.Specs[insertAt] = newImport
pos := impDecl.Pos()
if insertAt > 0 {
// If there is a comment after an existing import, preserve the comment
// position by adding the new import after the comment.
if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil {
pos = spec.Comment.End()
} else {
// Assign same position as the previous import,
// so that the sorter sees it as being in the same block.
pos = impDecl.Specs[insertAt-1].Pos()
}
}
if newImport.Name != nil {
newImport.Name.NamePos = pos
}
newImport.Path.ValuePos = pos
newImport.EndPos = pos
// Clean up parens. impDecl contains at least one spec.
if len(impDecl.Specs) == 1 {
// Remove unneeded parens.
impDecl.Lparen = token.NoPos
} else if !impDecl.Lparen.IsValid() {
// impDecl needs parens added.
impDecl.Lparen = impDecl.Specs[0].Pos()
}
f.Imports = append(f.Imports, newImport)
if len(f.Decls) <= 1 {
return true
}
// Merge all the import declarations into the first one.
var first *ast.GenDecl
for i := 0; i < len(f.Decls); i++ {
decl := f.Decls[i]
gen, ok := decl.(*ast.GenDecl)
if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
continue
}
if first == nil {
first = gen
continue // Don't touch the first one.
}
// We now know there is more than one package in this import
// declaration. Ensure that it ends up parenthesized.
first.Lparen = first.Pos()
// Move the imports of the other import declaration to the first one.
for _, spec := range gen.Specs {
spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
first.Specs = append(first.Specs, spec)
}
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
i--
}
return true
}
func isThirdParty(importPath string) bool {
// Third party package import path usually contains "." (".com", ".org", ...)
// This logic is taken from golang.org/x/tools/imports package.
return strings.Contains(importPath, ".")
}
// DeleteImport deletes the import path from the file f, if present.
// If there are duplicate import declarations, all matching ones are deleted.
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
return DeleteNamedImport(fset, f, "", path)
}
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
// If there are duplicate import declarations, all matching ones are deleted.
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
var delspecs []*ast.ImportSpec
var delcomments []*ast.CommentGroup
// Find the import nodes that import path, if any.
for i := 0; i < len(f.Decls); i++ {
decl := f.Decls[i]
gen, ok := decl.(*ast.GenDecl)
if !ok || gen.Tok != token.IMPORT {
continue
}
for j := 0; j < len(gen.Specs); j++ {
spec := gen.Specs[j]
impspec := spec.(*ast.ImportSpec)
if importName(impspec) != name || importPath(impspec) != path {
continue
}
// We found an import spec that imports path.
// Delete it.
delspecs = append(delspecs, impspec)
deleted = true
copy(gen.Specs[j:], gen.Specs[j+1:])
gen.Specs = gen.Specs[:len(gen.Specs)-1]
// If this was the last import spec in this decl,
// delete the decl, too.
if len(gen.Specs) == 0 {
copy(f.Decls[i:], f.Decls[i+1:])
f.Decls = f.Decls[:len(f.Decls)-1]
i--
break
} else if len(gen.Specs) == 1 {
if impspec.Doc != nil {
delcomments = append(delcomments, impspec.Doc)
}
if impspec.Comment != nil {
delcomments = append(delcomments, impspec.Comment)
}
for _, cg := range f.Comments {
// Found comment on the same line as the import spec.
if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line {
delcomments = append(delcomments, cg)
break
}
}
spec := gen.Specs[0].(*ast.ImportSpec)
// Move the documentation right after the import decl.
if spec.Doc != nil {
for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line {
fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
}
}
for _, cg := range f.Comments {
if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line {
for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line {
fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
}
break
}
}
}
if j > 0 {
lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
line := fset.Position(impspec.Path.ValuePos).Line
// We deleted an entry but now there may be
// a blank line-sized hole where the import was.
if line-lastLine > 1 || !gen.Rparen.IsValid() {
// There was a blank line immediately preceding the deleted import,
// so there's no need to close the hole. The right parenthesis is
// invalid after AddImport to an import statement without parenthesis.
// Do nothing.
} else if line != fset.File(gen.Rparen).LineCount() {
// There was no blank line. Close the hole.
fset.File(gen.Rparen).MergeLine(line)
}
}
j--
}
}
// Delete imports from f.Imports.
for i := 0; i < len(f.Imports); i++ {
imp := f.Imports[i]
for j, del := range delspecs {
if imp == del {
copy(f.Imports[i:], f.Imports[i+1:])
f.Imports = f.Imports[:len(f.Imports)-1]
copy(delspecs[j:], delspecs[j+1:])
delspecs = delspecs[:len(delspecs)-1]
i--
break
}
}
}
// Delete comments from f.Comments.
for i := 0; i < len(f.Comments); i++ {
cg := f.Comments[i]
for j, del := range delcomments {
if cg == del {
copy(f.Comments[i:], f.Comments[i+1:])
f.Comments = f.Comments[:len(f.Comments)-1]
copy(delcomments[j:], delcomments[j+1:])
delcomments = delcomments[:len(delcomments)-1]
i--
break
}
}
}
if len(delspecs) > 0 {
panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
}
return
}
// RewriteImport rewrites any import of path oldPath to path newPath.
func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) {
for _, imp := range f.Imports {
if importPath(imp) == oldPath {
rewrote = true
// record old End, because the default is to compute
// it using the length of imp.Path.Value.
imp.EndPos = imp.End()
imp.Path.Value = strconv.Quote(newPath)
}
}
return
}
// UsesImport reports whether a given import is used.
func UsesImport(f *ast.File, path string) (used bool) {
spec := importSpec(f, path)
if spec == nil {
return
}
name := spec.Name.String()
switch name {
case "<nil>":
// If the package name is not explicitly specified,
// make an educated guess. This is not guaranteed to be correct.
lastSlash := strings.LastIndex(path, "/")
if lastSlash == -1 {
name = path
} else {
name = path[lastSlash+1:]
}
case "_", ".":
// Not sure if this import is used - err on the side of caution.
return true
}
ast.Walk(visitFn(func(n ast.Node) {
sel, ok := n.(*ast.SelectorExpr)
if ok && isTopName(sel.X, name) {
used = true
}
}), f)
return
}
type visitFn func(node ast.Node)
func (fn visitFn) Visit(node ast.Node) ast.Visitor {
fn(node)
return fn
}
// imports reports whether f has an import with the specified name and path.
func imports(f *ast.File, name, path string) bool {
for _, s := range f.Imports {
if importName(s) == name && importPath(s) == path {
return true
}
}
return false
}
// importSpec returns the import spec if f imports path,
// or nil otherwise.
func importSpec(f *ast.File, path string) *ast.ImportSpec {
for _, s := range f.Imports {
if importPath(s) == path {
return s
}
}
return nil
}
// importName returns the name of s,
// or "" if the import is not named.
func importName(s *ast.ImportSpec) string {
if s.Name == nil {
return ""
}
return s.Name.Name
}
// importPath returns the unquoted import path of s,
// or "" if the path is not properly quoted.
func importPath(s *ast.ImportSpec) string {
t, err := strconv.Unquote(s.Path.Value)
if err != nil {
return ""
}
return t
}
// declImports reports whether gen contains an import of path.
func declImports(gen *ast.GenDecl, path string) bool {
if gen.Tok != token.IMPORT {
return false
}
for _, spec := range gen.Specs {
impspec := spec.(*ast.ImportSpec)
if importPath(impspec) == path {
return true
}
}
return false
}
// matchLen returns the length of the longest path segment prefix shared by x and y.
func matchLen(x, y string) int {
n := 0
for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ {
if x[i] == '/' {
n++
}
}
return n
}
// isTopName returns true if n is a top-level unresolved identifier with the given name.
func isTopName(n ast.Expr, name string) bool {
id, ok := n.(*ast.Ident)
return ok && id.Name == name && id.Obj == nil
}
// Imports returns the file imports grouped by paragraph.
func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
var groups [][]*ast.ImportSpec
for _, decl := range f.Decls {
genDecl, ok := decl.(*ast.GenDecl)
if !ok || genDecl.Tok != token.IMPORT {
break
}
group := []*ast.ImportSpec{}
var lastLine int
for _, spec := range genDecl.Specs {
importSpec := spec.(*ast.ImportSpec)
pos := importSpec.Path.ValuePos
line := fset.Position(pos).Line
if lastLine > 0 && pos > 0 && line-lastLine > 1 {
groups = append(groups, group)
group = []*ast.ImportSpec{}
}
group = append(group, importSpec)
lastLine = line
}
groups = append(groups, group)
}
return groups
}
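
For context only (not in the removed file): AddNamedImport, UsesImport, and DeleteImport above are the import-rewriting primitives used by goimports-style tools. A hedged sketch of the usual call pattern on an in-memory file; the source string below is made up.

package main

import (
	"go/parser"
	"go/printer"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	const src = `package demo

import "fmt"

func demo() { fmt.Println("hi") }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// Add a named import for "path" under the alias pathpkg
	// (mirrors the doc-comment example above).
	astutil.AddNamedImport(fset, f, "pathpkg", "path")
	// Only delete an import once nothing references it anymore.
	if !astutil.UsesImport(f, "fmt") {
		astutil.DeleteImport(fset, f, "fmt")
	}
	printer.Fprint(os.Stdout, fset, f)
}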

View file

@@ -1,483 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package astutil
import (
"fmt"
"go/ast"
"reflect"
"sort"
"golang.org/x/tools/internal/typeparams"
)
// An ApplyFunc is invoked by Apply for each node n, even if n is nil,
// before and/or after the node's children, using a Cursor describing
// the current node and providing operations on it.
//
// The return value of ApplyFunc controls the syntax tree traversal.
// See Apply for details.
type ApplyFunc func(*Cursor) bool
// Apply traverses a syntax tree recursively, starting with root,
// and calling pre and post for each node as described below.
// Apply returns the syntax tree, possibly modified.
//
// If pre is not nil, it is called for each node before the node's
// children are traversed (pre-order). If pre returns false, no
// children are traversed, and post is not called for that node.
//
// If post is not nil, and a prior call of pre didn't return false,
// post is called for each node after its children are traversed
// (post-order). If post returns false, traversal is terminated and
// Apply returns immediately.
//
// Only fields that refer to AST nodes are considered children;
// i.e., token.Pos, Scopes, Objects, and fields of basic types
// (strings, etc.) are ignored.
//
// Children are traversed in the order in which they appear in the
// respective node's struct definition. A package's files are
// traversed in the filenames' alphabetical order.
//
func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) {
parent := &struct{ ast.Node }{root}
defer func() {
if r := recover(); r != nil && r != abort {
panic(r)
}
result = parent.Node
}()
a := &application{pre: pre, post: post}
a.apply(parent, "Node", nil, root)
return
}
var abort = new(int) // singleton, to signal termination of Apply
// A Cursor describes a node encountered during Apply.
// Information about the node and its parent is available
// from the Node, Parent, Name, and Index methods.
//
// If p is a variable of type and value of the current parent node
// c.Parent(), and f is the field identifier with name c.Name(),
// the following invariants hold:
//
// p.f == c.Node() if c.Index() < 0
// p.f[c.Index()] == c.Node() if c.Index() >= 0
//
// The methods Replace, Delete, InsertBefore, and InsertAfter
// can be used to change the AST without disrupting Apply.
type Cursor struct {
parent ast.Node
name string
iter *iterator // valid if non-nil
node ast.Node
}
// Node returns the current Node.
func (c *Cursor) Node() ast.Node { return c.node }
// Parent returns the parent of the current Node.
func (c *Cursor) Parent() ast.Node { return c.parent }
// Name returns the name of the parent Node field that contains the current Node.
// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns
// the filename for the current Node.
func (c *Cursor) Name() string { return c.name }
// Index reports the index >= 0 of the current Node in the slice of Nodes that
// contains it, or a value < 0 if the current Node is not part of a slice.
// The index of the current node changes if InsertBefore is called while
// processing the current node.
func (c *Cursor) Index() int {
if c.iter != nil {
return c.iter.index
}
return -1
}
// field returns the current node's parent field value.
func (c *Cursor) field() reflect.Value {
return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name)
}
// Replace replaces the current Node with n.
// The replacement node is not walked by Apply.
func (c *Cursor) Replace(n ast.Node) {
if _, ok := c.node.(*ast.File); ok {
file, ok := n.(*ast.File)
if !ok {
panic("attempt to replace *ast.File with non-*ast.File")
}
c.parent.(*ast.Package).Files[c.name] = file
return
}
v := c.field()
if i := c.Index(); i >= 0 {
v = v.Index(i)
}
v.Set(reflect.ValueOf(n))
}
// Delete deletes the current Node from its containing slice.
// If the current Node is not part of a slice, Delete panics.
// As a special case, if the current node is a package file,
// Delete removes it from the package's Files map.
func (c *Cursor) Delete() {
if _, ok := c.node.(*ast.File); ok {
delete(c.parent.(*ast.Package).Files, c.name)
return
}
i := c.Index()
if i < 0 {
panic("Delete node not contained in slice")
}
v := c.field()
l := v.Len()
reflect.Copy(v.Slice(i, l), v.Slice(i+1, l))
v.Index(l - 1).Set(reflect.Zero(v.Type().Elem()))
v.SetLen(l - 1)
c.iter.step--
}
// InsertAfter inserts n after the current Node in its containing slice.
// If the current Node is not part of a slice, InsertAfter panics.
// Apply does not walk n.
func (c *Cursor) InsertAfter(n ast.Node) {
i := c.Index()
if i < 0 {
panic("InsertAfter node not contained in slice")
}
v := c.field()
v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
l := v.Len()
reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l))
v.Index(i + 1).Set(reflect.ValueOf(n))
c.iter.step++
}
// InsertBefore inserts n before the current Node in its containing slice.
// If the current Node is not part of a slice, InsertBefore panics.
// Apply will not walk n.
func (c *Cursor) InsertBefore(n ast.Node) {
i := c.Index()
if i < 0 {
panic("InsertBefore node not contained in slice")
}
v := c.field()
v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
l := v.Len()
reflect.Copy(v.Slice(i+1, l), v.Slice(i, l))
v.Index(i).Set(reflect.ValueOf(n))
c.iter.index++
}
// application carries all the shared data so we can pass it around cheaply.
type application struct {
pre, post ApplyFunc
cursor Cursor
iter iterator
}
func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) {
// convert typed nil into untyped nil
if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() {
n = nil
}
// avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead
saved := a.cursor
a.cursor.parent = parent
a.cursor.name = name
a.cursor.iter = iter
a.cursor.node = n
if a.pre != nil && !a.pre(&a.cursor) {
a.cursor = saved
return
}
// walk children
// (the order of the cases matches the order of the corresponding node types in go/ast)
switch n := n.(type) {
case nil:
// nothing to do
// Comments and fields
case *ast.Comment:
// nothing to do
case *ast.CommentGroup:
if n != nil {
a.applyList(n, "List")
}
case *ast.Field:
a.apply(n, "Doc", nil, n.Doc)
a.applyList(n, "Names")
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Tag", nil, n.Tag)
a.apply(n, "Comment", nil, n.Comment)
case *ast.FieldList:
a.applyList(n, "List")
// Expressions
case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
// nothing to do
case *ast.Ellipsis:
a.apply(n, "Elt", nil, n.Elt)
case *ast.FuncLit:
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Body", nil, n.Body)
case *ast.CompositeLit:
a.apply(n, "Type", nil, n.Type)
a.applyList(n, "Elts")
case *ast.ParenExpr:
a.apply(n, "X", nil, n.X)
case *ast.SelectorExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Sel", nil, n.Sel)
case *ast.IndexExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Index", nil, n.Index)
case *ast.SliceExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Low", nil, n.Low)
a.apply(n, "High", nil, n.High)
a.apply(n, "Max", nil, n.Max)
case *ast.TypeAssertExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Type", nil, n.Type)
case *ast.CallExpr:
a.apply(n, "Fun", nil, n.Fun)
a.applyList(n, "Args")
case *ast.StarExpr:
a.apply(n, "X", nil, n.X)
case *ast.UnaryExpr:
a.apply(n, "X", nil, n.X)
case *ast.BinaryExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Y", nil, n.Y)
case *ast.KeyValueExpr:
a.apply(n, "Key", nil, n.Key)
a.apply(n, "Value", nil, n.Value)
// Types
case *ast.ArrayType:
a.apply(n, "Len", nil, n.Len)
a.apply(n, "Elt", nil, n.Elt)
case *ast.StructType:
a.apply(n, "Fields", nil, n.Fields)
case *ast.FuncType:
a.apply(n, "Params", nil, n.Params)
a.apply(n, "Results", nil, n.Results)
case *ast.InterfaceType:
a.apply(n, "Methods", nil, n.Methods)
case *ast.MapType:
a.apply(n, "Key", nil, n.Key)
a.apply(n, "Value", nil, n.Value)
case *ast.ChanType:
a.apply(n, "Value", nil, n.Value)
// Statements
case *ast.BadStmt:
// nothing to do
case *ast.DeclStmt:
a.apply(n, "Decl", nil, n.Decl)
case *ast.EmptyStmt:
// nothing to do
case *ast.LabeledStmt:
a.apply(n, "Label", nil, n.Label)
a.apply(n, "Stmt", nil, n.Stmt)
case *ast.ExprStmt:
a.apply(n, "X", nil, n.X)
case *ast.SendStmt:
a.apply(n, "Chan", nil, n.Chan)
a.apply(n, "Value", nil, n.Value)
case *ast.IncDecStmt:
a.apply(n, "X", nil, n.X)
case *ast.AssignStmt:
a.applyList(n, "Lhs")
a.applyList(n, "Rhs")
case *ast.GoStmt:
a.apply(n, "Call", nil, n.Call)
case *ast.DeferStmt:
a.apply(n, "Call", nil, n.Call)
case *ast.ReturnStmt:
a.applyList(n, "Results")
case *ast.BranchStmt:
a.apply(n, "Label", nil, n.Label)
case *ast.BlockStmt:
a.applyList(n, "List")
case *ast.IfStmt:
a.apply(n, "Init", nil, n.Init)
a.apply(n, "Cond", nil, n.Cond)
a.apply(n, "Body", nil, n.Body)
a.apply(n, "Else", nil, n.Else)
case *ast.CaseClause:
a.applyList(n, "List")
a.applyList(n, "Body")
case *ast.SwitchStmt:
a.apply(n, "Init", nil, n.Init)
a.apply(n, "Tag", nil, n.Tag)
a.apply(n, "Body", nil, n.Body)
case *ast.TypeSwitchStmt:
a.apply(n, "Init", nil, n.Init)
a.apply(n, "Assign", nil, n.Assign)
a.apply(n, "Body", nil, n.Body)
case *ast.CommClause:
a.apply(n, "Comm", nil, n.Comm)
a.applyList(n, "Body")
case *ast.SelectStmt:
a.apply(n, "Body", nil, n.Body)
case *ast.ForStmt:
a.apply(n, "Init", nil, n.Init)
a.apply(n, "Cond", nil, n.Cond)
a.apply(n, "Post", nil, n.Post)
a.apply(n, "Body", nil, n.Body)
case *ast.RangeStmt:
a.apply(n, "Key", nil, n.Key)
a.apply(n, "Value", nil, n.Value)
a.apply(n, "X", nil, n.X)
a.apply(n, "Body", nil, n.Body)
// Declarations
case *ast.ImportSpec:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Name", nil, n.Name)
a.apply(n, "Path", nil, n.Path)
a.apply(n, "Comment", nil, n.Comment)
case *ast.ValueSpec:
a.apply(n, "Doc", nil, n.Doc)
a.applyList(n, "Names")
a.apply(n, "Type", nil, n.Type)
a.applyList(n, "Values")
a.apply(n, "Comment", nil, n.Comment)
case *ast.TypeSpec:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Name", nil, n.Name)
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Comment", nil, n.Comment)
case *ast.BadDecl:
// nothing to do
case *ast.GenDecl:
a.apply(n, "Doc", nil, n.Doc)
a.applyList(n, "Specs")
case *ast.FuncDecl:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Recv", nil, n.Recv)
a.apply(n, "Name", nil, n.Name)
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Body", nil, n.Body)
// Files and packages
case *ast.File:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Name", nil, n.Name)
a.applyList(n, "Decls")
// Don't walk n.Comments; they have either been walked already if
// they are Doc comments, or they can be easily walked explicitly.
case *ast.Package:
// collect and sort names for reproducible behavior
var names []string
for name := range n.Files {
names = append(names, name)
}
sort.Strings(names)
for _, name := range names {
a.apply(n, name, nil, n.Files[name])
}
default:
if typeparams.IsListExpr(n) {
a.applyList(n, "ElemList")
} else {
panic(fmt.Sprintf("Apply: unexpected node type %T", n))
}
}
if a.post != nil && !a.post(&a.cursor) {
panic(abort)
}
a.cursor = saved
}
// An iterator controls iteration over a slice of nodes.
type iterator struct {
index, step int
}
func (a *application) applyList(parent ast.Node, name string) {
// avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead
saved := a.iter
a.iter.index = 0
for {
// must reload parent.name each time, since cursor modifications might change it
v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name)
if a.iter.index >= v.Len() {
break
}
// element x may be nil in a bad AST - be cautious
var x ast.Node
if e := v.Index(a.iter.index); e.IsValid() {
x = e.Interface().(ast.Node)
}
a.iter.step = 1
a.apply(parent, name, &a.iter, x)
a.iter.index += a.iter.step
}
a.iter = saved
}
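
For reference, not part of this diff: astutil.Apply drives the pre-/post-order traversal described above, and the Cursor supports in-place edits such as Replace. A small sketch that renames an identifier; the snippet and the name "fresh" are illustrative only.

package main

import (
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", "package demo\nvar old = 1\nvar y = old + 1\n", 0)
	if err != nil {
		panic(err)
	}
	// Pre-order hook: every *ast.Ident named "old" is swapped for "fresh".
	// Returning true keeps the traversal going into the node's children.
	rewritten := astutil.Apply(f, func(c *astutil.Cursor) bool {
		if id, ok := c.Node().(*ast.Ident); ok && id.Name == "old" {
			c.Replace(&ast.Ident{Name: "fresh"})
		}
		return true
	}, nil)
	printer.Fprint(os.Stdout, fset, rewritten)
}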

View file

@@ -1,18 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package astutil
import "go/ast"
// Unparen returns e with any enclosing parentheses stripped.
func Unparen(e ast.Expr) ast.Expr {
for {
p, ok := e.(*ast.ParenExpr)
if !ok {
return e
}
e = p.X
}
}
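
A one-line illustration of Unparen, added here for reference only:

package main

import (
	"fmt"
	"go/parser"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	expr, err := parser.ParseExpr("((x + 1))")
	if err != nil {
		panic(err)
	}
	// Both layers of parentheses are stripped, leaving the *ast.BinaryExpr.
	fmt.Printf("%T\n", astutil.Unparen(expr))
}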

View file

@@ -1,133 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gcexportdata provides functions for locating, reading, and
// writing export data files containing type information produced by the
// gc compiler. This package supports go1.7 export data format and all
// later versions.
//
// Although it might seem convenient for this package to live alongside
// go/types in the standard library, this would cause version skew
// problems for developer tools that use it, since they must be able to
// consume the outputs of the gc compiler both before and after a Go
// update such as from Go 1.7 to Go 1.8. Because this package lives in
// golang.org/x/tools, sites can update their version of this repo some
// time before the Go 1.8 release and rebuild and redeploy their
// developer tools, which will then be able to consume both Go 1.7 and
// Go 1.8 export data files, so they will work before and after the
// Go update. (See discussion at https://golang.org/issue/15651.)
//
package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
import (
"bufio"
"bytes"
"fmt"
"go/token"
"go/types"
"io"
"io/ioutil"
"golang.org/x/tools/go/internal/gcimporter"
)
// Find returns the name of an object (.o) or archive (.a) file
// containing type information for the specified import path,
// using the workspace layout conventions of go/build.
// If no file was found, an empty filename is returned.
//
// A relative srcDir is interpreted relative to the current working directory.
//
// Find also returns the package's resolved (canonical) import path,
// reflecting the effects of srcDir and vendoring on importPath.
func Find(importPath, srcDir string) (filename, path string) {
return gcimporter.FindPkg(importPath, srcDir)
}
// NewReader returns a reader for the export data section of an object
// (.o) or archive (.a) file read from r. The new reader may provide
// additional trailing data beyond the end of the export data.
func NewReader(r io.Reader) (io.Reader, error) {
buf := bufio.NewReader(r)
_, err := gcimporter.FindExportData(buf)
// If we ever switch to a zip-like archive format with the ToC
// at the end, we can return the correct portion of export data,
// but for now we must return the entire rest of the file.
return buf, err
}
// Read reads export data from in, decodes it, and returns type
// information for the package.
// The package name is specified by path.
// File position information is added to fset.
//
// Read may inspect and add to the imports map to ensure that references
// within the export data to other packages are consistent. The caller
// must ensure that imports[path] does not exist, or exists but is
// incomplete (see types.Package.Complete), and Read inserts the
// resulting package into this map entry.
//
// On return, the state of the reader is undefined.
func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
data, err := ioutil.ReadAll(in)
if err != nil {
return nil, fmt.Errorf("reading export data for %q: %v", path, err)
}
if bytes.HasPrefix(data, []byte("!<arch>")) {
return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
}
// The App Engine Go runtime v1.6 uses the old export data format.
// TODO(adonovan): delete once v1.7 has been around for a while.
if bytes.HasPrefix(data, []byte("package ")) {
return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
}
// The indexed export format starts with an 'i'; the older
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
if len(data) > 0 && data[0] == 'i' {
_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
return pkg, err
}
_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
return pkg, err
}
// Write writes encoded type information for the specified package to out.
// The FileSet provides file position information for named objects.
func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
if _, err := io.WriteString(out, "i"); err != nil {
return err
}
return gcimporter.IExportData(out, fset, pkg)
}
// ReadBundle reads an export bundle from in, decodes it, and returns type
// information for the packages.
// File position information is added to fset.
//
// ReadBundle may inspect and add to the imports map to ensure that references
// within the export bundle to other packages are consistent.
//
// On return, the state of the reader is undefined.
//
// Experimental: This API is experimental and may change in the future.
func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) {
data, err := ioutil.ReadAll(in)
if err != nil {
return nil, fmt.Errorf("reading export bundle: %v", err)
}
return gcimporter.IImportBundle(fset, imports, data)
}
// WriteBundle writes encoded type information for the specified packages to out.
// The FileSet provides file position information for named objects.
//
// Experimental: This API is experimental and may change in the future.
func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
return gcimporter.IExportBundle(out, fset, pkgs)
}
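
For reference (not part of the removed file): the doc comments above describe the Find → NewReader → Read flow for consuming gc export data. A minimal sketch under GOPATH-style layout assumptions; in module mode Find may return an empty filename, which the sketch simply reports.

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate export data for an import path using go/build workspace rules.
	filename, path := gcexportdata.Find("fmt", ".")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Skip the object/archive header, then decode the type information.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "defines", pkg.Scope().Len(), "top-level objects")
}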

View file

@@ -1,73 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gcexportdata
import (
"fmt"
"go/token"
"go/types"
"os"
)
// NewImporter returns a new instance of the types.Importer interface
// that reads type information from export data files written by gc.
// The Importer also satisfies types.ImporterFrom.
//
// Export data files are located using "go build" workspace conventions
// and the build.Default context.
//
// Use this importer instead of go/importer.For("gc", ...) to avoid the
// version-skew problems described in the documentation of this package,
// or to control the FileSet or access the imports map populated during
// package loading.
//
func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
return importer{fset, imports}
}
type importer struct {
fset *token.FileSet
imports map[string]*types.Package
}
func (imp importer) Import(importPath string) (*types.Package, error) {
return imp.ImportFrom(importPath, "", 0)
}
func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) {
filename, path := Find(importPath, srcDir)
if filename == "" {
if importPath == "unsafe" {
// Even for unsafe, call Find first in case
// the package was vendored.
return types.Unsafe, nil
}
return nil, fmt.Errorf("can't find import: %s", importPath)
}
if pkg, ok := imp.imports[path]; ok && pkg.Complete() {
return pkg, nil // cache hit
}
// open file
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer func() {
f.Close()
if err != nil {
// add file name to error
err = fmt.Errorf("reading export data: %s: %v", filename, err)
}
}()
r, err := NewReader(f)
if err != nil {
return nil, err
}
return Read(r, imp.fset, imp.imports, path)
}
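
For reference only: NewImporter is the usual way to plug this package into go/types. A hedged sketch that type-checks a made-up source snippet against gc export data.

package main

import (
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	const src = `package demo

import "fmt"

func demo() { fmt.Println("hi") }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	imports := make(map[string]*types.Package)
	conf := types.Config{
		// Resolve "fmt" from compiled export data rather than from source.
		Importer: gcexportdata.NewImporter(fset, imports),
	}
	pkg, err := conf.Check("demo", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("type-checked", pkg.Path())
}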

View file

@@ -1,852 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Binary package export.
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
// see that file for specification of the format.
package gcimporter
import (
"bytes"
"encoding/binary"
"fmt"
"go/ast"
"go/constant"
"go/token"
"go/types"
"math"
"math/big"
"sort"
"strings"
)
// If debugFormat is set, each integer and string value is preceded by a marker
// and position information in the encoding. This mechanism permits an importer
// to recognize immediately when it is out of sync. The importer recognizes this
// mode automatically (i.e., it can import export data produced with debugging
// support even if debugFormat is not set at the time of import). This mode will
// lead to massively larger export data (by a factor of 2 to 3) and should only
// be enabled during development and debugging.
//
// NOTE: This flag is the first flag to enable if importing dies because of
// (suspected) format errors, and whenever a change is made to the format.
const debugFormat = false // default: false
// If trace is set, debugging output is printed to std out.
const trace = false // default: false
// Current export format version. Increase with each format change.
// Note: The latest binary (non-indexed) export format is at version 6.
// This exporter is still at level 4, but it doesn't matter since
// the binary importer can handle older versions just fine.
// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
// 4: type name objects support type aliases, uses aliasTag
// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
// 2: removed unused bool in ODCL export (compiler only)
// 1: header format change (more regular), export package for _ struct fields
// 0: Go1.7 encoding
const exportVersion = 4
// trackAllTypes enables cycle tracking for all types, not just named
// types. The existing compiler invariants assume that unnamed types
// that are not completely set up are not used, or else there are spurious
// errors.
// If disabled, only named types are tracked, possibly leading to slightly
// less efficient encoding in rare cases. It also prevents the export of
// some corner-case type declarations (but those are not handled correctly
// with the textual export format either).
// TODO(gri) enable and remove once issues caused by it are fixed
const trackAllTypes = false
type exporter struct {
fset *token.FileSet
out bytes.Buffer
// object -> index maps, indexed in order of serialization
strIndex map[string]int
pkgIndex map[*types.Package]int
typIndex map[types.Type]int
// position encoding
posInfoFormat bool
prevFile string
prevLine int
// debugging support
written int // bytes written
indent int // for trace
}
// internalError represents an error generated inside this package.
type internalError string
func (e internalError) Error() string { return "gcimporter: " + string(e) }
func internalErrorf(format string, args ...interface{}) error {
return internalError(fmt.Sprintf(format, args...))
}
// BExportData returns binary export data for pkg.
// If no file set is provided, position info will be missing.
func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
defer func() {
if e := recover(); e != nil {
if ierr, ok := e.(internalError); ok {
err = ierr
return
}
// Not an internal error; panic again.
panic(e)
}
}()
p := exporter{
fset: fset,
strIndex: map[string]int{"": 0}, // empty string is mapped to 0
pkgIndex: make(map[*types.Package]int),
typIndex: make(map[types.Type]int),
posInfoFormat: true, // TODO(gri) might become a flag, eventually
}
// write version info
// The version string must start with "version %d" where %d is the version
// number. Additional debugging information may follow after a blank; that
// text is ignored by the importer.
p.rawStringln(fmt.Sprintf("version %d", exportVersion))
var debug string
if debugFormat {
debug = "debug"
}
p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
p.bool(trackAllTypes)
p.bool(p.posInfoFormat)
// --- generic export data ---
// populate type map with predeclared "known" types
for index, typ := range predeclared() {
p.typIndex[typ] = index
}
if len(p.typIndex) != len(predeclared()) {
return nil, internalError("duplicate entries in type map?")
}
// write package data
p.pkg(pkg, true)
if trace {
p.tracef("\n")
}
// write objects
objcount := 0
scope := pkg.Scope()
for _, name := range scope.Names() {
if !ast.IsExported(name) {
continue
}
if trace {
p.tracef("\n")
}
p.obj(scope.Lookup(name))
objcount++
}
// indicate end of list
if trace {
p.tracef("\n")
}
p.tag(endTag)
// for self-verification only (redundant)
p.int(objcount)
if trace {
p.tracef("\n")
}
// --- end of export data ---
return p.out.Bytes(), nil
}
func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
if pkg == nil {
panic(internalError("unexpected nil pkg"))
}
// if we saw the package before, write its index (>= 0)
if i, ok := p.pkgIndex[pkg]; ok {
p.index('P', i)
return
}
// otherwise, remember the package, write the package tag (< 0) and package data
if trace {
p.tracef("P%d = { ", len(p.pkgIndex))
defer p.tracef("} ")
}
p.pkgIndex[pkg] = len(p.pkgIndex)
p.tag(packageTag)
p.string(pkg.Name())
if emptypath {
p.string("")
} else {
p.string(pkg.Path())
}
}
func (p *exporter) obj(obj types.Object) {
switch obj := obj.(type) {
case *types.Const:
p.tag(constTag)
p.pos(obj)
p.qualifiedName(obj)
p.typ(obj.Type())
p.value(obj.Val())
case *types.TypeName:
if obj.IsAlias() {
p.tag(aliasTag)
p.pos(obj)
p.qualifiedName(obj)
} else {
p.tag(typeTag)
}
p.typ(obj.Type())
case *types.Var:
p.tag(varTag)
p.pos(obj)
p.qualifiedName(obj)
p.typ(obj.Type())
case *types.Func:
p.tag(funcTag)
p.pos(obj)
p.qualifiedName(obj)
sig := obj.Type().(*types.Signature)
p.paramList(sig.Params(), sig.Variadic())
p.paramList(sig.Results(), false)
default:
panic(internalErrorf("unexpected object %v (%T)", obj, obj))
}
}
func (p *exporter) pos(obj types.Object) {
if !p.posInfoFormat {
return
}
file, line := p.fileLine(obj)
if file == p.prevFile {
// common case: write line delta
// delta == 0 means different file or no line change
delta := line - p.prevLine
p.int(delta)
if delta == 0 {
p.int(-1) // -1 means no file change
}
} else {
// different file
p.int(0)
// Encode filename as length of common prefix with previous
// filename, followed by (possibly empty) suffix. Filenames
// frequently share path prefixes, so this can save a lot
// of space and make export data size less dependent on file
// path length. The suffix is unlikely to be empty because
// file names tend to end in ".go".
n := commonPrefixLen(p.prevFile, file)
p.int(n) // n >= 0
p.string(file[n:]) // write suffix only
p.prevFile = file
p.int(line)
}
p.prevLine = line
}
func (p *exporter) fileLine(obj types.Object) (file string, line int) {
if p.fset != nil {
pos := p.fset.Position(obj.Pos())
file = pos.Filename
line = pos.Line
}
return
}
func commonPrefixLen(a, b string) int {
if len(a) > len(b) {
a, b = b, a
}
// len(a) <= len(b)
i := 0
for i < len(a) && a[i] == b[i] {
i++
}
return i
}
func (p *exporter) qualifiedName(obj types.Object) {
p.string(obj.Name())
p.pkg(obj.Pkg(), false)
}
func (p *exporter) typ(t types.Type) {
if t == nil {
panic(internalError("nil type"))
}
// Possible optimization: Anonymous pointer types *T where
// T is a named type are common. We could canonicalize all
// such types *T to a single type PT = *T. This would lead
// to at most one *T entry in typIndex, and all future *T's
// would be encoded as the respective index directly. Would
// save 1 byte (pointerTag) per *T and reduce the typIndex
// size (at the cost of a canonicalization map). We can do
// this later, without encoding format change.
// if we saw the type before, write its index (>= 0)
if i, ok := p.typIndex[t]; ok {
p.index('T', i)
return
}
// otherwise, remember the type, write the type tag (< 0) and type data
if trackAllTypes {
if trace {
p.tracef("T%d = {>\n", len(p.typIndex))
defer p.tracef("<\n} ")
}
p.typIndex[t] = len(p.typIndex)
}
switch t := t.(type) {
case *types.Named:
if !trackAllTypes {
// if we don't track all types, track named types now
p.typIndex[t] = len(p.typIndex)
}
p.tag(namedTag)
p.pos(t.Obj())
p.qualifiedName(t.Obj())
p.typ(t.Underlying())
if !types.IsInterface(t) {
p.assocMethods(t)
}
case *types.Array:
p.tag(arrayTag)
p.int64(t.Len())
p.typ(t.Elem())
case *types.Slice:
p.tag(sliceTag)
p.typ(t.Elem())
case *dddSlice:
p.tag(dddTag)
p.typ(t.elem)
case *types.Struct:
p.tag(structTag)
p.fieldList(t)
case *types.Pointer:
p.tag(pointerTag)
p.typ(t.Elem())
case *types.Signature:
p.tag(signatureTag)
p.paramList(t.Params(), t.Variadic())
p.paramList(t.Results(), false)
case *types.Interface:
p.tag(interfaceTag)
p.iface(t)
case *types.Map:
p.tag(mapTag)
p.typ(t.Key())
p.typ(t.Elem())
case *types.Chan:
p.tag(chanTag)
p.int(int(3 - t.Dir())) // hack
p.typ(t.Elem())
default:
panic(internalErrorf("unexpected type %T: %s", t, t))
}
}
func (p *exporter) assocMethods(named *types.Named) {
// Sort methods (for determinism).
var methods []*types.Func
for i := 0; i < named.NumMethods(); i++ {
methods = append(methods, named.Method(i))
}
sort.Sort(methodsByName(methods))
p.int(len(methods))
if trace && methods != nil {
p.tracef("associated methods {>\n")
}
for i, m := range methods {
if trace && i > 0 {
p.tracef("\n")
}
p.pos(m)
name := m.Name()
p.string(name)
if !exported(name) {
p.pkg(m.Pkg(), false)
}
sig := m.Type().(*types.Signature)
p.paramList(types.NewTuple(sig.Recv()), false)
p.paramList(sig.Params(), sig.Variadic())
p.paramList(sig.Results(), false)
p.int(0) // dummy value for go:nointerface pragma - ignored by importer
}
if trace && methods != nil {
p.tracef("<\n} ")
}
}
type methodsByName []*types.Func
func (x methodsByName) Len() int { return len(x) }
func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
func (p *exporter) fieldList(t *types.Struct) {
if trace && t.NumFields() > 0 {
p.tracef("fields {>\n")
defer p.tracef("<\n} ")
}
p.int(t.NumFields())
for i := 0; i < t.NumFields(); i++ {
if trace && i > 0 {
p.tracef("\n")
}
p.field(t.Field(i))
p.string(t.Tag(i))
}
}
func (p *exporter) field(f *types.Var) {
if !f.IsField() {
panic(internalError("field expected"))
}
p.pos(f)
p.fieldName(f)
p.typ(f.Type())
}
func (p *exporter) iface(t *types.Interface) {
// TODO(gri): enable importer to load embedded interfaces,
// then emit Embeddeds and ExplicitMethods separately here.
p.int(0)
n := t.NumMethods()
if trace && n > 0 {
p.tracef("methods {>\n")
defer p.tracef("<\n} ")
}
p.int(n)
for i := 0; i < n; i++ {
if trace && i > 0 {
p.tracef("\n")
}
p.method(t.Method(i))
}
}
func (p *exporter) method(m *types.Func) {
sig := m.Type().(*types.Signature)
if sig.Recv() == nil {
panic(internalError("method expected"))
}
p.pos(m)
p.string(m.Name())
if m.Name() != "_" && !ast.IsExported(m.Name()) {
p.pkg(m.Pkg(), false)
}
// interface method; no need to encode receiver.
p.paramList(sig.Params(), sig.Variadic())
p.paramList(sig.Results(), false)
}
func (p *exporter) fieldName(f *types.Var) {
name := f.Name()
if f.Anonymous() {
// anonymous field - we distinguish between 3 cases:
// 1) field name matches base type name and is exported
// 2) field name matches base type name and is not exported
// 3) field name doesn't match base type name (alias name)
bname := basetypeName(f.Type())
if name == bname {
if ast.IsExported(name) {
name = "" // 1) we don't need to know the field name or package
} else {
name = "?" // 2) use unexported name "?" to force package export
}
} else {
// 3) indicate alias and export name as is
// (this requires an extra "@" but this is a rare case)
p.string("@")
}
}
p.string(name)
if name != "" && !ast.IsExported(name) {
p.pkg(f.Pkg(), false)
}
}
func basetypeName(typ types.Type) string {
switch typ := deref(typ).(type) {
case *types.Basic:
return typ.Name()
case *types.Named:
return typ.Obj().Name()
default:
return "" // unnamed type
}
}
func (p *exporter) paramList(params *types.Tuple, variadic bool) {
// use negative length to indicate unnamed parameters
// (look at the first parameter only since either all
// names are present or all are absent)
n := params.Len()
if n > 0 && params.At(0).Name() == "" {
n = -n
}
p.int(n)
for i := 0; i < params.Len(); i++ {
q := params.At(i)
t := q.Type()
if variadic && i == params.Len()-1 {
t = &dddSlice{t.(*types.Slice).Elem()}
}
p.typ(t)
if n > 0 {
name := q.Name()
p.string(name)
if name != "_" {
p.pkg(q.Pkg(), false)
}
}
p.string("") // no compiler-specific info
}
}
func (p *exporter) value(x constant.Value) {
if trace {
p.tracef("= ")
}
switch x.Kind() {
case constant.Bool:
tag := falseTag
if constant.BoolVal(x) {
tag = trueTag
}
p.tag(tag)
case constant.Int:
if v, exact := constant.Int64Val(x); exact {
// common case: x fits into an int64 - use compact encoding
p.tag(int64Tag)
p.int64(v)
return
}
// uncommon case: large x - use float encoding
// (powers of 2 will be encoded efficiently with exponent)
p.tag(floatTag)
p.float(constant.ToFloat(x))
case constant.Float:
p.tag(floatTag)
p.float(x)
case constant.Complex:
p.tag(complexTag)
p.float(constant.Real(x))
p.float(constant.Imag(x))
case constant.String:
p.tag(stringTag)
p.string(constant.StringVal(x))
case constant.Unknown:
// package contains type errors
p.tag(unknownTag)
default:
panic(internalErrorf("unexpected value %v (%T)", x, x))
}
}
func (p *exporter) float(x constant.Value) {
if x.Kind() != constant.Float {
panic(internalErrorf("unexpected constant %v, want float", x))
}
// extract sign (there is no -0)
sign := constant.Sign(x)
if sign == 0 {
// x == 0
p.int(0)
return
}
// x != 0
var f big.Float
if v, exact := constant.Float64Val(x); exact {
// float64
f.SetFloat64(v)
} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
// TODO(gri): add big.Rat accessor to constant.Value.
r := valueToRat(num)
f.SetRat(r.Quo(r, valueToRat(denom)))
} else {
// Value too large to represent as a fraction => inaccessible.
// TODO(gri): add big.Float accessor to constant.Value.
f.SetFloat64(math.MaxFloat64) // FIXME
}
// extract exponent such that 0.5 <= m < 1.0
var m big.Float
exp := f.MantExp(&m)
// extract mantissa as *big.Int
// - set exponent large enough so mant satisfies mant.IsInt()
// - get *big.Int from mant
m.SetMantExp(&m, int(m.MinPrec()))
mant, acc := m.Int(nil)
if acc != big.Exact {
panic(internalError("internal error"))
}
p.int(sign)
p.int(exp)
p.string(string(mant.Bytes()))
}
func valueToRat(x constant.Value) *big.Rat {
// Convert little-endian to big-endian.
// I can't believe this is necessary.
bytes := constant.Bytes(x)
for i := 0; i < len(bytes)/2; i++ {
bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
}
return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
}
func (p *exporter) bool(b bool) bool {
if trace {
p.tracef("[")
defer p.tracef("= %v] ", b)
}
x := 0
if b {
x = 1
}
p.int(x)
return b
}
// ----------------------------------------------------------------------------
// Low-level encoders
func (p *exporter) index(marker byte, index int) {
if index < 0 {
panic(internalError("invalid index < 0"))
}
if debugFormat {
p.marker('t')
}
if trace {
p.tracef("%c%d ", marker, index)
}
p.rawInt64(int64(index))
}
func (p *exporter) tag(tag int) {
if tag >= 0 {
panic(internalError("invalid tag >= 0"))
}
if debugFormat {
p.marker('t')
}
if trace {
p.tracef("%s ", tagString[-tag])
}
p.rawInt64(int64(tag))
}
func (p *exporter) int(x int) {
p.int64(int64(x))
}
func (p *exporter) int64(x int64) {
if debugFormat {
p.marker('i')
}
if trace {
p.tracef("%d ", x)
}
p.rawInt64(x)
}
func (p *exporter) string(s string) {
if debugFormat {
p.marker('s')
}
if trace {
p.tracef("%q ", s)
}
// if we saw the string before, write its index (>= 0)
// (the empty string is mapped to 0)
if i, ok := p.strIndex[s]; ok {
p.rawInt64(int64(i))
return
}
// otherwise, remember string and write its negative length and bytes
p.strIndex[s] = len(p.strIndex)
p.rawInt64(-int64(len(s)))
for i := 0; i < len(s); i++ {
p.rawByte(s[i])
}
}
// marker emits a marker byte and position information which makes
// it easy for a reader to detect if it is "out of sync". Used for
// debugFormat format only.
func (p *exporter) marker(m byte) {
p.rawByte(m)
// Enable this for help tracking down the location
// of an incorrect marker when running in debugFormat.
if false && trace {
p.tracef("#%d ", p.written)
}
p.rawInt64(int64(p.written))
}
// rawInt64 should only be used by low-level encoders.
func (p *exporter) rawInt64(x int64) {
var tmp [binary.MaxVarintLen64]byte
n := binary.PutVarint(tmp[:], x)
for i := 0; i < n; i++ {
p.rawByte(tmp[i])
}
}
// rawStringln should only be used to emit the initial version string.
func (p *exporter) rawStringln(s string) {
for i := 0; i < len(s); i++ {
p.rawByte(s[i])
}
p.rawByte('\n')
}
// rawByte is the bottleneck interface to write to p.out.
// rawByte escapes b as follows (any encoding does that
// hides '$'):
//
// '$' => '|' 'S'
// '|' => '|' '|'
//
// Necessary so other tools can find the end of the
// export data by searching for "$$".
// rawByte should only be used by low-level encoders.
func (p *exporter) rawByte(b byte) {
switch b {
case '$':
// write '$' as '|' 'S'
b = 'S'
fallthrough
case '|':
// write '|' as '|' '|'
p.out.WriteByte('|')
p.written++
}
p.out.WriteByte(b)
p.written++
}
// tracef is like fmt.Printf but it rewrites the format string
// to take care of indentation.
func (p *exporter) tracef(format string, args ...interface{}) {
if strings.ContainsAny(format, "<>\n") {
var buf bytes.Buffer
for i := 0; i < len(format); i++ {
// no need to deal with runes
ch := format[i]
switch ch {
case '>':
p.indent++
continue
case '<':
p.indent--
continue
}
buf.WriteByte(ch)
if ch == '\n' {
for j := p.indent; j > 0; j-- {
buf.WriteString(". ")
}
}
}
format = buf.String()
}
fmt.Printf(format, args...)
}
// Debugging support.
// (tagString is only used when tracing is enabled)
var tagString = [...]string{
// Packages
-packageTag: "package",
// Types
-namedTag: "named type",
-arrayTag: "array",
-sliceTag: "slice",
-dddTag: "ddd",
-structTag: "struct",
-pointerTag: "pointer",
-signatureTag: "signature",
-interfaceTag: "interface",
-mapTag: "map",
-chanTag: "chan",
// Values
-falseTag: "false",
-trueTag: "true",
-int64Tag: "int64",
-floatTag: "float",
-fractionTag: "fraction",
-complexTag: "complex",
-stringTag: "string",
-unknownTag: "unknown",
// Type aliases
-aliasTag: "alias",
}
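
For reference, independent of the removed exporter: rawByte's escaping ('$' becomes "|S", '|' becomes "||") is what guarantees that a bare "$$" only ever marks the end of export data. A standalone sketch of the same transformation:

package main

import (
	"bytes"
	"fmt"
)

// escape mirrors the scheme documented above: '$' is written as "|S" and
// '|' as "||", so "$$" cannot occur inside the escaped payload.
func escape(data []byte) []byte {
	var out bytes.Buffer
	for _, b := range data {
		switch b {
		case '$':
			out.WriteString("|S")
		case '|':
			out.WriteString("||")
		default:
			out.WriteByte(b)
		}
	}
	return out.Bytes()
}

func main() {
	fmt.Printf("%s\n", escape([]byte("a$b|c"))) // prints a|Sb||c
}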

File diff suppressed because it is too large

View file

@@ -1,93 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
// This file implements FindExportData.
package gcimporter
import (
"bufio"
"fmt"
"io"
"strconv"
"strings"
)
func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
// See $GOROOT/include/ar.h.
hdr := make([]byte, 16+12+6+6+8+10+2)
_, err = io.ReadFull(r, hdr)
if err != nil {
return
}
// leave for debugging
if false {
fmt.Printf("header: %s", hdr)
}
s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
size, err = strconv.Atoi(s)
if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
err = fmt.Errorf("invalid archive header")
return
}
name = strings.TrimSpace(string(hdr[:16]))
return
}
// FindExportData positions the reader r at the beginning of the
// export data section of an underlying GC-created object/archive
// file by reading from it. The reader must be positioned at the
// start of the file before calling this function. The hdr result
// is the string before the export data, either "$$" or "$$B".
//
func FindExportData(r *bufio.Reader) (hdr string, err error) {
// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
if string(line) == "!<arch>\n" {
// Archive file. Scan to __.PKGDEF.
var name string
if name, _, err = readGopackHeader(r); err != nil {
return
}
// First entry should be __.PKGDEF.
if name != "__.PKGDEF" {
err = fmt.Errorf("go archive is missing __.PKGDEF")
return
}
// Read first line of __.PKGDEF data, so that line
// is once again the first line of the input.
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
}
// Now at __.PKGDEF in archive or still at beginning of file.
// Either way, line should begin with "go object ".
if !strings.HasPrefix(string(line), "go object ") {
err = fmt.Errorf("not a Go object file")
return
}
// Skip over object header to export data.
// Begins after first line starting with $$.
for line[0] != '$' {
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
}
hdr = string(line)
return
}

File diff suppressed because it is too large

View file

@@ -1,781 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Indexed binary package export.
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
// see that file for specification of the format.
package gcimporter
import (
"bytes"
"encoding/binary"
"go/ast"
"go/constant"
"go/token"
"go/types"
"io"
"math/big"
"reflect"
"sort"
)
// Current indexed export format version. Increase with each format change.
// 0: Go1.11 encoding
const iexportVersion = 0
// Current bundled export format version. Increase with each format change.
// 0: initial implementation
const bundleVersion = 0
// IExportData writes indexed export data for pkg to out.
//
// If no file set is provided, position info will be missing.
// The package path of the top-level package will not be recorded,
// so that calls to IImportData can override with a provided package path.
func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
return iexportCommon(out, fset, false, []*types.Package{pkg})
}
// IExportBundle writes an indexed export bundle for pkgs to out.
func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
return iexportCommon(out, fset, true, pkgs)
}
func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, pkgs []*types.Package) (err error) {
defer func() {
if e := recover(); e != nil {
if ierr, ok := e.(internalError); ok {
err = ierr
return
}
// Not an internal error; panic again.
panic(e)
}
}()
p := iexporter{
fset: fset,
allPkgs: map[*types.Package]bool{},
stringIndex: map[string]uint64{},
declIndex: map[types.Object]uint64{},
typIndex: map[types.Type]uint64{},
}
if !bundle {
p.localpkg = pkgs[0]
}
for i, pt := range predeclared() {
p.typIndex[pt] = uint64(i)
}
if len(p.typIndex) > predeclReserved {
panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
}
// Initialize work queue with exported declarations.
for _, pkg := range pkgs {
scope := pkg.Scope()
for _, name := range scope.Names() {
if ast.IsExported(name) {
p.pushDecl(scope.Lookup(name))
}
}
if bundle {
// Ensure pkg and its imports are included in the index.
p.allPkgs[pkg] = true
for _, imp := range pkg.Imports() {
p.allPkgs[imp] = true
}
}
}
// Loop until no more work.
for !p.declTodo.empty() {
p.doDecl(p.declTodo.popHead())
}
// Append indices to data0 section.
dataLen := uint64(p.data0.Len())
w := p.newWriter()
w.writeIndex(p.declIndex)
if bundle {
w.uint64(uint64(len(pkgs)))
for _, pkg := range pkgs {
w.pkg(pkg)
imps := pkg.Imports()
w.uint64(uint64(len(imps)))
for _, imp := range imps {
w.pkg(imp)
}
}
}
w.flush()
// Assemble header.
var hdr intWriter
if bundle {
hdr.uint64(bundleVersion)
}
hdr.uint64(iexportVersion)
hdr.uint64(uint64(p.strings.Len()))
hdr.uint64(dataLen)
// Flush output.
io.Copy(out, &hdr)
io.Copy(out, &p.strings)
io.Copy(out, &p.data0)
return nil
}
// writeIndex writes out an object index. mainIndex indicates whether
// we're writing out the main index, which is also read by
// non-compiler tools and includes a complete package description
// (i.e., name and height).
func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
// Build a map from packages to objects from that package.
pkgObjs := map[*types.Package][]types.Object{}
// For the main index, make sure to include every package that
// we reference, even if we're not exporting (or reexporting)
// any symbols from it.
if w.p.localpkg != nil {
pkgObjs[w.p.localpkg] = nil
}
for pkg := range w.p.allPkgs {
pkgObjs[pkg] = nil
}
for obj := range index {
pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj)
}
var pkgs []*types.Package
for pkg, objs := range pkgObjs {
pkgs = append(pkgs, pkg)
sort.Slice(objs, func(i, j int) bool {
return objs[i].Name() < objs[j].Name()
})
}
sort.Slice(pkgs, func(i, j int) bool {
return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
})
w.uint64(uint64(len(pkgs)))
for _, pkg := range pkgs {
w.string(w.exportPath(pkg))
w.string(pkg.Name())
w.uint64(uint64(0)) // package height is not needed for go/types
objs := pkgObjs[pkg]
w.uint64(uint64(len(objs)))
for _, obj := range objs {
w.string(obj.Name())
w.uint64(index[obj])
}
}
}
type iexporter struct {
fset *token.FileSet
out *bytes.Buffer
localpkg *types.Package
// allPkgs tracks all packages that have been referenced by
// the export data, so we can ensure to include them in the
// main index.
allPkgs map[*types.Package]bool
declTodo objQueue
strings intWriter
stringIndex map[string]uint64
data0 intWriter
declIndex map[types.Object]uint64
typIndex map[types.Type]uint64
}
// stringOff returns the offset of s within the string section.
// If not already present, it's added to the end.
func (p *iexporter) stringOff(s string) uint64 {
off, ok := p.stringIndex[s]
if !ok {
off = uint64(p.strings.Len())
p.stringIndex[s] = off
p.strings.uint64(uint64(len(s)))
p.strings.WriteString(s)
}
return off
}
// pushDecl adds n to the declaration work queue, if not already present.
func (p *iexporter) pushDecl(obj types.Object) {
// Package unsafe is known to the compiler and predeclared.
assert(obj.Pkg() != types.Unsafe)
if _, ok := p.declIndex[obj]; ok {
return
}
p.declIndex[obj] = ^uint64(0) // mark n present in work queue
p.declTodo.pushTail(obj)
}
// exportWriter handles writing out individual data section chunks.
type exportWriter struct {
p *iexporter
data intWriter
currPkg *types.Package
prevFile string
prevLine int64
}
func (w *exportWriter) exportPath(pkg *types.Package) string {
if pkg == w.p.localpkg {
return ""
}
return pkg.Path()
}
func (p *iexporter) doDecl(obj types.Object) {
w := p.newWriter()
w.setPkg(obj.Pkg(), false)
switch obj := obj.(type) {
case *types.Var:
w.tag('V')
w.pos(obj.Pos())
w.typ(obj.Type(), obj.Pkg())
case *types.Func:
sig, _ := obj.Type().(*types.Signature)
if sig.Recv() != nil {
panic(internalErrorf("unexpected method: %v", sig))
}
w.tag('F')
w.pos(obj.Pos())
w.signature(sig)
case *types.Const:
w.tag('C')
w.pos(obj.Pos())
w.value(obj.Type(), obj.Val())
case *types.TypeName:
if obj.IsAlias() {
w.tag('A')
w.pos(obj.Pos())
w.typ(obj.Type(), obj.Pkg())
break
}
// Defined type.
w.tag('T')
w.pos(obj.Pos())
underlying := obj.Type().Underlying()
w.typ(underlying, obj.Pkg())
t := obj.Type()
if types.IsInterface(t) {
break
}
named, ok := t.(*types.Named)
if !ok {
panic(internalErrorf("%s is not a defined type", t))
}
n := named.NumMethods()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
m := named.Method(i)
w.pos(m.Pos())
w.string(m.Name())
sig, _ := m.Type().(*types.Signature)
w.param(sig.Recv())
w.signature(sig)
}
default:
panic(internalErrorf("unexpected object: %v", obj))
}
p.declIndex[obj] = w.flush()
}
func (w *exportWriter) tag(tag byte) {
w.data.WriteByte(tag)
}
func (w *exportWriter) pos(pos token.Pos) {
if w.p.fset == nil {
w.int64(0)
return
}
p := w.p.fset.Position(pos)
file := p.Filename
line := int64(p.Line)
// When file is the same as the last position (common case),
// we can save a few bytes by delta encoding just the line
// number.
//
// Note: Because data objects may be read out of order (or not
// at all), we can only apply delta encoding within a single
// object. This is handled implicitly by tracking prevFile and
// prevLine as fields of exportWriter.
if file == w.prevFile {
delta := line - w.prevLine
w.int64(delta)
if delta == deltaNewFile {
w.int64(-1)
}
} else {
w.int64(deltaNewFile)
w.int64(line) // line >= 0
w.string(file)
w.prevFile = file
}
w.prevLine = line
}
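// A sketch of the resulting stream (illustrative only, not part of the
// exporter), assuming positions are written in this order:
//
//    pos("a.go", 10)   // new file:  deltaNewFile, 10, "a.go"
//    pos("a.go", 12)   // same file: 2   (line delta)
//    pos("a.go", 12)   // same file: 0
//    pos("b.go", 3)    // new file:  deltaNewFile, 3, "b.go"
//
// A same-file delta that happens to equal deltaNewFile is followed by an
// extra -1 so the reader can tell the two cases apart.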
func (w *exportWriter) pkg(pkg *types.Package) {
// Ensure any referenced packages are declared in the main index.
w.p.allPkgs[pkg] = true
w.string(w.exportPath(pkg))
}
func (w *exportWriter) qualifiedIdent(obj types.Object) {
// Ensure any referenced declarations are written out too.
w.p.pushDecl(obj)
w.string(obj.Name())
w.pkg(obj.Pkg())
}
func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
w.data.uint64(w.p.typOff(t, pkg))
}
func (p *iexporter) newWriter() *exportWriter {
return &exportWriter{p: p}
}
func (w *exportWriter) flush() uint64 {
off := uint64(w.p.data0.Len())
io.Copy(&w.p.data0, &w.data)
return off
}
func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
off, ok := p.typIndex[t]
if !ok {
w := p.newWriter()
w.doTyp(t, pkg)
off = predeclReserved + w.flush()
p.typIndex[t] = off
}
return off
}
func (w *exportWriter) startType(k itag) {
w.data.uint64(uint64(k))
}
func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
switch t := t.(type) {
case *types.Named:
w.startType(definedType)
w.qualifiedIdent(t.Obj())
case *types.Pointer:
w.startType(pointerType)
w.typ(t.Elem(), pkg)
case *types.Slice:
w.startType(sliceType)
w.typ(t.Elem(), pkg)
case *types.Array:
w.startType(arrayType)
w.uint64(uint64(t.Len()))
w.typ(t.Elem(), pkg)
case *types.Chan:
w.startType(chanType)
// 1 RecvOnly; 2 SendOnly; 3 SendRecv
var dir uint64
switch t.Dir() {
case types.RecvOnly:
dir = 1
case types.SendOnly:
dir = 2
case types.SendRecv:
dir = 3
}
w.uint64(dir)
w.typ(t.Elem(), pkg)
case *types.Map:
w.startType(mapType)
w.typ(t.Key(), pkg)
w.typ(t.Elem(), pkg)
case *types.Signature:
w.startType(signatureType)
w.setPkg(pkg, true)
w.signature(t)
case *types.Struct:
w.startType(structType)
w.setPkg(pkg, true)
n := t.NumFields()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
f := t.Field(i)
w.pos(f.Pos())
w.string(f.Name())
w.typ(f.Type(), pkg)
w.bool(f.Anonymous())
w.string(t.Tag(i)) // struct field tag (called a "note" in the compiler)
}
case *types.Interface:
w.startType(interfaceType)
w.setPkg(pkg, true)
n := t.NumEmbeddeds()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
f := t.Embedded(i)
w.pos(f.Obj().Pos())
w.typ(f.Obj().Type(), f.Obj().Pkg())
}
n = t.NumExplicitMethods()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
m := t.ExplicitMethod(i)
w.pos(m.Pos())
w.string(m.Name())
sig, _ := m.Type().(*types.Signature)
w.signature(sig)
}
default:
panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
}
}
func (w *exportWriter) setPkg(pkg *types.Package, write bool) {
if write {
w.pkg(pkg)
}
w.currPkg = pkg
}
func (w *exportWriter) signature(sig *types.Signature) {
w.paramList(sig.Params())
w.paramList(sig.Results())
if sig.Params().Len() > 0 {
w.bool(sig.Variadic())
}
}
func (w *exportWriter) paramList(tup *types.Tuple) {
n := tup.Len()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
w.param(tup.At(i))
}
}
func (w *exportWriter) param(obj types.Object) {
w.pos(obj.Pos())
w.localIdent(obj)
w.typ(obj.Type(), obj.Pkg())
}
func (w *exportWriter) value(typ types.Type, v constant.Value) {
w.typ(typ, nil)
switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
case types.IsBoolean:
w.bool(constant.BoolVal(v))
case types.IsInteger:
var i big.Int
if i64, exact := constant.Int64Val(v); exact {
i.SetInt64(i64)
} else if ui64, exact := constant.Uint64Val(v); exact {
i.SetUint64(ui64)
} else {
i.SetString(v.ExactString(), 10)
}
w.mpint(&i, typ)
case types.IsFloat:
f := constantToFloat(v)
w.mpfloat(f, typ)
case types.IsComplex:
w.mpfloat(constantToFloat(constant.Real(v)), typ)
w.mpfloat(constantToFloat(constant.Imag(v)), typ)
case types.IsString:
w.string(constant.StringVal(v))
default:
if b.Kind() == types.Invalid {
// package contains type errors
break
}
panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying()))
}
}
// constantToFloat converts a constant.Value with kind constant.Float to a
// big.Float.
func constantToFloat(x constant.Value) *big.Float {
x = constant.ToFloat(x)
// Use the same floating-point precision (512) as cmd/compile
// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
const mpprec = 512
var f big.Float
f.SetPrec(mpprec)
if v, exact := constant.Float64Val(x); exact {
// float64
f.SetFloat64(v)
} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
// TODO(gri): add big.Rat accessor to constant.Value.
n := valueToRat(num)
d := valueToRat(denom)
f.SetRat(n.Quo(n, d))
} else {
// Value too large to represent as a fraction; set f from its exact string instead.
// TODO(gri): add big.Float accessor to constant.Value.
_, ok := f.SetString(x.ExactString())
assert(ok)
}
return &f
}
// mpint exports a multi-precision integer.
//
// For unsigned types, small values are written out as a single
// byte. Larger values are written out as a length-prefixed big-endian
// byte string, where the length prefix is encoded as its complement.
// For example, bytes 0, 1, and 2 directly represent the integer
// values 0, 1, and 2; while bytes 255, 254, and 253 indicate that a 1-,
// 2-, or 3-byte big-endian byte string follows.
//
// The encoding for signed types uses the same general approach as for
// unsigned types, except that small values use zig-zag encoding and the
// bottom bit of the length prefix byte for large values is reserved as a
// sign bit.
//
// The exact boundary between small and large encodings varies
// according to the maximum number of bytes needed to encode a value
// of type typ. As a special case, 8-bit types are always encoded as a
// single byte.
//
// TODO(mdempsky): Is this level of complexity really worthwhile?
func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
basic, ok := typ.Underlying().(*types.Basic)
if !ok {
panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
}
signed, maxBytes := intSize(basic)
negative := x.Sign() < 0
if !signed && negative {
panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
}
b := x.Bytes()
if len(b) > 0 && b[0] == 0 {
panic(internalErrorf("leading zeros"))
}
if uint(len(b)) > maxBytes {
panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
}
maxSmall := 256 - maxBytes
if signed {
maxSmall = 256 - 2*maxBytes
}
if maxBytes == 1 {
maxSmall = 256
}
// Check if x can use small value encoding.
if len(b) <= 1 {
var ux uint
if len(b) == 1 {
ux = uint(b[0])
}
if signed {
ux <<= 1
if negative {
ux--
}
}
if ux < maxSmall {
w.data.WriteByte(byte(ux))
return
}
}
n := 256 - uint(len(b))
if signed {
n = 256 - 2*uint(len(b))
if negative {
n |= 1
}
}
if n < maxSmall || n >= 256 {
panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
}
w.data.WriteByte(byte(n))
w.data.Write(b)
}
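// Worked example (illustrative only): for a signed type with maxBytes = 4
// (e.g. int32), maxSmall = 256 - 2*4 = 248, so:
//
//     3   -> zig-zag 6          -> single byte 0x06
//    -3   -> zig-zag 5          -> single byte 0x05
//     300 -> bytes {0x01, 0x2C} -> prefix 256-2*2 = 252 (0xFC), then 0x01 0x2C
//    -300 -> same bytes, prefix 253 (0xFD, low bit set for the sign)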
// mpfloat exports a multi-precision floating point number.
//
// The number's value is decomposed into mantissa × 2**exponent, where
// mantissa is an integer. The value is written out as mantissa (as a
// multi-precision integer) and then the exponent, except exponent is
// omitted if mantissa is zero.
func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
if f.IsInf() {
panic("infinite constant")
}
// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
var mant big.Float
exp := int64(f.MantExp(&mant))
// Scale so that mant is an integer.
prec := mant.MinPrec()
mant.SetMantExp(&mant, int(prec))
exp -= int64(prec)
manti, acc := mant.Int(nil)
if acc != big.Exact {
panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
}
w.mpint(manti, typ)
if manti.Sign() != 0 {
w.int64(exp)
}
}
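// Worked example (illustrative only): 6.25 = 0.78125 × 2**3; MinPrec of the
// mantissa is 5 bits, so after rescaling the value is written as the integer
// mantissa 25 followed by the exponent -2 (6.25 = 25 × 2**-2). A zero
// mantissa is written on its own, with no exponent.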
func (w *exportWriter) bool(b bool) bool {
var x uint64
if b {
x = 1
}
w.uint64(x)
return b
}
func (w *exportWriter) int64(x int64) { w.data.int64(x) }
func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
func (w *exportWriter) localIdent(obj types.Object) {
// Anonymous parameters.
if obj == nil {
w.string("")
return
}
name := obj.Name()
if name == "_" {
w.string("_")
return
}
w.string(name)
}
type intWriter struct {
bytes.Buffer
}
func (w *intWriter) int64(x int64) {
var buf [binary.MaxVarintLen64]byte
n := binary.PutVarint(buf[:], x)
w.Write(buf[:n])
}
func (w *intWriter) uint64(x uint64) {
var buf [binary.MaxVarintLen64]byte
n := binary.PutUvarint(buf[:], x)
w.Write(buf[:n])
}
func assert(cond bool) {
if !cond {
panic("internal error: assertion failed")
}
}
// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.
// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
// a ready-to-use empty queue.
type objQueue struct {
ring []types.Object
head, tail int
}
// empty returns true if q contains no objects.
func (q *objQueue) empty() bool {
return q.head == q.tail
}
// pushTail appends obj to the tail of the queue.
func (q *objQueue) pushTail(obj types.Object) {
if len(q.ring) == 0 {
q.ring = make([]types.Object, 16)
} else if q.head+len(q.ring) == q.tail {
// Grow the ring.
nring := make([]types.Object, len(q.ring)*2)
// Copy the old elements.
part := q.ring[q.head%len(q.ring):]
if q.tail-q.head <= len(part) {
part = part[:q.tail-q.head]
copy(nring, part)
} else {
pos := copy(nring, part)
copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
}
q.ring, q.head, q.tail = nring, 0, q.tail-q.head
}
q.ring[q.tail%len(q.ring)] = obj
q.tail++
}
// popHead pops an object from the head of the queue. It panics if q is empty.
func (q *objQueue) popHead() types.Object {
if q.empty() {
panic("dequeue empty")
}
obj := q.ring[q.head%len(q.ring)]
q.head++
return obj
}
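// Minimal usage sketch (illustrative only; process stands for arbitrary
// caller code). The zero value is ready to use and the ring grows on demand:
//
//    var q objQueue
//    q.pushTail(obj1)
//    q.pushTail(obj2)
//    for !q.empty() {
//        process(q.popHead()) // obj1 first, then obj2 (FIFO)
//    }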

View file

@ -1,676 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Indexed package import.
// See cmd/compile/internal/gc/iexport.go for the export data format.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
package gcimporter
import (
"bytes"
"encoding/binary"
"fmt"
"go/constant"
"go/token"
"go/types"
"io"
"sort"
)
type intReader struct {
*bytes.Reader
path string
}
func (r *intReader) int64() int64 {
i, err := binary.ReadVarint(r.Reader)
if err != nil {
errorf("import %q: read varint error: %v", r.path, err)
}
return i
}
func (r *intReader) uint64() uint64 {
i, err := binary.ReadUvarint(r.Reader)
if err != nil {
errorf("import %q: read varint error: %v", r.path, err)
}
return i
}
const predeclReserved = 32
type itag uint64
const (
// Types
definedType itag = iota
pointerType
sliceType
arrayType
chanType
mapType
signatureType
structType
interfaceType
)
// IImportData imports a package from the serialized package data
// and returns 0 and a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
pkgs, err := iimportCommon(fset, imports, data, false, path)
if err != nil {
return 0, nil, err
}
return 0, pkgs[0], nil
}
// IImportBundle imports a set of packages from the serialized package bundle.
func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
return iimportCommon(fset, imports, data, true, "")
}
func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string) (pkgs []*types.Package, err error) {
const currentVersion = 1
version := int64(-1)
defer func() {
if e := recover(); e != nil {
if version > currentVersion {
err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
} else {
err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
}
}
}()
r := &intReader{bytes.NewReader(data), path}
if bundle {
v := r.uint64()
switch v {
case bundleVersion:
default:
errorf("unknown bundle format version %d", v)
}
}
version = int64(r.uint64())
switch version {
case currentVersion, 0:
default:
errorf("unknown iexport format version %d", version)
}
sLen := int64(r.uint64())
dLen := int64(r.uint64())
whence, _ := r.Seek(0, io.SeekCurrent)
stringData := data[whence : whence+sLen]
declData := data[whence+sLen : whence+sLen+dLen]
r.Seek(sLen+dLen, io.SeekCurrent)
p := iimporter{
ipath: path,
version: int(version),
stringData: stringData,
stringCache: make(map[uint64]string),
pkgCache: make(map[uint64]*types.Package),
declData: declData,
pkgIndex: make(map[*types.Package]map[string]uint64),
typCache: make(map[uint64]types.Type),
fake: fakeFileSet{
fset: fset,
files: make(map[string]*token.File),
},
}
for i, pt := range predeclared() {
p.typCache[uint64(i)] = pt
}
pkgList := make([]*types.Package, r.uint64())
for i := range pkgList {
pkgPathOff := r.uint64()
pkgPath := p.stringAt(pkgPathOff)
pkgName := p.stringAt(r.uint64())
_ = r.uint64() // package height; unused by go/types
if pkgPath == "" {
pkgPath = path
}
pkg := imports[pkgPath]
if pkg == nil {
pkg = types.NewPackage(pkgPath, pkgName)
imports[pkgPath] = pkg
} else if pkg.Name() != pkgName {
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
}
p.pkgCache[pkgPathOff] = pkg
nameIndex := make(map[string]uint64)
for nSyms := r.uint64(); nSyms > 0; nSyms-- {
name := p.stringAt(r.uint64())
nameIndex[name] = r.uint64()
}
p.pkgIndex[pkg] = nameIndex
pkgList[i] = pkg
}
if bundle {
pkgs = make([]*types.Package, r.uint64())
for i := range pkgs {
pkg := p.pkgAt(r.uint64())
imps := make([]*types.Package, r.uint64())
for j := range imps {
imps[j] = p.pkgAt(r.uint64())
}
pkg.SetImports(imps)
pkgs[i] = pkg
}
} else {
if len(pkgList) == 0 {
errorf("no packages found for %s", path)
panic("unreachable")
}
pkgs = pkgList[:1]
// record all referenced packages as imports
list := append(([]*types.Package)(nil), pkgList[1:]...)
sort.Sort(byPath(list))
pkgs[0].SetImports(list)
}
for _, pkg := range pkgs {
if pkg.Complete() {
continue
}
names := make([]string, 0, len(p.pkgIndex[pkg]))
for name := range p.pkgIndex[pkg] {
names = append(names, name)
}
sort.Strings(names)
for _, name := range names {
p.doDecl(pkg, name)
}
// package was imported completely and without errors
pkg.MarkComplete()
}
for _, typ := range p.interfaceList {
typ.Complete()
}
return pkgs, nil
}
type iimporter struct {
ipath string
version int
stringData []byte
stringCache map[uint64]string
pkgCache map[uint64]*types.Package
declData []byte
pkgIndex map[*types.Package]map[string]uint64
typCache map[uint64]types.Type
fake fakeFileSet
interfaceList []*types.Interface
}
func (p *iimporter) doDecl(pkg *types.Package, name string) {
// See if we've already imported this declaration.
if obj := pkg.Scope().Lookup(name); obj != nil {
return
}
off, ok := p.pkgIndex[pkg][name]
if !ok {
errorf("%v.%v not in index", pkg, name)
}
r := &importReader{p: p, currPkg: pkg}
r.declReader.Reset(p.declData[off:])
r.obj(name)
}
func (p *iimporter) stringAt(off uint64) string {
if s, ok := p.stringCache[off]; ok {
return s
}
slen, n := binary.Uvarint(p.stringData[off:])
if n <= 0 {
errorf("varint failed")
}
spos := off + uint64(n)
s := string(p.stringData[spos : spos+slen])
p.stringCache[off] = s
return s
}
func (p *iimporter) pkgAt(off uint64) *types.Package {
if pkg, ok := p.pkgCache[off]; ok {
return pkg
}
path := p.stringAt(off)
errorf("missing package %q in %q", path, p.ipath)
return nil
}
func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
return t
}
if off < predeclReserved {
errorf("predeclared type missing from cache: %v", off)
}
r := &importReader{p: p}
r.declReader.Reset(p.declData[off-predeclReserved:])
t := r.doType(base)
if base == nil || !isInterface(t) {
p.typCache[off] = t
}
return t
}
type importReader struct {
p *iimporter
declReader bytes.Reader
currPkg *types.Package
prevFile string
prevLine int64
prevColumn int64
}
func (r *importReader) obj(name string) {
tag := r.byte()
pos := r.pos()
switch tag {
case 'A':
typ := r.typ()
r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
case 'C':
typ, val := r.value()
r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
case 'F':
sig := r.signature(nil)
r.declare(types.NewFunc(pos, r.currPkg, name, sig))
case 'T':
// Types can be recursive. We need to set up a stub
// declaration before recursing.
obj := types.NewTypeName(pos, r.currPkg, name, nil)
named := types.NewNamed(obj, nil, nil)
r.declare(obj)
underlying := r.p.typAt(r.uint64(), named).Underlying()
named.SetUnderlying(underlying)
if !isInterface(underlying) {
for n := r.uint64(); n > 0; n-- {
mpos := r.pos()
mname := r.ident()
recv := r.param()
msig := r.signature(recv)
named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
}
}
case 'V':
typ := r.typ()
r.declare(types.NewVar(pos, r.currPkg, name, typ))
default:
errorf("unexpected tag: %v", tag)
}
}
func (r *importReader) declare(obj types.Object) {
obj.Pkg().Scope().Insert(obj)
}
func (r *importReader) value() (typ types.Type, val constant.Value) {
typ = r.typ()
switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
case types.IsBoolean:
val = constant.MakeBool(r.bool())
case types.IsString:
val = constant.MakeString(r.string())
case types.IsInteger:
val = r.mpint(b)
case types.IsFloat:
val = r.mpfloat(b)
case types.IsComplex:
re := r.mpfloat(b)
im := r.mpfloat(b)
val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
default:
if b.Kind() == types.Invalid {
val = constant.MakeUnknown()
return
}
errorf("unexpected type %v", typ) // panics
panic("unreachable")
}
return
}
func intSize(b *types.Basic) (signed bool, maxBytes uint) {
if (b.Info() & types.IsUntyped) != 0 {
return true, 64
}
switch b.Kind() {
case types.Float32, types.Complex64:
return true, 3
case types.Float64, types.Complex128:
return true, 7
}
signed = (b.Info() & types.IsUnsigned) == 0
switch b.Kind() {
case types.Int8, types.Uint8:
maxBytes = 1
case types.Int16, types.Uint16:
maxBytes = 2
case types.Int32, types.Uint32:
maxBytes = 4
default:
maxBytes = 8
}
return
}
func (r *importReader) mpint(b *types.Basic) constant.Value {
signed, maxBytes := intSize(b)
maxSmall := 256 - maxBytes
if signed {
maxSmall = 256 - 2*maxBytes
}
if maxBytes == 1 {
maxSmall = 256
}
n, _ := r.declReader.ReadByte()
if uint(n) < maxSmall {
v := int64(n)
if signed {
v >>= 1
if n&1 != 0 {
v = ^v
}
}
return constant.MakeInt64(v)
}
v := -n
if signed {
v = -(n &^ 1) >> 1
}
if v < 1 || uint(v) > maxBytes {
errorf("weird decoding: %v, %v => %v", n, signed, v)
}
buf := make([]byte, v)
io.ReadFull(&r.declReader, buf)
// convert to little endian
// TODO(gri) go/constant should have a more direct conversion function
// (e.g., once it supports a big.Float based implementation)
for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
buf[i], buf[j] = buf[j], buf[i]
}
x := constant.MakeFromBytes(buf)
if signed && n&1 != 0 {
x = constant.UnaryOp(token.SUB, x, 0)
}
return x
}
func (r *importReader) mpfloat(b *types.Basic) constant.Value {
x := r.mpint(b)
if constant.Sign(x) == 0 {
return x
}
exp := r.int64()
switch {
case exp > 0:
x = constant.Shift(x, token.SHL, uint(exp))
// Ensure that the imported Kind is Float, else this constant may run into
// bitsize limits on overlarge integers. Eventually we can instead adopt
// the approach of CL 288632, but that CL relies on go/constant APIs that
// were introduced in go1.13.
//
// TODO(rFindley): sync the logic here with tip Go once we no longer
// support go1.12.
x = constant.ToFloat(x)
case exp < 0:
d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
x = constant.BinaryOp(x, token.QUO, d)
}
return x
}
func (r *importReader) ident() string {
return r.string()
}
func (r *importReader) qualifiedIdent() (*types.Package, string) {
name := r.string()
pkg := r.pkg()
return pkg, name
}
func (r *importReader) pos() token.Pos {
if r.p.version >= 1 {
r.posv1()
} else {
r.posv0()
}
if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
return token.NoPos
}
return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
}
func (r *importReader) posv0() {
delta := r.int64()
if delta != deltaNewFile {
r.prevLine += delta
} else if l := r.int64(); l == -1 {
r.prevLine += deltaNewFile
} else {
r.prevFile = r.string()
r.prevLine = l
}
}
func (r *importReader) posv1() {
delta := r.int64()
r.prevColumn += delta >> 1
if delta&1 != 0 {
delta = r.int64()
r.prevLine += delta >> 1
if delta&1 != 0 {
r.prevFile = r.string()
}
}
}
func (r *importReader) typ() types.Type {
return r.p.typAt(r.uint64(), nil)
}
func isInterface(t types.Type) bool {
_, ok := t.(*types.Interface)
return ok
}
func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
func (r *importReader) doType(base *types.Named) types.Type {
switch k := r.kind(); k {
default:
errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
return nil
case definedType:
pkg, name := r.qualifiedIdent()
r.p.doDecl(pkg, name)
return pkg.Scope().Lookup(name).(*types.TypeName).Type()
case pointerType:
return types.NewPointer(r.typ())
case sliceType:
return types.NewSlice(r.typ())
case arrayType:
n := r.uint64()
return types.NewArray(r.typ(), int64(n))
case chanType:
dir := chanDir(int(r.uint64()))
return types.NewChan(dir, r.typ())
case mapType:
return types.NewMap(r.typ(), r.typ())
case signatureType:
r.currPkg = r.pkg()
return r.signature(nil)
case structType:
r.currPkg = r.pkg()
fields := make([]*types.Var, r.uint64())
tags := make([]string, len(fields))
for i := range fields {
fpos := r.pos()
fname := r.ident()
ftyp := r.typ()
emb := r.bool()
tag := r.string()
fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
tags[i] = tag
}
return types.NewStruct(fields, tags)
case interfaceType:
r.currPkg = r.pkg()
embeddeds := make([]types.Type, r.uint64())
for i := range embeddeds {
_ = r.pos()
embeddeds[i] = r.typ()
}
methods := make([]*types.Func, r.uint64())
for i := range methods {
mpos := r.pos()
mname := r.ident()
// TODO(mdempsky): Matches bimport.go, but I
// don't agree with this.
var recv *types.Var
if base != nil {
recv = types.NewVar(token.NoPos, r.currPkg, "", base)
}
msig := r.signature(recv)
methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
}
typ := newInterface(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
}
}
func (r *importReader) kind() itag {
return itag(r.uint64())
}
func (r *importReader) signature(recv *types.Var) *types.Signature {
params := r.paramList()
results := r.paramList()
variadic := params.Len() > 0 && r.bool()
return types.NewSignature(recv, params, results, variadic)
}
func (r *importReader) paramList() *types.Tuple {
xs := make([]*types.Var, r.uint64())
for i := range xs {
xs[i] = r.param()
}
return types.NewTuple(xs...)
}
func (r *importReader) param() *types.Var {
pos := r.pos()
name := r.ident()
typ := r.typ()
return types.NewParam(pos, r.currPkg, name, typ)
}
func (r *importReader) bool() bool {
return r.uint64() != 0
}
func (r *importReader) int64() int64 {
n, err := binary.ReadVarint(&r.declReader)
if err != nil {
errorf("readVarint: %v", err)
}
return n
}
func (r *importReader) uint64() uint64 {
n, err := binary.ReadUvarint(&r.declReader)
if err != nil {
errorf("readUvarint: %v", err)
}
return n
}
func (r *importReader) byte() byte {
x, err := r.declReader.ReadByte()
if err != nil {
errorf("declReader.ReadByte: %v", err)
}
return x
}

View file

@ -1,22 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.11
// +build !go1.11
package gcimporter
import "go/types"
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
named := make([]*types.Named, len(embeddeds))
for i, e := range embeddeds {
var ok bool
named[i], ok = e.(*types.Named)
if !ok {
panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
}
}
return types.NewInterface(methods, named)
}

View file

@ -1,14 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.11
// +build go1.11
package gcimporter
import "go/types"
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
return types.NewInterfaceType(methods, embeddeds)
}

View file

@ -1,85 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package core provides support for event based telemetry.
package core
import (
"fmt"
"time"
"golang.org/x/tools/internal/event/label"
)
// Event holds the information about an event of note that occurred.
type Event struct {
at time.Time
// As events are often on the stack, storing the first few labels directly
// in the event can avoid an allocation at all for the very common cases of
// simple events.
// The length needs to be large enough to cope with the majority of events
// but not so large as to cause undue stack pressure.
// A log message with two values will use 3 labels (one for each value and
// one for the message itself).
static [3]label.Label // inline storage for the first few labels
dynamic []label.Label // dynamically sized storage for remaining labels
}
// eventLabelMap implements label.Map for the labels of an Event.
type eventLabelMap struct {
event Event
}
func (ev Event) At() time.Time { return ev.at }
func (ev Event) Format(f fmt.State, r rune) {
if !ev.at.IsZero() {
fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 "))
}
for index := 0; ev.Valid(index); index++ {
if l := ev.Label(index); l.Valid() {
fmt.Fprintf(f, "\n\t%v", l)
}
}
}
func (ev Event) Valid(index int) bool {
return index >= 0 && index < len(ev.static)+len(ev.dynamic)
}
func (ev Event) Label(index int) label.Label {
if index < len(ev.static) {
return ev.static[index]
}
return ev.dynamic[index-len(ev.static)]
}
func (ev Event) Find(key label.Key) label.Label {
for _, l := range ev.static {
if l.Key() == key {
return l
}
}
for _, l := range ev.dynamic {
if l.Key() == key {
return l
}
}
return label.Label{}
}
func MakeEvent(static [3]label.Label, labels []label.Label) Event {
return Event{
static: static,
dynamic: labels,
}
}
// CloneEvent returns a copy of the event with the time adjusted to at.
func CloneEvent(ev Event, at time.Time) Event {
ev.at = at
return ev
}

View file

@ -1,70 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package core
import (
"context"
"sync/atomic"
"time"
"unsafe"
"golang.org/x/tools/internal/event/label"
)
// Exporter is a function that handles events.
// It may return a modified context and event.
type Exporter func(context.Context, Event, label.Map) context.Context
var (
exporter unsafe.Pointer
)
// SetExporter sets the global exporter function that handles all events.
// The exporter is called synchronously from the event call site, so it should
// return quickly so as not to hold up user code.
func SetExporter(e Exporter) {
p := unsafe.Pointer(&e)
if e == nil {
// &e is always valid, and so p is always valid, but for the early abort
// of Export to be efficient it needs to make the nil check on the
// pointer without having to dereference it, so we make the nil function
// also a nil pointer
p = nil
}
atomic.StorePointer(&exporter, p)
}
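// Minimal usage sketch (illustrative only; the printing exporter shown here
// is hypothetical caller code):
//
//    SetExporter(func(ctx context.Context, ev Event, lm label.Map) context.Context {
//        fmt.Printf("%v\n", ev)
//        return ctx
//    })
//
// Passing nil removes the exporter again.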
// deliver is called to deliver an event to the supplied exporter.
// It will fill in the time.
func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
// add the current time to the event
ev.at = time.Now()
// hand the event off to the current exporter
return exporter(ctx, ev, ev)
}
// Export is called to deliver an event to the global exporter if set.
func Export(ctx context.Context, ev Event) context.Context {
// get the global exporter and abort early if there is not one
exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
if exporterPtr == nil {
return ctx
}
return deliver(ctx, *exporterPtr, ev)
}
// ExportPair is called to deliver a start event to the global exporter, if set.
// It also returns a function that will deliver the end event to the same
// exporter.
// It will fill in the time.
func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
// get the global exporter and abort early if there is not one
exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
if exporterPtr == nil {
return ctx, func() {}
}
ctx = deliver(ctx, *exporterPtr, begin)
return ctx, func() { deliver(ctx, *exporterPtr, end) }
}
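// Minimal usage sketch (illustrative only; begin and end stand for events
// built with MakeEvent). The returned function delivers the end event and is
// typically deferred:
//
//    ctx, done := ExportPair(ctx, begin, end)
//    defer done()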

View file

@ -1,77 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package core
import (
"context"
"golang.org/x/tools/internal/event/keys"
"golang.org/x/tools/internal/event/label"
)
// Log1 takes a message and one label and delivers a log event to the exporter.
// It is a customized version of Print that is faster and does no allocation.
func Log1(ctx context.Context, message string, t1 label.Label) {
Export(ctx, MakeEvent([3]label.Label{
keys.Msg.Of(message),
t1,
}, nil))
}
// Log2 takes a message and two labels and delivers a log event to the exporter.
// It is a customized version of Print that is faster and does no allocation.
func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) {
Export(ctx, MakeEvent([3]label.Label{
keys.Msg.Of(message),
t1,
t2,
}, nil))
}
// Metric1 sends a metric event to the exporter with the supplied label.
func Metric1(ctx context.Context, t1 label.Label) context.Context {
return Export(ctx, MakeEvent([3]label.Label{
keys.Metric.New(),
t1,
}, nil))
}
// Metric2 sends a metric event to the exporter with the supplied labels.
func Metric2(ctx context.Context, t1, t2 label.Label) context.Context {
return Export(ctx, MakeEvent([3]label.Label{
keys.Metric.New(),
t1,
t2,
}, nil))
}
// Start1 sends a span start event with the supplied label list to the exporter.
// It also returns a function that will end the span, which should normally be
// deferred.
func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) {
return ExportPair(ctx,
MakeEvent([3]label.Label{
keys.Start.Of(name),
t1,
}, nil),
MakeEvent([3]label.Label{
keys.End.New(),
}, nil))
}
// Start2 sends a span start event with the supplied label list to the exporter.
// It also returns a function that will end the span, which should normally be
// deferred.
func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) {
return ExportPair(ctx,
MakeEvent([3]label.Label{
keys.Start.Of(name),
t1,
t2,
}, nil),
MakeEvent([3]label.Label{
keys.End.New(),
}, nil))
}

View file

@ -1,7 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package event provides a set of packages that cover the main
// concepts of telemetry in an implementation agnostic way.
package event

View file

@ -1,127 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package event
import (
"context"
"golang.org/x/tools/internal/event/core"
"golang.org/x/tools/internal/event/keys"
"golang.org/x/tools/internal/event/label"
)
// Exporter is a function that handles events.
// It may return a modified context and event.
type Exporter func(context.Context, core.Event, label.Map) context.Context
// SetExporter sets the global exporter function that handles all events.
// The exporter is called synchronously from the event call site, so it should
// return quickly so as not to hold up user code.
func SetExporter(e Exporter) {
core.SetExporter(core.Exporter(e))
}
// Log takes a message and a label list and combines them into a single event
// before delivering them to the exporter.
func Log(ctx context.Context, message string, labels ...label.Label) {
core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Msg.Of(message),
}, labels))
}
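// Minimal usage sketch (illustrative only; the "operation" key is
// hypothetical and would be built with the keys package):
//
//    var operation = keys.NewString("operation", "the operation being performed")
//
//    func doWork(ctx context.Context) {
//        event.Log(ctx, "starting work", operation.Of("compile"))
//    }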
// IsLog returns true if the event was built by the Log function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsLog(ev core.Event) bool {
return ev.Label(0).Key() == keys.Msg
}
// Error takes a message and a label list and combines them into a single event
// before delivering them to the exporter. It captures the error in the
// delivered event.
func Error(ctx context.Context, message string, err error, labels ...label.Label) {
core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Msg.Of(message),
keys.Err.Of(err),
}, labels))
}
// IsError returns true if the event was built by the Error function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsError(ev core.Event) bool {
return ev.Label(0).Key() == keys.Msg &&
ev.Label(1).Key() == keys.Err
}
// Metric sends a metric event to the exporter with the supplied labels.
func Metric(ctx context.Context, labels ...label.Label) {
core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Metric.New(),
}, labels))
}
// IsMetric returns true if the event was built by the Metric function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsMetric(ev core.Event) bool {
return ev.Label(0).Key() == keys.Metric
}
// Label sends a label event to the exporter with the supplied labels.
func Label(ctx context.Context, labels ...label.Label) context.Context {
return core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Label.New(),
}, labels))
}
// IsLabel returns true if the event was built by the Label function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsLabel(ev core.Event) bool {
return ev.Label(0).Key() == keys.Label
}
// Start sends a span start event with the supplied label list to the exporter.
// It also returns a function that will end the span, which should normally be
// deferred.
func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) {
return core.ExportPair(ctx,
core.MakeEvent([3]label.Label{
keys.Start.Of(name),
}, labels),
core.MakeEvent([3]label.Label{
keys.End.New(),
}, nil))
}
// IsStart returns true if the event was built by the Start function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsStart(ev core.Event) bool {
return ev.Label(0).Key() == keys.Start
}
// IsEnd returns true if the event was built by the End function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsEnd(ev core.Event) bool {
return ev.Label(0).Key() == keys.End
}
// Detach returns a context without an associated span.
// This allows the creation of spans that are not children of the current span.
func Detach(ctx context.Context) context.Context {
return core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Detach.New(),
}, nil))
}
// IsDetach returns true if the event was built by the Detach function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsDetach(ev core.Event) bool {
return ev.Label(0).Key() == keys.Detach
}

View file

@ -1,564 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package keys
import (
"fmt"
"io"
"math"
"strconv"
"golang.org/x/tools/internal/event/label"
)
// Value represents a key for untyped values.
type Value struct {
name string
description string
}
// New creates a new Key for untyped values.
func New(name, description string) *Value {
return &Value{name: name, description: description}
}
func (k *Value) Name() string { return k.name }
func (k *Value) Description() string { return k.description }
func (k *Value) Format(w io.Writer, buf []byte, l label.Label) {
fmt.Fprint(w, k.From(l))
}
// Get can be used to get a label for the key from a label.Map.
func (k *Value) Get(lm label.Map) interface{} {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return nil
}
// From can be used to get a value from a Label.
func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() }
// Of creates a new Label with this key and the supplied value.
func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) }
// Tag represents a key for tagging labels that have no value.
// These are used when the existence of the label is the entire information it
// carries, such as marking events to be of a specific kind, or from a specific
// package.
type Tag struct {
name string
description string
}
// NewTag creates a new Key for tagging labels.
func NewTag(name, description string) *Tag {
return &Tag{name: name, description: description}
}
func (k *Tag) Name() string { return k.name }
func (k *Tag) Description() string { return k.description }
func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {}
// New creates a new Label with this key.
func (k *Tag) New() label.Label { return label.OfValue(k, nil) }
// Int represents a key
type Int struct {
name string
description string
}
// NewInt creates a new Key for int values.
func NewInt(name, description string) *Int {
return &Int{name: name, description: description}
}
func (k *Int) Name() string { return k.name }
func (k *Int) Description() string { return k.description }
func (k *Int) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int) Get(lm label.Map) int {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int) From(t label.Label) int { return int(t.Unpack64()) }
// Int8 represents a key
type Int8 struct {
name string
description string
}
// NewInt8 creates a new Key for int8 values.
func NewInt8(name, description string) *Int8 {
return &Int8{name: name, description: description}
}
func (k *Int8) Name() string { return k.name }
func (k *Int8) Description() string { return k.description }
func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int8) Get(lm label.Map) int8 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) }
// Int16 represents a key
type Int16 struct {
name string
description string
}
// NewInt16 creates a new Key for int16 values.
func NewInt16(name, description string) *Int16 {
return &Int16{name: name, description: description}
}
func (k *Int16) Name() string { return k.name }
func (k *Int16) Description() string { return k.description }
func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int16) Get(lm label.Map) int16 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) }
// Int32 represents a key
type Int32 struct {
name string
description string
}
// NewInt32 creates a new Key for int32 values.
func NewInt32(name, description string) *Int32 {
return &Int32{name: name, description: description}
}
func (k *Int32) Name() string { return k.name }
func (k *Int32) Description() string { return k.description }
func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int32) Get(lm label.Map) int32 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) }
// Int64 represents a key
type Int64 struct {
name string
description string
}
// NewInt64 creates a new Key for int64 values.
func NewInt64(name, description string) *Int64 {
return &Int64{name: name, description: description}
}
func (k *Int64) Name() string { return k.name }
func (k *Int64) Description() string { return k.description }
func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, k.From(l), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int64) Get(lm label.Map) int64 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) }
// UInt represents a key
type UInt struct {
name string
description string
}
// NewUInt creates a new Key for uint values.
func NewUInt(name, description string) *UInt {
return &UInt{name: name, description: description}
}
func (k *UInt) Name() string { return k.name }
func (k *UInt) Description() string { return k.description }
func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt) Get(lm label.Map) uint {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) }
// UInt8 represents a key
type UInt8 struct {
name string
description string
}
// NewUInt8 creates a new Key for uint8 values.
func NewUInt8(name, description string) *UInt8 {
return &UInt8{name: name, description: description}
}
func (k *UInt8) Name() string { return k.name }
func (k *UInt8) Description() string { return k.description }
func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt8) Get(lm label.Map) uint8 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) }
// UInt16 represents a key
type UInt16 struct {
name string
description string
}
// NewUInt16 creates a new Key for uint16 values.
func NewUInt16(name, description string) *UInt16 {
return &UInt16{name: name, description: description}
}
func (k *UInt16) Name() string { return k.name }
func (k *UInt16) Description() string { return k.description }
func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt16) Get(lm label.Map) uint16 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) }
// UInt32 represents a key
type UInt32 struct {
name string
description string
}
// NewUInt32 creates a new Key for uint32 values.
func NewUInt32(name, description string) *UInt32 {
return &UInt32{name: name, description: description}
}
func (k *UInt32) Name() string { return k.name }
func (k *UInt32) Description() string { return k.description }
func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt32) Get(lm label.Map) uint32 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) }
// UInt64 represents a key
type UInt64 struct {
name string
description string
}
// NewUInt64 creates a new Key for uint64 values.
func NewUInt64(name, description string) *UInt64 {
return &UInt64{name: name, description: description}
}
func (k *UInt64) Name() string { return k.name }
func (k *UInt64) Description() string { return k.description }
func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, k.From(l), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt64) Get(lm label.Map) uint64 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() }
// Float32 represents a key
type Float32 struct {
name string
description string
}
// NewFloat32 creates a new Key for float32 values.
func NewFloat32(name, description string) *Float32 {
return &Float32{name: name, description: description}
}
func (k *Float32) Name() string { return k.name }
func (k *Float32) Description() string { return k.description }
func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32))
}
// Of creates a new Label with this key and the supplied value.
func (k *Float32) Of(v float32) label.Label {
return label.Of64(k, uint64(math.Float32bits(v)))
}
// Get can be used to get a label for the key from a label.Map.
func (k *Float32) Get(lm label.Map) float32 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Float32) From(t label.Label) float32 {
return math.Float32frombits(uint32(t.Unpack64()))
}
// Float64 represents a key
type Float64 struct {
name string
description string
}
// NewFloat64 creates a new Key for float64 values.
func NewFloat64(name, description string) *Float64 {
return &Float64{name: name, description: description}
}
func (k *Float64) Name() string { return k.name }
func (k *Float64) Description() string { return k.description }
func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
}
// Of creates a new Label with this key and the supplied value.
func (k *Float64) Of(v float64) label.Label {
return label.Of64(k, math.Float64bits(v))
}
// Get can be used to get a label for the key from a label.Map.
func (k *Float64) Get(lm label.Map) float64 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Float64) From(t label.Label) float64 {
return math.Float64frombits(t.Unpack64())
}
// String represents a key
type String struct {
name string
description string
}
// NewString creates a new Key for string values.
func NewString(name, description string) *String {
return &String{name: name, description: description}
}
func (k *String) Name() string { return k.name }
func (k *String) Description() string { return k.description }
func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendQuote(buf, k.From(l)))
}
// Of creates a new Label with this key and the supplied value.
func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
// Get can be used to get a label for the key from a label.Map.
func (k *String) Get(lm label.Map) string {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return ""
}
// From can be used to get a value from a Label.
func (k *String) From(t label.Label) string { return t.UnpackString() }
// Boolean represents a key
type Boolean struct {
name string
description string
}
// NewBoolean creates a new Key for bool values.
func NewBoolean(name, description string) *Boolean {
return &Boolean{name: name, description: description}
}
func (k *Boolean) Name() string { return k.name }
func (k *Boolean) Description() string { return k.description }
func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendBool(buf, k.From(l)))
}
// Of creates a new Label with this key and the supplied value.
func (k *Boolean) Of(v bool) label.Label {
if v {
return label.Of64(k, 1)
}
return label.Of64(k, 0)
}
// Get can be used to get a label for the key from a label.Map.
func (k *Boolean) Get(lm label.Map) bool {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return false
}
// From can be used to get a value from a Label.
func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
// Error represents a key
type Error struct {
name string
description string
}
// NewError creates a new Key for error values.
func NewError(name, description string) *Error {
return &Error{name: name, description: description}
}
func (k *Error) Name() string { return k.name }
func (k *Error) Description() string { return k.description }
func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
io.WriteString(w, k.From(l).Error())
}
// Of creates a new Label with this key and the supplied value.
func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
// Get can be used to get a label for the key from a label.Map.
func (k *Error) Get(lm label.Map) error {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return nil
}
// From can be used to get a value from a Label.
func (k *Error) From(t label.Label) error {
err, _ := t.UnpackValue().(error)
return err
}
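// Minimal usage sketch (illustrative only; Attempts is a hypothetical key):
//
//    var Attempts = NewInt("attempts", "number of retries performed")
//
//    l := Attempts.Of(3)   // build a label
//    m := label.NewMap(l)  // wrap it in a label.Map
//    n := Attempts.Get(m)  // n == 3; a missing key yields the zero value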

View file

@ -1,22 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package keys
var (
// Msg is a key used to add message strings to label lists.
Msg = NewString("message", "a readable message")
// Label is a key used to indicate an event adds labels to the context.
Label = NewTag("label", "a label context marker")
// Start is used for things like traces that have a name.
Start = NewString("start", "span start")
// End is a key used to mark the end of a span.
End = NewTag("end", "a span end marker")
// Detach is a key used to indicate an event detaches from the current span.
Detach = NewTag("detach", "a span detach marker")
// Err is a key used to add error values to label lists.
Err = NewError("error", "an error that occurred")
// Metric is a key used to indicate an event records metrics.
Metric = NewTag("metric", "a metric event marker")
)

View file

@ -1,215 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package label
import (
"fmt"
"io"
"reflect"
"unsafe"
)
// Key is used as the identity of a Label.
// Keys are intended to be compared by pointer only; the name should be unique
// for communicating with external systems, but that is not required or enforced.
type Key interface {
// Name returns the key name.
Name() string
// Description returns a string that can be used to describe the value.
Description() string
// Format is used in formatting to append the value of the label to the
// supplied buffer.
// The formatter may use the supplied buf as a scratch area to avoid
// allocations.
Format(w io.Writer, buf []byte, l Label)
}
// Label holds a key and value pair.
// It is normally used when passing around lists of labels.
type Label struct {
key Key
packed uint64
untyped interface{}
}
// Map is the interface to a collection of Labels indexed by key.
type Map interface {
// Find returns the label that matches the supplied key.
Find(key Key) Label
}
// List is the interface to something that provides an iterable
// list of labels.
// Iteration should start from 0 and continue until Valid returns false.
type List interface {
// Valid returns true if the index is within range for the list.
// It does not imply the label at that index will itself be valid.
Valid(index int) bool
// Label returns the label at the given index.
Label(index int) Label
}
// list implements List for a slice of Labels.
type list struct {
labels []Label
}
// filter wraps a List, filtering out specific labels.
type filter struct {
keys []Key
underlying List
}
// listMap implements Map for a simple list of labels.
type listMap struct {
labels []Label
}
// mapChain implements Map for a list of underlying Maps.
type mapChain struct {
maps []Map
}
// OfValue creates a new label from the key and value.
// This method is for implementing new key types, label creation should
// normally be done with the Of method of the key.
func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} }
// UnpackValue assumes the label was built using OfValue and returns the value
// that was passed to that constructor.
// This method is for implementing new key types, for type safety normal
// access should be done with the From method of the key.
func (t Label) UnpackValue() interface{} { return t.untyped }
// Of64 creates a new label from a key and a uint64. This is often
// used for non-uint64 values that can be packed into a uint64.
// This method is for implementing new key types, label creation should
// normally be done with the Of method of the key.
func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} }
// Unpack64 assumes the label was built using LabelOf64 and returns the value that
// was passed to that constructor.
// This method is for implementing new key types, for type safety normal
// access should be done with the From method of the key.
func (t Label) Unpack64() uint64 { return t.packed }
type stringptr unsafe.Pointer
// OfString creates a new label from a key and a string.
// This method is for implementing new key types, label creation should
// normally be done with the Of method of the key.
func OfString(k Key, v string) Label {
hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
return Label{
key: k,
packed: uint64(hdr.Len),
untyped: stringptr(hdr.Data),
}
}
// UnpackString assumes the label was built using LabelOfString and returns the
// value that was passed to that constructor.
// This method is for implementing new key types, for type safety normal
// access should be done with the From method of the key.
func (t Label) UnpackString() string {
var v string
hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
hdr.Data = uintptr(t.untyped.(stringptr))
hdr.Len = int(t.packed)
return v
}
// Valid returns true if the Label is a valid one (it has a key).
func (t Label) Valid() bool { return t.key != nil }
// Key returns the key of this Label.
func (t Label) Key() Key { return t.key }
// Format is used for debug printing of labels.
func (t Label) Format(f fmt.State, r rune) {
if !t.Valid() {
io.WriteString(f, `nil`)
return
}
io.WriteString(f, t.Key().Name())
io.WriteString(f, "=")
var buf [128]byte
t.Key().Format(f, buf[:0], t)
}
func (l *list) Valid(index int) bool {
return index >= 0 && index < len(l.labels)
}
func (l *list) Label(index int) Label {
return l.labels[index]
}
func (f *filter) Valid(index int) bool {
return f.underlying.Valid(index)
}
func (f *filter) Label(index int) Label {
l := f.underlying.Label(index)
for _, f := range f.keys {
if l.Key() == f {
return Label{}
}
}
return l
}
func (lm listMap) Find(key Key) Label {
for _, l := range lm.labels {
if l.Key() == key {
return l
}
}
return Label{}
}
func (c mapChain) Find(key Key) Label {
for _, src := range c.maps {
l := src.Find(key)
if l.Valid() {
return l
}
}
return Label{}
}
var emptyList = &list{}
func NewList(labels ...Label) List {
if len(labels) == 0 {
return emptyList
}
return &list{labels: labels}
}
func Filter(l List, keys ...Key) List {
if len(keys) == 0 {
return l
}
return &filter{keys: keys, underlying: l}
}
func NewMap(labels ...Label) Map {
return listMap{labels: labels}
}
func MergeMaps(srcs ...Map) Map {
var nonNil []Map
for _, src := range srcs {
if src != nil {
nonNil = append(nonNil, src)
}
}
if len(nonNil) == 1 {
return nonNil[0]
}
return mapChain{maps: nonNil}
}
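For context, the deleted label package above supplies the generic Key/Label/List/Map primitives used by the x/tools event machinery. Below is a minimal sketch of how they compose, assuming a hypothetical stringKey type implementing Key (concrete keys normally come from the sibling keys package shown at the top of this diff); the package is internal to x/tools, so the import only illustrates the API shape.
package main

import (
	"fmt"
	"io"

	"golang.org/x/tools/internal/event/label"
)

// stringKey is a hypothetical Key implementation, for illustration only.
type stringKey struct{ name, desc string }

func (k *stringKey) Name() string        { return k.name }
func (k *stringKey) Description() string { return k.desc }
func (k *stringKey) Format(w io.Writer, buf []byte, l label.Label) {
	io.WriteString(w, l.UnpackString())
}

func main() {
	msg := &stringKey{name: "message", desc: "a readable message"}
	l := label.OfString(msg, "hello") // pack a string value into a Label
	list := label.NewList(l)          // iterable list of labels
	m := label.NewMap(l)              // keyed lookup over labels
	fmt.Println(list.Valid(0), m.Find(msg).UnpackString())
}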

View file

@@ -1,196 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package fastwalk provides a faster version of filepath.Walk for file system
// scanning tools.
package fastwalk
import (
"errors"
"os"
"path/filepath"
"runtime"
"sync"
)
// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the
// symlink named in the call may be traversed.
var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
// ErrSkipFiles is used as a return value from WalkFuncs to indicate that the
// callback should not be called for any other files in the current directory.
// Child directories will still be traversed.
var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory")
// Walk is a faster implementation of filepath.Walk.
//
// filepath.Walk's design necessarily calls os.Lstat on each file,
// even if the caller needs less info.
// Many tools need only the type of each file.
// On some platforms, this information is provided directly by the readdir
// system call, avoiding the need to stat each file individually.
// fastwalk_unix.go contains a fork of the syscall routines.
//
// See golang.org/issue/16399
//
// Walk walks the file tree rooted at root, calling walkFn for
// each file or directory in the tree, including root.
//
// If fastWalk returns filepath.SkipDir, the directory is skipped.
//
// Unlike filepath.Walk:
// * file stat calls must be done by the user.
// The only provided metadata is the file type, which does not include
// any permission bits.
// * multiple goroutines stat the filesystem concurrently. The provided
// walkFn must be safe for concurrent use.
// * fastWalk can follow symlinks if walkFn returns the TraverseLink
// sentinel error. It is the walkFn's responsibility to prevent
// fastWalk from going into symlink cycles.
func Walk(root string, walkFn func(path string, typ os.FileMode) error) error {
// TODO(bradfitz): make numWorkers configurable? We used a
// minimum of 4 to give the kernel more info about multiple
// things we want, in hopes its I/O scheduling can take
// advantage of that. Hopefully most are in cache. Maybe 4 is
// even too low of a minimum. Profile more.
numWorkers := 4
if n := runtime.NumCPU(); n > numWorkers {
numWorkers = n
}
// Make sure to wait for all workers to finish, otherwise
// walkFn could still be called after returning. This Wait call
// runs after close(e.donec) below.
var wg sync.WaitGroup
defer wg.Wait()
w := &walker{
fn: walkFn,
enqueuec: make(chan walkItem, numWorkers), // buffered for performance
workc: make(chan walkItem, numWorkers), // buffered for performance
donec: make(chan struct{}),
// buffered for correctness & not leaking goroutines:
resc: make(chan error, numWorkers),
}
defer close(w.donec)
for i := 0; i < numWorkers; i++ {
wg.Add(1)
go w.doWork(&wg)
}
todo := []walkItem{{dir: root}}
out := 0
for {
workc := w.workc
var workItem walkItem
if len(todo) == 0 {
workc = nil
} else {
workItem = todo[len(todo)-1]
}
select {
case workc <- workItem:
todo = todo[:len(todo)-1]
out++
case it := <-w.enqueuec:
todo = append(todo, it)
case err := <-w.resc:
out--
if err != nil {
return err
}
if out == 0 && len(todo) == 0 {
// It's safe to quit here, as long as the buffered
// enqueue channel isn't also readable, which might
// happen if the worker sends both another unit of
// work and its result before the other select was
// scheduled and both w.resc and w.enqueuec were
// readable.
select {
case it := <-w.enqueuec:
todo = append(todo, it)
default:
return nil
}
}
}
}
}
// doWork reads directories as instructed (via workc) and runs the
// user's callback function.
func (w *walker) doWork(wg *sync.WaitGroup) {
defer wg.Done()
for {
select {
case <-w.donec:
return
case it := <-w.workc:
select {
case <-w.donec:
return
case w.resc <- w.walk(it.dir, !it.callbackDone):
}
}
}
}
type walker struct {
fn func(path string, typ os.FileMode) error
donec chan struct{} // closed on fastWalk's return
workc chan walkItem // to workers
enqueuec chan walkItem // from workers
resc chan error // from workers
}
type walkItem struct {
dir string
callbackDone bool // callback already called; don't do it again
}
func (w *walker) enqueue(it walkItem) {
select {
case w.enqueuec <- it:
case <-w.donec:
}
}
func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error {
joined := dirName + string(os.PathSeparator) + baseName
if typ == os.ModeDir {
w.enqueue(walkItem{dir: joined})
return nil
}
err := w.fn(joined, typ)
if typ == os.ModeSymlink {
if err == ErrTraverseLink {
// Set callbackDone so we don't call it twice for both the
// symlink-as-symlink and the symlink-as-directory later:
w.enqueue(walkItem{dir: joined, callbackDone: true})
return nil
}
if err == filepath.SkipDir {
// Permit SkipDir on symlinks too.
return nil
}
}
return err
}
func (w *walker) walk(root string, runUserCallback bool) error {
if runUserCallback {
err := w.fn(root, os.ModeDir)
if err == filepath.SkipDir {
return nil
}
if err != nil {
return err
}
}
return readDir(root, w.onDirEnt)
}
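For context, a minimal sketch of calling the Walk function above to count Go files under the current directory. The callback may run on multiple goroutines, so the sketch counts atomically; fastwalk is internal to x/tools, so the import only illustrates the API shape.
package main

import (
	"fmt"
	"os"
	"strings"
	"sync/atomic"

	"golang.org/x/tools/internal/fastwalk"
)

func main() {
	var goFiles int64
	err := fastwalk.Walk(".", func(path string, typ os.FileMode) error {
		if typ.IsDir() {
			return nil // keep descending into directories
		}
		if strings.HasSuffix(path, ".go") {
			// walkFn may be called from multiple goroutines, so count atomically.
			atomic.AddInt64(&goFiles, 1)
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "walk failed:", err)
	}
	fmt.Println("go files:", goFiles)
}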

View file

@@ -1,14 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build freebsd || openbsd || netbsd
// +build freebsd openbsd netbsd
package fastwalk
import "syscall"
func direntInode(dirent *syscall.Dirent) uint64 {
return uint64(dirent.Fileno)
}

View file

@@ -1,15 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (linux || darwin) && !appengine
// +build linux darwin
// +build !appengine
package fastwalk
import "syscall"
func direntInode(dirent *syscall.Dirent) uint64 {
return uint64(dirent.Ino)
}

View file

@@ -1,14 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin || freebsd || openbsd || netbsd
// +build darwin freebsd openbsd netbsd
package fastwalk
import "syscall"
func direntNamlen(dirent *syscall.Dirent) uint64 {
return uint64(dirent.Namlen)
}

View file

@@ -1,29 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && !appengine
// +build linux,!appengine
package fastwalk
import (
"bytes"
"syscall"
"unsafe"
)
func direntNamlen(dirent *syscall.Dirent) uint64 {
const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name))
nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
const nameBufLen = uint16(len(nameBuf))
limit := dirent.Reclen - fixedHdr
if limit > nameBufLen {
limit = nameBufLen
}
nameLen := bytes.IndexByte(nameBuf[:limit], 0)
if nameLen < 0 {
panic("failed to find terminating 0 byte in dirent")
}
return uint64(nameLen)
}

View file

@@ -1,38 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build appengine || (!linux && !darwin && !freebsd && !openbsd && !netbsd)
// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd
package fastwalk
import (
"io/ioutil"
"os"
)
// readDir calls fn for each directory entry in dirName.
// It does not descend into directories or follow symlinks.
// If fn returns a non-nil error, readDir returns with that error
// immediately.
func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
fis, err := ioutil.ReadDir(dirName)
if err != nil {
return err
}
skipFiles := false
for _, fi := range fis {
if fi.Mode().IsRegular() && skipFiles {
continue
}
if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil {
if err == ErrSkipFiles {
skipFiles = true
continue
}
return err
}
}
return nil
}

View file

@@ -1,153 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (linux || darwin || freebsd || openbsd || netbsd) && !appengine
// +build linux darwin freebsd openbsd netbsd
// +build !appengine
package fastwalk
import (
"fmt"
"os"
"syscall"
"unsafe"
)
const blockSize = 8 << 10
// unknownFileMode is a sentinel (and bogus) os.FileMode
// value used to represent a syscall.DT_UNKNOWN Dirent.Type.
const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice
func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
fd, err := open(dirName, 0, 0)
if err != nil {
return &os.PathError{Op: "open", Path: dirName, Err: err}
}
defer syscall.Close(fd)
// The buffer must be at least a block long.
buf := make([]byte, blockSize) // stack-allocated; doesn't escape
bufp := 0 // starting read position in buf
nbuf := 0 // end valid data in buf
skipFiles := false
for {
if bufp >= nbuf {
bufp = 0
nbuf, err = readDirent(fd, buf)
if err != nil {
return os.NewSyscallError("readdirent", err)
}
if nbuf <= 0 {
return nil
}
}
consumed, name, typ := parseDirEnt(buf[bufp:nbuf])
bufp += consumed
if name == "" || name == "." || name == ".." {
continue
}
// Fallback for filesystems (like old XFS) that don't
// support Dirent.Type and have DT_UNKNOWN (0) there
// instead.
if typ == unknownFileMode {
fi, err := os.Lstat(dirName + "/" + name)
if err != nil {
// It got deleted in the meantime.
if os.IsNotExist(err) {
continue
}
return err
}
typ = fi.Mode() & os.ModeType
}
if skipFiles && typ.IsRegular() {
continue
}
if err := fn(dirName, name, typ); err != nil {
if err == ErrSkipFiles {
skipFiles = true
continue
}
return err
}
}
}
func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) {
// golang.org/issue/37269
dirent := &syscall.Dirent{}
copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf)
if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v {
panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v))
}
if len(buf) < int(dirent.Reclen) {
panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen))
}
consumed = int(dirent.Reclen)
if direntInode(dirent) == 0 { // File absent in directory.
return
}
switch dirent.Type {
case syscall.DT_REG:
typ = 0
case syscall.DT_DIR:
typ = os.ModeDir
case syscall.DT_LNK:
typ = os.ModeSymlink
case syscall.DT_BLK:
typ = os.ModeDevice
case syscall.DT_FIFO:
typ = os.ModeNamedPipe
case syscall.DT_SOCK:
typ = os.ModeSocket
case syscall.DT_UNKNOWN:
typ = unknownFileMode
default:
// Skip weird things.
// It's probably a DT_WHT (http://lwn.net/Articles/325369/)
// or something. Revisit if/when this package is moved outside
// of goimports. goimports only cares about regular files,
// symlinks, and directories.
return
}
nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
nameLen := direntNamlen(dirent)
// Special cases for common things:
if nameLen == 1 && nameBuf[0] == '.' {
name = "."
} else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' {
name = ".."
} else {
name = string(nameBuf[:nameLen])
}
return
}
// According to https://golang.org/doc/go1.14#runtime
// A consequence of the implementation of preemption is that on Unix systems, including Linux and macOS
// systems, programs built with Go 1.14 will receive more signals than programs built with earlier releases.
//
// This causes syscall.Open and syscall.ReadDirent to sometimes fail with EINTR errors.
// We need to retry in this case.
func open(path string, mode int, perm uint32) (fd int, err error) {
for {
fd, err := syscall.Open(path, mode, perm)
if err != syscall.EINTR {
return fd, err
}
}
}
func readDirent(fd int, buf []byte) (n int, err error) {
for {
nbuf, err := syscall.ReadDirent(fd, buf)
if err != syscall.EINTR {
return nbuf, err
}
}
}

View file

@@ -1,273 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gocommand is a helper for calling the go command.
package gocommand
import (
"bytes"
"context"
"fmt"
exec "golang.org/x/sys/execabs"
"io"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/tools/internal/event"
)
// A Runner will run go command invocations and serialize
// them if it sees a concurrency error.
type Runner struct {
// once guards the runner initialization.
once sync.Once
// inFlight tracks available workers.
inFlight chan struct{}
// serialized guards the ability to run a go command serially,
// to avoid deadlocks when claiming workers.
serialized chan struct{}
}
const maxInFlight = 10
func (runner *Runner) initialize() {
runner.once.Do(func() {
runner.inFlight = make(chan struct{}, maxInFlight)
runner.serialized = make(chan struct{}, 1)
})
}
// 1.13: go: updates to go.mod needed, but contents have changed
// 1.14: go: updating go.mod: existing contents have changed since last read
var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
// Run is a convenience wrapper around RunRaw.
// It returns only stdout and a "friendly" error.
func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
return stdout, friendly
}
// RunPiped runs the invocation serially, always waiting for any concurrent
// invocations to complete first.
func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
_, err := runner.runPiped(ctx, inv, stdout, stderr)
return err
}
// RunRaw runs the invocation, serializing requests only if they fight over
// go.mod changes.
func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
// Make sure the runner is always initialized.
runner.initialize()
// First, try to run the go command concurrently.
stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv)
// If we encounter a load concurrency error, we need to retry serially.
if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) {
return stdout, stderr, friendlyErr, err
}
event.Error(ctx, "Load concurrency error, will retry serially", err)
// Run serially by calling runPiped.
stdout.Reset()
stderr.Reset()
friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr)
return stdout, stderr, friendlyErr, err
}
func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
// Wait for 1 worker to become available.
select {
case <-ctx.Done():
return nil, nil, nil, ctx.Err()
case runner.inFlight <- struct{}{}:
defer func() { <-runner.inFlight }()
}
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr)
return stdout, stderr, friendlyErr, err
}
func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) {
// Make sure the runner is always initialized.
runner.initialize()
// Acquire the serialization lock. This avoids deadlocks between two
// runPiped commands.
select {
case <-ctx.Done():
return nil, ctx.Err()
case runner.serialized <- struct{}{}:
defer func() { <-runner.serialized }()
}
// Wait for all in-progress go commands to return before proceeding,
// to avoid load concurrency errors.
for i := 0; i < maxInFlight; i++ {
select {
case <-ctx.Done():
return nil, ctx.Err()
case runner.inFlight <- struct{}{}:
// Make sure we always "return" any workers we took.
defer func() { <-runner.inFlight }()
}
}
return inv.runWithFriendlyError(ctx, stdout, stderr)
}
// An Invocation represents a call to the go command.
type Invocation struct {
Verb string
Args []string
BuildFlags []string
ModFlag string
ModFile string
Overlay string
// If CleanEnv is set, the invocation will run only with the environment
// in Env, not starting with os.Environ.
CleanEnv bool
Env []string
WorkingDir string
Logf func(format string, args ...interface{})
}
func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) {
rawError = i.run(ctx, stdout, stderr)
if rawError != nil {
friendlyError = rawError
// Check for 'go' executable not being found.
if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
friendlyError = fmt.Errorf("go command required, not found: %v", ee)
}
if ctx.Err() != nil {
friendlyError = ctx.Err()
}
friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr)
}
return
}
func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
log := i.Logf
if log == nil {
log = func(string, ...interface{}) {}
}
goArgs := []string{i.Verb}
appendModFile := func() {
if i.ModFile != "" {
goArgs = append(goArgs, "-modfile="+i.ModFile)
}
}
appendModFlag := func() {
if i.ModFlag != "" {
goArgs = append(goArgs, "-mod="+i.ModFlag)
}
}
appendOverlayFlag := func() {
if i.Overlay != "" {
goArgs = append(goArgs, "-overlay="+i.Overlay)
}
}
switch i.Verb {
case "env", "version":
goArgs = append(goArgs, i.Args...)
case "mod":
// mod needs the sub-verb before flags.
goArgs = append(goArgs, i.Args[0])
appendModFile()
goArgs = append(goArgs, i.Args[1:]...)
case "get":
goArgs = append(goArgs, i.BuildFlags...)
appendModFile()
goArgs = append(goArgs, i.Args...)
default: // notably list and build.
goArgs = append(goArgs, i.BuildFlags...)
appendModFile()
appendModFlag()
appendOverlayFlag()
goArgs = append(goArgs, i.Args...)
}
cmd := exec.Command("go", goArgs...)
cmd.Stdout = stdout
cmd.Stderr = stderr
// On darwin the cwd gets resolved to the real path, which breaks anything that
// expects the working directory to keep the original path, including the
// go command when dealing with modules.
// The Go stdlib has a special feature where if the cwd and the PWD are the
// same node then it trusts the PWD, so by setting it in the env for the child
// process we fix up all the paths returned by the go command.
if !i.CleanEnv {
cmd.Env = os.Environ()
}
cmd.Env = append(cmd.Env, i.Env...)
if i.WorkingDir != "" {
cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
cmd.Dir = i.WorkingDir
}
defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
return runCmdContext(ctx, cmd)
}
// runCmdContext is like exec.CommandContext except it sends os.Interrupt
// before os.Kill.
func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
if err := cmd.Start(); err != nil {
return err
}
resChan := make(chan error, 1)
go func() {
resChan <- cmd.Wait()
}()
select {
case err := <-resChan:
return err
case <-ctx.Done():
}
// Cancelled. Interrupt and see if it ends voluntarily.
cmd.Process.Signal(os.Interrupt)
select {
case err := <-resChan:
return err
case <-time.After(time.Second):
}
// Didn't shut down in response to interrupt. Kill it hard.
cmd.Process.Kill()
return <-resChan
}
func cmdDebugStr(cmd *exec.Cmd) string {
env := make(map[string]string)
for _, kv := range cmd.Env {
split := strings.SplitN(kv, "=", 2)
k, v := split[0], split[1]
env[k] = v
}
var args []string
for _, arg := range cmd.Args {
quoted := strconv.Quote(arg)
if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") {
args = append(args, quoted)
} else {
args = append(args, arg)
}
}
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
}
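For context, a minimal sketch of driving the Runner/Invocation types above to run `go env GOFLAGS`. The package is internal to x/tools, and the working directory, verb, and args here are illustrative assumptions.
package main

import (
	"context"
	"fmt"

	"golang.org/x/tools/internal/gocommand"
)

func main() {
	runner := &gocommand.Runner{} // zero value; initialized lazily on first use
	inv := gocommand.Invocation{
		Verb:       "env",
		Args:       []string{"GOFLAGS"},
		WorkingDir: ".",
	}
	stdout, err := runner.Run(context.Background(), inv) // stdout plus a "friendly" error
	if err != nil {
		fmt.Println("go command failed:", err)
		return
	}
	fmt.Printf("GOFLAGS: %s", stdout.String())
}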

View file

@@ -1,107 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocommand
import (
"bytes"
"context"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"golang.org/x/mod/semver"
)
// ModuleJSON holds information about a module.
type ModuleJSON struct {
Path string // module path
Version string // module version
Versions []string // available module versions (with -versions)
Replace *ModuleJSON // replaced by this module
Time *time.Time // time version was created
Update *ModuleJSON // available update, if any (with -u)
Main bool // is this the main module?
Indirect bool // is this module only an indirect dependency of main module?
Dir string // directory holding files for this module, if any
GoMod string // path to go.mod file used when loading this module, if any
GoVersion string // go version used in module
}
var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands
// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields,
// of which only Verb and Args are modified to run the appropriate Go command.
// Inspired by setDefaultBuildMod in modload/init.go
func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) {
mainMod, go114, err := getMainModuleAnd114(ctx, inv, r)
if err != nil {
return nil, false, err
}
// We check the GOFLAGS to see if there is anything overridden or not.
inv.Verb = "env"
inv.Args = []string{"GOFLAGS"}
stdout, err := r.Run(ctx, inv)
if err != nil {
return nil, false, err
}
goflags := string(bytes.TrimSpace(stdout.Bytes()))
matches := modFlagRegexp.FindStringSubmatch(goflags)
var modFlag string
if len(matches) != 0 {
modFlag = matches[1]
}
if modFlag != "" {
// Don't override an explicit '-mod=' argument.
return mainMod, modFlag == "vendor", nil
}
if mainMod == nil || !go114 {
return mainMod, false, nil
}
// Check 1.14's automatic vendor mode.
if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() {
if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 {
// The Go version is at least 1.14, and a vendor directory exists.
// Set -mod=vendor by default.
return mainMod, true, nil
}
}
return mainMod, false, nil
}
// getMainModuleAnd114 gets the main module's information and whether the
// go command in use is 1.14+. This is the information needed to figure out
// if vendoring should be enabled.
func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) {
const format = `{{.Path}}
{{.Dir}}
{{.GoMod}}
{{.GoVersion}}
{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}
`
inv.Verb = "list"
inv.Args = []string{"-m", "-f", format}
stdout, err := r.Run(ctx, inv)
if err != nil {
return nil, false, err
}
lines := strings.Split(stdout.String(), "\n")
if len(lines) < 5 {
return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String())
}
mod := &ModuleJSON{
Path: lines[0],
Dir: lines[1],
GoMod: lines[2],
GoVersion: lines[3],
Main: true,
}
return mod, lines[4] == "go1.14", nil
}

View file

@@ -1,51 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocommand
import (
"context"
"fmt"
"strings"
)
// GoVersion checks the go version by running "go list" with modules off.
// It returns the X in Go 1.X.
func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
inv.Verb = "list"
inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`}
inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off")
// Unset any unneeded flags, and remove them from BuildFlags, if they're
// present.
inv.ModFile = ""
inv.ModFlag = ""
var buildFlags []string
for _, flag := range inv.BuildFlags {
// Flags can be prefixed by one or two dashes.
f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-")
if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") {
continue
}
buildFlags = append(buildFlags, flag)
}
inv.BuildFlags = buildFlags
stdoutBytes, err := r.Run(ctx, inv)
if err != nil {
return 0, err
}
stdout := stdoutBytes.String()
if len(stdout) < 3 {
return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout)
}
// Split up "[go1.1 go1.15]"
tags := strings.Fields(stdout[1 : len(stdout)-2])
for i := len(tags) - 1; i >= 0; i-- {
var version int
if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil {
continue
}
return version, nil
}
return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags)
}
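GoVersion above and VendorEnabled in the preceding file are typically combined to decide how package resolution should behave. A short hedged sketch, with the same caveat that the import path is internal to x/tools:
package main

import (
	"context"
	"fmt"

	"golang.org/x/tools/internal/gocommand"
)

func main() {
	ctx := context.Background()
	runner := &gocommand.Runner{}
	inv := gocommand.Invocation{WorkingDir: "."}

	if minor, err := gocommand.GoVersion(ctx, inv, runner); err == nil {
		fmt.Printf("go command release: 1.%d\n", minor)
	}
	if mainMod, vendorOn, err := gocommand.VendorEnabled(ctx, inv, runner); err == nil && mainMod != nil {
		fmt.Println("main module:", mainMod.Path, "vendor mode:", vendorOn)
	}
}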

View file

@@ -1,264 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gopathwalk is like filepath.Walk but specialized for finding Go
// packages, particularly in $GOPATH and $GOROOT.
package gopathwalk
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"time"
"golang.org/x/tools/internal/fastwalk"
)
// Options controls the behavior of a Walk call.
type Options struct {
// If Logf is non-nil, debug logging is enabled through this function.
Logf func(format string, args ...interface{})
// Search module caches. Also disables legacy goimports ignore rules.
ModulesEnabled bool
}
// RootType indicates the type of a Root.
type RootType int
const (
RootUnknown RootType = iota
RootGOROOT
RootGOPATH
RootCurrentModule
RootModuleCache
RootOther
)
// A Root is a starting point for a Walk.
type Root struct {
Path string
Type RootType
}
// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
// For each package found, add will be called (concurrently) with the absolute
// paths of the containing source directory and the package directory.
// add will be called concurrently.
func Walk(roots []Root, add func(root Root, dir string), opts Options) {
WalkSkip(roots, add, func(Root, string) bool { return false }, opts)
}
// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
// For each package found, add will be called (concurrently) with the absolute
// paths of the containing source directory and the package directory.
// For each directory that will be scanned, skip will be called (concurrently)
// with the absolute paths of the containing source directory and the directory.
// If skip returns false on a directory it will be processed.
// add will be called concurrently.
// skip will be called concurrently.
func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) {
for _, root := range roots {
walkDir(root, add, skip, opts)
}
}
// walkDir creates a walker and starts fastwalk with this walker.
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
if opts.Logf != nil {
opts.Logf("skipping nonexistent directory: %v", root.Path)
}
return
}
start := time.Now()
if opts.Logf != nil {
opts.Logf("gopathwalk: scanning %s", root.Path)
}
w := &walker{
root: root,
add: add,
skip: skip,
opts: opts,
}
w.init()
if err := fastwalk.Walk(root.Path, w.walk); err != nil {
log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err)
}
if opts.Logf != nil {
opts.Logf("gopathwalk: scanned %s in %v", root.Path, time.Since(start))
}
}
// walker is the callback for fastwalk.Walk.
type walker struct {
root Root // The source directory to scan.
add func(Root, string) // The callback that will be invoked for every possible Go package dir.
skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true.
opts Options // Options passed to Walk by the user.
ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
}
// init initializes the walker based on its Options
func (w *walker) init() {
var ignoredPaths []string
if w.root.Type == RootModuleCache {
ignoredPaths = []string{"cache"}
}
if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH {
ignoredPaths = w.getIgnoredDirs(w.root.Path)
ignoredPaths = append(ignoredPaths, "v", "mod")
}
for _, p := range ignoredPaths {
full := filepath.Join(w.root.Path, p)
if fi, err := os.Stat(full); err == nil {
w.ignoredDirs = append(w.ignoredDirs, fi)
if w.opts.Logf != nil {
w.opts.Logf("Directory added to ignore list: %s", full)
}
} else if w.opts.Logf != nil {
w.opts.Logf("Error statting ignored directory: %v", err)
}
}
}
// getIgnoredDirs reads an optional config file at <path>/.goimportsignore
// of relative directories to ignore when scanning for go files.
// The provided path is one of the $GOPATH entries with "src" appended.
func (w *walker) getIgnoredDirs(path string) []string {
file := filepath.Join(path, ".goimportsignore")
slurp, err := ioutil.ReadFile(file)
if w.opts.Logf != nil {
if err != nil {
w.opts.Logf("%v", err)
} else {
w.opts.Logf("Read %s", file)
}
}
if err != nil {
return nil
}
var ignoredDirs []string
bs := bufio.NewScanner(bytes.NewReader(slurp))
for bs.Scan() {
line := strings.TrimSpace(bs.Text())
if line == "" || strings.HasPrefix(line, "#") {
continue
}
ignoredDirs = append(ignoredDirs, line)
}
return ignoredDirs
}
// shouldSkipDir reports whether the file should be skipped or not.
func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
for _, ignoredDir := range w.ignoredDirs {
if os.SameFile(fi, ignoredDir) {
return true
}
}
if w.skip != nil {
// Check with the user specified callback.
return w.skip(w.root, dir)
}
return false
}
// walk walks through the given path.
func (w *walker) walk(path string, typ os.FileMode) error {
dir := filepath.Dir(path)
if typ.IsRegular() {
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
// Doesn't make sense to have regular files
// directly in your $GOPATH/src or $GOROOT/src.
return fastwalk.ErrSkipFiles
}
if !strings.HasSuffix(path, ".go") {
return nil
}
w.add(w.root, dir)
return fastwalk.ErrSkipFiles
}
if typ == os.ModeDir {
base := filepath.Base(path)
if base == "" || base[0] == '.' || base[0] == '_' ||
base == "testdata" ||
(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
(!w.opts.ModulesEnabled && base == "node_modules") {
return filepath.SkipDir
}
fi, err := os.Lstat(path)
if err == nil && w.shouldSkipDir(fi, path) {
return filepath.SkipDir
}
return nil
}
if typ == os.ModeSymlink {
base := filepath.Base(path)
if strings.HasPrefix(base, ".#") {
// Emacs noise.
return nil
}
fi, err := os.Lstat(path)
if err != nil {
// Just ignore it.
return nil
}
if w.shouldTraverse(dir, fi) {
return fastwalk.ErrTraverseLink
}
}
return nil
}
// shouldTraverse reports whether the symlink fi, found in dir,
// should be followed. It makes sure symlinks were never visited
// before to avoid symlink loops.
func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool {
path := filepath.Join(dir, fi.Name())
target, err := filepath.EvalSymlinks(path)
if err != nil {
return false
}
ts, err := os.Stat(target)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return false
}
if !ts.IsDir() {
return false
}
if w.shouldSkipDir(ts, dir) {
return false
}
// Check for symlink loops by statting each directory component
// and seeing if any are the same file as ts.
for {
parent := filepath.Dir(path)
if parent == path {
// Made it to the root without seeing a cycle.
// Use this symlink.
return true
}
parentInfo, err := os.Stat(parent)
if err != nil {
return false
}
if os.SameFile(ts, parentInfo) {
// Cycle. Don't traverse.
return false
}
path = parent
}
}
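For context, a minimal sketch of scanning $GOPATH/src with the Walk/WalkSkip API above. The add callback is invoked concurrently, so the sketch guards its slice with a mutex; as with the other packages in this diff, gopathwalk is internal to x/tools and the import only illustrates the API shape.
package main

import (
	"fmt"
	"go/build"
	"path/filepath"
	"sync"

	"golang.org/x/tools/internal/gopathwalk"
)

func main() {
	roots := []gopathwalk.Root{
		{Path: filepath.Join(build.Default.GOPATH, "src"), Type: gopathwalk.RootGOPATH},
	}
	var (
		mu   sync.Mutex
		dirs []string
	)
	add := func(root gopathwalk.Root, dir string) {
		mu.Lock()
		defer mu.Unlock()
		dirs = append(dirs, dir) // called concurrently for every candidate package dir
	}
	gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: false})
	fmt.Println("candidate package directories:", len(dirs))
}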

File diff suppressed because it is too large

View file

@@ -1,346 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run mkstdlib.go
// Package imports implements a Go pretty-printer (like package "go/format")
// that also adds or removes import statements as necessary.
package imports
import (
"bufio"
"bytes"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/printer"
"go/token"
"io"
"regexp"
"strconv"
"strings"
"golang.org/x/tools/go/ast/astutil"
)
// Options is golang.org/x/tools/imports.Options with extra internal-only options.
type Options struct {
Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state.
// LocalPrefix is a comma-separated string of import path prefixes, which, if
// set, instructs Process to sort the import paths with the given prefixes
// into another group after 3rd-party packages.
LocalPrefix string
Fragment bool // Accept fragment of a source file (no package statement)
AllErrors bool // Report all errors (not just the first 10 on different lines)
Comments bool // Print comments (true if nil *Options provided)
TabIndent bool // Use tabs for indent (true if nil *Options provided)
TabWidth int // Tab width (8 if nil *Options provided)
FormatOnly bool // Disable the insertion and deletion of imports
}
// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env.
func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
fileSet := token.NewFileSet()
file, adjust, err := parse(fileSet, filename, src, opt)
if err != nil {
return nil, err
}
if !opt.FormatOnly {
if err := fixImports(fileSet, file, filename, opt.Env); err != nil {
return nil, err
}
}
return formatFile(fileSet, file, src, adjust, opt)
}
// FixImports returns a list of fixes to the imports that, when applied,
// will leave the imports in the same state as Process. src and opt must
// be specified.
//
// Note that filename's directory influences which imports can be chosen,
// so it is important that filename be accurate.
func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
fileSet := token.NewFileSet()
file, _, err := parse(fileSet, filename, src, opt)
if err != nil {
return nil, err
}
return getFixes(fileSet, file, filename, opt.Env)
}
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
// is added in when parsing the file. src and opts must be specified, but no
// env is needed.
func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) {
// Don't use parse() -- we don't care about fragments or statement lists
// here, and we need to work with unparseable files.
fileSet := token.NewFileSet()
parserMode := parser.Mode(0)
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
}
parserMode |= extraMode
file, err := parser.ParseFile(fileSet, filename, src, parserMode)
if file == nil {
return nil, err
}
// Apply the fixes to the file.
apply(fileSet, file, fixes)
return formatFile(fileSet, file, src, nil, opt)
}
func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
mergeImports(fileSet, file)
sortImports(opt.LocalPrefix, fileSet, file)
imps := astutil.Imports(fileSet, file)
var spacesBefore []string // import paths we need spaces before
for _, impSection := range imps {
// Within each block of contiguous imports, see if any
// import lines are in different group numbers. If so,
// we'll need to put a space between them so it's
// compatible with gofmt.
lastGroup := -1
for _, importSpec := range impSection {
importPath, _ := strconv.Unquote(importSpec.Path.Value)
groupNum := importGroup(opt.LocalPrefix, importPath)
if groupNum != lastGroup && lastGroup != -1 {
spacesBefore = append(spacesBefore, importPath)
}
lastGroup = groupNum
}
}
printerMode := printer.UseSpaces
if opt.TabIndent {
printerMode |= printer.TabIndent
}
printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
var buf bytes.Buffer
err := printConfig.Fprint(&buf, fileSet, file)
if err != nil {
return nil, err
}
out := buf.Bytes()
if adjust != nil {
out = adjust(src, out)
}
if len(spacesBefore) > 0 {
out, err = addImportSpaces(bytes.NewReader(out), spacesBefore)
if err != nil {
return nil, err
}
}
out, err = format.Source(out)
if err != nil {
return nil, err
}
return out, nil
}
// parse parses src, which was read from filename,
// as a Go source file or statement list.
func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
parserMode := parser.Mode(0)
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
}
// Try as whole source file.
file, err := parser.ParseFile(fset, filename, src, parserMode)
if err == nil {
return file, nil, nil
}
// If the error is that the source file didn't begin with a
// package line and we accept fragmented input, fall through to
// try as a source fragment. Stop and return on any other error.
if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
return nil, nil, err
}
// If this is a declaration list, make it a source file
// by inserting a package clause.
// Insert using a ;, not a newline, so that parse errors are on
// the correct line.
const prefix = "package main;"
psrc := append([]byte(prefix), src...)
file, err = parser.ParseFile(fset, filename, psrc, parserMode)
if err == nil {
// Gofmt will turn the ; into a \n.
// Do that ourselves now and update the file contents,
// so that positions and line numbers are correct going forward.
psrc[len(prefix)-1] = '\n'
fset.File(file.Package).SetLinesForContent(psrc)
// If a main function exists, we will assume this is a main
// package and leave the file.
if containsMainFunc(file) {
return file, nil, nil
}
adjust := func(orig, src []byte) []byte {
// Remove the package clause.
src = src[len(prefix):]
return matchSpace(orig, src)
}
return file, adjust, nil
}
// If the error is that the source file didn't begin with a
// declaration, fall through to try as a statement list.
// Stop and return on any other error.
if !strings.Contains(err.Error(), "expected declaration") {
return nil, nil, err
}
// If this is a statement list, make it a source file
// by inserting a package clause and turning the list
// into a function body. This handles expressions too.
// Insert using a ;, not a newline, so that the line numbers
// in fsrc match the ones in src.
fsrc := append(append([]byte("package p; func _() {"), src...), '}')
file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
if err == nil {
adjust := func(orig, src []byte) []byte {
// Remove the wrapping.
// Gofmt has turned the ; into a \n\n.
src = src[len("package p\n\nfunc _() {"):]
src = src[:len(src)-len("}\n")]
// Gofmt has also indented the function body one level.
// Remove that indent.
src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
return matchSpace(orig, src)
}
return file, adjust, nil
}
// Failed, and out of options.
return nil, nil, err
}
// containsMainFunc checks if a file contains a function declaration with the
// function signature 'func main()'
func containsMainFunc(file *ast.File) bool {
for _, decl := range file.Decls {
if f, ok := decl.(*ast.FuncDecl); ok {
if f.Name.Name != "main" {
continue
}
if len(f.Type.Params.List) != 0 {
continue
}
if f.Type.Results != nil && len(f.Type.Results.List) != 0 {
continue
}
return true
}
}
return false
}
func cutSpace(b []byte) (before, middle, after []byte) {
i := 0
for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') {
i++
}
j := len(b)
for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') {
j--
}
if i <= j {
return b[:i], b[i:j], b[j:]
}
return nil, nil, b[j:]
}
// matchSpace reformats src to use the same space context as orig.
// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src.
// 2) matchSpace copies the indentation of the first non-blank line in orig
// to every non-blank line in src.
// 3) matchSpace copies the trailing space from orig and uses it in place
// of src's trailing space.
func matchSpace(orig []byte, src []byte) []byte {
before, _, after := cutSpace(orig)
i := bytes.LastIndex(before, []byte{'\n'})
before, indent := before[:i+1], before[i+1:]
_, src, _ = cutSpace(src)
var b bytes.Buffer
b.Write(before)
for len(src) > 0 {
line := src
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, src = line[:i+1], line[i+1:]
} else {
src = nil
}
if len(line) > 0 && line[0] != '\n' { // not blank
b.Write(indent)
}
b.Write(line)
}
b.Write(after)
return b.Bytes()
}
var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`)
func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) {
var out bytes.Buffer
in := bufio.NewReader(r)
inImports := false
done := false
for {
s, err := in.ReadString('\n')
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
if !inImports && !done && strings.HasPrefix(s, "import") {
inImports = true
}
if inImports && (strings.HasPrefix(s, "var") ||
strings.HasPrefix(s, "func") ||
strings.HasPrefix(s, "const") ||
strings.HasPrefix(s, "type")) {
done = true
inImports = false
}
if inImports && len(breaks) > 0 {
if m := impLine.FindStringSubmatch(s); m != nil {
if m[1] == breaks[0] {
out.WriteByte('\n')
breaks = breaks[1:]
}
}
}
fmt.Fprint(&out, s)
}
return out.Bytes(), nil
}
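For context, a minimal sketch of the Process entry point above in FormatOnly mode, which exercises the parse/format path without needing a configured ProcessEnv. This internal package backs the public golang.org/x/tools/imports API; the filename and source here are made up for illustration.
package main

import (
	"fmt"

	"golang.org/x/tools/internal/imports"
)

func main() {
	src := []byte("package demo\nfunc Hello()string{  return \"hi\"  }\n")
	out, err := imports.Process("demo.go", src, &imports.Options{
		Comments:   true,
		TabIndent:  true,
		TabWidth:   8,
		FormatOnly: true, // skip import fixing, which would need a ProcessEnv
	})
	if err != nil {
		fmt.Println("process failed:", err)
		return
	}
	fmt.Print(string(out))
}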

View file

@@ -1,695 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"golang.org/x/mod/module"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/gopathwalk"
)
// ModuleResolver implements resolver for modules using the go command as little
// as feasible.
type ModuleResolver struct {
env *ProcessEnv
moduleCacheDir string
dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory.
roots []gopathwalk.Root
scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots.
scannedRoots map[gopathwalk.Root]bool
initialized bool
main *gocommand.ModuleJSON
modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path...
modsByDir []*gocommand.ModuleJSON // ...or Dir.
// moduleCacheCache stores information about the module cache.
moduleCacheCache *dirInfoCache
otherCache *dirInfoCache
}
func newModuleResolver(e *ProcessEnv) *ModuleResolver {
r := &ModuleResolver{
env: e,
scanSema: make(chan struct{}, 1),
}
r.scanSema <- struct{}{}
return r
}
func (r *ModuleResolver) init() error {
if r.initialized {
return nil
}
goenv, err := r.env.goEnv()
if err != nil {
return err
}
inv := gocommand.Invocation{
BuildFlags: r.env.BuildFlags,
ModFlag: r.env.ModFlag,
ModFile: r.env.ModFile,
Env: r.env.env(),
Logf: r.env.Logf,
WorkingDir: r.env.WorkingDir,
}
mainMod, vendorEnabled, err := gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner)
if err != nil {
return err
}
if mainMod != nil && vendorEnabled {
// Vendor mode is on, so all the non-Main modules are irrelevant,
// and we need to search /vendor for everything.
r.main = mainMod
r.dummyVendorMod = &gocommand.ModuleJSON{
Path: "",
Dir: filepath.Join(mainMod.Dir, "vendor"),
}
r.modsByModPath = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod}
r.modsByDir = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod}
} else {
// Vendor mode is off, so run go list -m ... to find everything.
err := r.initAllMods()
// We expect an error when running outside of a module with
// GO111MODULE=on. Other errors are fatal.
if err != nil {
if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") {
return err
}
}
}
if gmc := r.env.Env["GOMODCACHE"]; gmc != "" {
r.moduleCacheDir = gmc
} else {
gopaths := filepath.SplitList(goenv["GOPATH"])
if len(gopaths) == 0 {
return fmt.Errorf("empty GOPATH")
}
r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod")
}
sort.Slice(r.modsByModPath, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.modsByModPath[x].Path, "/")
}
return count(j) < count(i) // descending order
})
sort.Slice(r.modsByDir, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.modsByDir[x].Dir, "/")
}
return count(j) < count(i) // descending order
})
r.roots = []gopathwalk.Root{
{filepath.Join(goenv["GOROOT"], "/src"), gopathwalk.RootGOROOT},
}
if r.main != nil {
r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
}
if vendorEnabled {
r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther})
} else {
addDep := func(mod *gocommand.ModuleJSON) {
if mod.Replace == nil {
// This is redundant with the cache, but we'll skip it cheaply enough.
r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache})
} else {
r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
}
}
// Walk dependent modules before scanning the full mod cache, direct deps first.
for _, mod := range r.modsByModPath {
if !mod.Indirect && !mod.Main {
addDep(mod)
}
}
for _, mod := range r.modsByModPath {
if mod.Indirect && !mod.Main {
addDep(mod)
}
}
r.roots = append(r.roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache})
}
r.scannedRoots = map[gopathwalk.Root]bool{}
if r.moduleCacheCache == nil {
r.moduleCacheCache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
}
}
if r.otherCache == nil {
r.otherCache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
}
}
r.initialized = true
return nil
}
func (r *ModuleResolver) initAllMods() error {
stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-e", "-json", "...")
if err != nil {
return err
}
for dec := json.NewDecoder(stdout); dec.More(); {
mod := &gocommand.ModuleJSON{}
if err := dec.Decode(mod); err != nil {
return err
}
if mod.Dir == "" {
if r.env.Logf != nil {
r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path)
}
// Can't do anything with a module that's not downloaded.
continue
}
// golang/go#36193: the go command doesn't always clean paths.
mod.Dir = filepath.Clean(mod.Dir)
r.modsByModPath = append(r.modsByModPath, mod)
r.modsByDir = append(r.modsByDir, mod)
if mod.Main {
r.main = mod
}
}
return nil
}
func (r *ModuleResolver) ClearForNewScan() {
<-r.scanSema
r.scannedRoots = map[gopathwalk.Root]bool{}
r.otherCache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
}
r.scanSema <- struct{}{}
}
func (r *ModuleResolver) ClearForNewMod() {
<-r.scanSema
*r = ModuleResolver{
env: r.env,
moduleCacheCache: r.moduleCacheCache,
otherCache: r.otherCache,
scanSema: r.scanSema,
}
r.init()
r.scanSema <- struct{}{}
}
// findPackage returns the module and directory that contains the package at
// the given import path, or returns nil, "" if no module is in scope.
func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) {
// This can't find packages in the stdlib, but that's harmless for all
// the existing code paths.
for _, m := range r.modsByModPath {
if !strings.HasPrefix(importPath, m.Path) {
continue
}
pathInModule := importPath[len(m.Path):]
pkgDir := filepath.Join(m.Dir, pathInModule)
if r.dirIsNestedModule(pkgDir, m) {
continue
}
if info, ok := r.cacheLoad(pkgDir); ok {
if loaded, err := info.reachedStatus(nameLoaded); loaded {
if err != nil {
continue // No package in this dir.
}
return m, pkgDir
}
if scanned, err := info.reachedStatus(directoryScanned); scanned && err != nil {
continue // Dir is unreadable, etc.
}
// This is slightly wrong: a directory doesn't have to have an
// importable package to count as a package for package-to-module
// resolution. package main or _test files should count but
// don't.
// TODO(heschi): fix this.
if _, err := r.cachePackageName(info); err == nil {
return m, pkgDir
}
}
// Not cached. Read the filesystem.
pkgFiles, err := ioutil.ReadDir(pkgDir)
if err != nil {
continue
}
// A module only contains a package if it has buildable go
// files in that directory. If not, it could be provided by an
// outer module. See #29736.
for _, fi := range pkgFiles {
if ok, _ := r.env.matchFile(pkgDir, fi.Name()); ok {
return m, pkgDir
}
}
}
return nil, ""
}
func (r *ModuleResolver) cacheLoad(dir string) (directoryPackageInfo, bool) {
if info, ok := r.moduleCacheCache.Load(dir); ok {
return info, ok
}
return r.otherCache.Load(dir)
}
func (r *ModuleResolver) cacheStore(info directoryPackageInfo) {
if info.rootType == gopathwalk.RootModuleCache {
r.moduleCacheCache.Store(info.dir, info)
} else {
r.otherCache.Store(info.dir, info)
}
}
func (r *ModuleResolver) cacheKeys() []string {
return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...)
}
// cachePackageName caches the package name for a dir already in the cache.
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) {
if info.rootType == gopathwalk.RootModuleCache {
return r.moduleCacheCache.CachePackageName(info)
}
return r.otherCache.CachePackageName(info)
}
func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
if info.rootType == gopathwalk.RootModuleCache {
return r.moduleCacheCache.CacheExports(ctx, env, info)
}
return r.otherCache.CacheExports(ctx, env, info)
}
// findModuleByDir returns the module that contains dir, or nil if no such
// module is in scope.
func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON {
// This is quite tricky and may not be correct. dir could be:
// - a package in the main module.
// - a replace target underneath the main module's directory.
// - a nested module in the above.
// - a replace target somewhere totally random.
// - a nested module in the above.
// - in the mod cache.
// - in /vendor/ in -mod=vendor mode.
// - nested module? Dunno.
// Rumor has it that replace targets cannot contain other replace targets.
for _, m := range r.modsByDir {
if !strings.HasPrefix(dir, m.Dir) {
continue
}
if r.dirIsNestedModule(dir, m) {
continue
}
return m
}
return nil
}
// dirIsNestedModule reports if dir is contained in a nested module underneath
// mod, not actually in mod.
func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON) bool {
if !strings.HasPrefix(dir, mod.Dir) {
return false
}
if r.dirInModuleCache(dir) {
// Nested modules in the module cache are pruned,
// so it cannot be a nested module.
return false
}
if mod != nil && mod == r.dummyVendorMod {
// The /vendor pseudomodule is flattened and doesn't actually count.
return false
}
modDir, _ := r.modInfo(dir)
if modDir == "" {
return false
}
return modDir != mod.Dir
}
func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) {
readModName := func(modFile string) string {
modBytes, err := ioutil.ReadFile(modFile)
if err != nil {
return ""
}
return modulePath(modBytes)
}
if r.dirInModuleCache(dir) {
if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 {
index := strings.Index(dir, matches[1]+"@"+matches[2])
modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2])
return modDir, readModName(filepath.Join(modDir, "go.mod"))
}
}
for {
if info, ok := r.cacheLoad(dir); ok {
return info.moduleDir, info.moduleName
}
f := filepath.Join(dir, "go.mod")
info, err := os.Stat(f)
if err == nil && !info.IsDir() {
return dir, readModName(f)
}
d := filepath.Dir(dir)
if len(d) >= len(dir) {
return "", "" // reached top of file system, no go.mod
}
dir = d
}
}
func (r *ModuleResolver) dirInModuleCache(dir string) bool {
if r.moduleCacheDir == "" {
return false
}
return strings.HasPrefix(dir, r.moduleCacheDir)
}
func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
if err := r.init(); err != nil {
return nil, err
}
names := map[string]string{}
for _, path := range importPaths {
_, packageDir := r.findPackage(path)
if packageDir == "" {
continue
}
name, err := packageDirToName(packageDir)
if err != nil {
continue
}
names[path] = name
}
return names, nil
}
func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error {
if err := r.init(); err != nil {
return err
}
processDir := func(info directoryPackageInfo) {
// Skip this directory if we were not able to get the package information successfully.
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
return
}
pkg, err := r.canonicalize(info)
if err != nil {
return
}
if !callback.dirFound(pkg) {
return
}
pkg.packageName, err = r.cachePackageName(info)
if err != nil {
return
}
if !callback.packageNameLoaded(pkg) {
return
}
_, exports, err := r.loadExports(ctx, pkg, false)
if err != nil {
return
}
callback.exportsLoaded(pkg, exports)
}
// Start processing everything in the cache, and listen for the new stuff
// we discover in the walk below.
stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir)
defer stop1()
stop2 := r.otherCache.ScanAndListen(ctx, processDir)
defer stop2()
// We assume cached directories are fully cached, including all their
// children, and have not changed. We can skip them.
skip := func(root gopathwalk.Root, dir string) bool {
info, ok := r.cacheLoad(dir)
if !ok {
return false
}
// This directory can be skipped as long as we have already scanned it.
// Packages with errors will continue to have errors, so there is no need
// to rescan them.
packageScanned, _ := info.reachedStatus(directoryScanned)
return packageScanned
}
// Add anything new to the cache, and process it if we're still listening.
add := func(root gopathwalk.Root, dir string) {
r.cacheStore(r.scanDirForPackage(root, dir))
}
// r.roots and the callback are not necessarily safe to use in the
// goroutine below. Process them eagerly.
roots := filterRoots(r.roots, callback.rootFound)
// We can't cancel walks, because we need them to finish to have a usable
// cache. Instead, run them in a separate goroutine and detach.
scanDone := make(chan struct{})
go func() {
select {
case <-ctx.Done():
return
case <-r.scanSema:
}
defer func() { r.scanSema <- struct{}{} }()
// We have the lock on r.scannedRoots, and no other scans can run.
for _, root := range roots {
if ctx.Err() != nil {
return
}
if r.scannedRoots[root] {
continue
}
gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true})
r.scannedRoots[root] = true
}
close(scanDone)
}()
select {
case <-ctx.Done():
case <-scanDone:
}
return nil
}
func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) float64 {
if _, ok := stdlib[path]; ok {
return MaxRelevance
}
mod, _ := r.findPackage(path)
return modRelevance(mod)
}
func modRelevance(mod *gocommand.ModuleJSON) float64 {
var relevance float64
switch {
case mod == nil: // out of scope
return MaxRelevance - 4
case mod.Indirect:
relevance = MaxRelevance - 3
case !mod.Main:
relevance = MaxRelevance - 2
default:
relevance = MaxRelevance - 1 // main module ties with stdlib
}
_, versionString, ok := module.SplitPathVersion(mod.Path)
if ok {
index := strings.Index(versionString, "v")
if index == -1 {
return relevance
}
if versionNumber, err := strconv.ParseFloat(versionString[index+1:], 64); err == nil {
relevance += versionNumber / 1000
}
}
return relevance
}
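The version-suffix tiebreak above can be checked in isolation. Below is a minimal standalone sketch, not part of the vendored file, that mirrors the same arithmetic using the exported golang.org/x/mod/module.SplitPathVersion; the maxRelevance constant and the example module paths are placeholders.

package main

import (
	"fmt"
	"strconv"
	"strings"

	"golang.org/x/mod/module"
)

// maxRelevance stands in for the package-level MaxRelevance constant used
// above; the value 7 here is only a placeholder for the illustration.
const maxRelevance = 7.0

// relevanceFor mirrors the tiebreak in modRelevance: a /vN major-version
// suffix nudges the score up by N/1000 so newer majors win ties.
func relevanceFor(modPath string, base float64) float64 {
	_, pathMajor, ok := module.SplitPathVersion(modPath)
	if !ok {
		return base
	}
	i := strings.Index(pathMajor, "v")
	if i == -1 {
		return base
	}
	if n, err := strconv.ParseFloat(pathMajor[i+1:], 64); err == nil {
		base += n / 1000
	}
	return base
}

func main() {
	fmt.Println(relevanceFor("example.com/mod", maxRelevance-2))    // 5
	fmt.Println(relevanceFor("example.com/mod/v3", maxRelevance-2)) // 5.003
}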
// canonicalize gets the result of canonicalizing the packages using the results
// of initializing the resolver from 'go list -m'.
func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
// Packages in GOROOT are already canonical, regardless of the std/cmd modules.
if info.rootType == gopathwalk.RootGOROOT {
return &pkg{
importPathShort: info.nonCanonicalImportPath,
dir: info.dir,
packageName: path.Base(info.nonCanonicalImportPath),
relevance: MaxRelevance,
}, nil
}
importPath := info.nonCanonicalImportPath
mod := r.findModuleByDir(info.dir)
// Check if the directory is underneath a module that's in scope.
if mod != nil {
// It is. If dir is the target of a replace directive,
// our guessed import path is wrong. Use the real one.
if mod.Dir == info.dir {
importPath = mod.Path
} else {
dirInMod := info.dir[len(mod.Dir)+len("/"):]
importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod))
}
} else if !strings.HasPrefix(importPath, info.moduleName) {
// The module's name doesn't match the package's import path. It
// probably needs a replace directive we don't have.
return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir)
}
res := &pkg{
importPathShort: importPath,
dir: info.dir,
relevance: modRelevance(mod),
}
// We may have discovered a package that has a different version
// in scope already. Canonicalize to that one if possible.
if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" {
res.dir = canonicalDir
}
return res, nil
}
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
if err := r.init(); err != nil {
return "", nil, err
}
if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest {
return r.cacheExports(ctx, r.env, info)
}
return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest)
}
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
subdir := ""
if dir != root.Path {
subdir = dir[len(root.Path)+len("/"):]
}
importPath := filepath.ToSlash(subdir)
if strings.HasPrefix(importPath, "vendor/") {
// Only enter vendor directories if they're explicitly requested as a root.
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("unwanted vendor directory"),
}
}
switch root.Type {
case gopathwalk.RootCurrentModule:
importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
case gopathwalk.RootModuleCache:
matches := modCacheRegexp.FindStringSubmatch(subdir)
if len(matches) == 0 {
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("invalid module cache path: %v", subdir),
}
}
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
if err != nil {
if r.env.Logf != nil {
r.env.Logf("decoding module cache path %q: %v", subdir, err)
}
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
}
}
importPath = path.Join(modPath, filepath.ToSlash(matches[3]))
}
modDir, modName := r.modInfo(dir)
result := directoryPackageInfo{
status: directoryScanned,
dir: dir,
rootType: root.Type,
nonCanonicalImportPath: importPath,
moduleDir: modDir,
moduleName: modName,
}
if root.Type == gopathwalk.RootGOROOT {
// stdlib packages are always in scope, despite the confusing go.mod
return result
}
return result
}
// modCacheRegexp splits a path in a module cache into module, module version, and package.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
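For reference, here is how that regexp splits a typical module-cache path; a small standalone snippet with a made-up path:

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as modCacheRegexp above.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)

func main() {
	// A hypothetical path relative to the module cache root.
	dir := "github.com/example/mod@v1.4.2/internal/util"
	m := modCacheRegexp.FindStringSubmatch(dir)
	fmt.Println(m[1]) // github.com/example/mod  (module path)
	fmt.Println(m[2]) // v1.4.2                  (module version)
	fmt.Println(m[3]) // /internal/util          (package subpath)
}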
var (
slashSlash = []byte("//")
moduleStr = []byte("module")
)
// modulePath returns the module path from the gomod file text.
// If it cannot find a module path, it returns an empty string.
// It is tolerant of unrelated problems in the go.mod file.
//
// Copied from cmd/go/internal/modfile.
func modulePath(mod []byte) string {
for len(mod) > 0 {
line := mod
mod = nil
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, mod = line[:i], line[i+1:]
}
if i := bytes.Index(line, slashSlash); i >= 0 {
line = line[:i]
}
line = bytes.TrimSpace(line)
if !bytes.HasPrefix(line, moduleStr) {
continue
}
line = line[len(moduleStr):]
n := len(line)
line = bytes.TrimSpace(line)
if len(line) == n || len(line) == 0 {
continue
}
if line[0] == '"' || line[0] == '`' {
p, err := strconv.Unquote(string(line))
if err != nil {
return "" // malformed quoted string or multiline module path
}
return p
}
return string(line)
}
return "" // missing module path
}
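To see what modulePath extracts, feed it a small go.mod body. The standalone sketch below uses modfile.ModulePath from golang.org/x/mod/modfile, the exported counterpart of this copied helper, so it can run on its own:

package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	gomod := []byte(`// Example go.mod body for illustration only.
module example.com/hello // inline comments are ignored

go 1.17
`)
	// modfile.ModulePath behaves like the modulePath helper above.
	fmt.Println(modfile.ModulePath(gomod)) // example.com/hello
}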


@ -1,236 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import (
"context"
"fmt"
"sync"
"golang.org/x/tools/internal/gopathwalk"
)
// To find packages to import, the resolver needs to know about all of the
// packages that could be imported. This includes packages that are
// already in modules that are in (1) the current module, (2) replace targets,
// and (3) packages in the module cache. Packages in (1) and (2) may change over
// time, as the client may edit the current module and locally replaced modules.
// The module cache (which includes all of the packages in (3)) can only
// ever be added to.
//
// The resolver can thus save state about packages in the module cache
// and guarantee that this will not change over time. To obtain information
// about new modules added to the module cache, the module cache should be
// rescanned.
//
// It is OK to serve information about modules that have been deleted,
// as they do still exist.
// TODO(suzmue): can we share information with the caller about
// what module needs to be downloaded to import this package?
type directoryPackageStatus int
const (
_ directoryPackageStatus = iota
directoryScanned
nameLoaded
exportsLoaded
)
type directoryPackageInfo struct {
// status indicates the extent to which this struct has been filled in.
status directoryPackageStatus
// err is non-nil when there was an error trying to reach status.
err error
// Set when status >= directoryScanned.
// dir is the absolute directory of this package.
dir string
rootType gopathwalk.RootType
// nonCanonicalImportPath is the package's expected import path. It may
// not actually be importable at that path.
nonCanonicalImportPath string
// Module-related information.
moduleDir string // The directory that is the module root of this dir.
moduleName string // The module name that contains this dir.
// Set when status >= nameLoaded.
packageName string // the package name, as declared in the source.
// Set when status >= exportsLoaded.
exports []string
}
// reachedStatus returns true when info has a status at least target and any error associated with
// an attempt to reach target.
func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (bool, error) {
if info.err == nil {
return info.status >= target, nil
}
if info.status == target {
return true, info.err
}
return true, nil
}
// dirInfoCache is a concurrency safe map for storing information about
// directories that may contain packages.
//
// The information in this cache is built incrementally. Entries are initialized in scan.
// No new keys should be added in any other functions, as all directories containing
// packages are identified in scan.
//
// Other functions, including loadExports and findPackage, may update entries in this cache
// as they discover new things about the directory.
//
// The information in the cache is not expected to change for the cache's
// lifetime, so there is no protection against competing writes. Users should
// take care not to hold the cache across changes to the underlying files.
//
// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc)
type dirInfoCache struct {
mu sync.Mutex
// dirs stores information about packages in directories, keyed by absolute path.
dirs map[string]*directoryPackageInfo
listeners map[*int]cacheListener
}
type cacheListener func(directoryPackageInfo)
// ScanAndListen calls listener on all the items in the cache, and on anything
// newly added. The returned stop function waits for all in-flight callbacks to
// finish and blocks new ones.
func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
ctx, cancel := context.WithCancel(ctx)
// Flushing out all the callbacks is tricky without knowing how many there
// are going to be. Setting an arbitrary limit makes it much easier.
const maxInFlight = 10
sema := make(chan struct{}, maxInFlight)
for i := 0; i < maxInFlight; i++ {
sema <- struct{}{}
}
cookie := new(int) // A unique ID we can use for the listener.
// We can't hold mu while calling the listener.
d.mu.Lock()
var keys []string
for key := range d.dirs {
keys = append(keys, key)
}
d.listeners[cookie] = func(info directoryPackageInfo) {
select {
case <-ctx.Done():
return
case <-sema:
}
listener(info)
sema <- struct{}{}
}
d.mu.Unlock()
stop := func() {
cancel()
d.mu.Lock()
delete(d.listeners, cookie)
d.mu.Unlock()
for i := 0; i < maxInFlight; i++ {
<-sema
}
}
// Process the pre-existing keys.
for _, k := range keys {
select {
case <-ctx.Done():
return stop
default:
}
if v, ok := d.Load(k); ok {
listener(v)
}
}
return stop
}
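A rough usage sketch of ScanAndListen follows. It is not runnable on its own because dirInfoCache is unexported; assume it lives in this same package, and that the directory names are made up:

// exampleScanAndListen is a hypothetical in-package helper illustrating that
// the listener fires for entries already in the cache and for entries stored
// after subscription.
func exampleScanAndListen(ctx context.Context) {
	d := &dirInfoCache{
		dirs:      map[string]*directoryPackageInfo{},
		listeners: map[*int]cacheListener{},
	}
	d.Store("/src/a", directoryPackageInfo{status: directoryScanned, dir: "/src/a"})

	stop := d.ScanAndListen(ctx, func(info directoryPackageInfo) {
		fmt.Println("saw", info.dir) // sees /src/a immediately, /src/b below
	})
	d.Store("/src/b", directoryPackageInfo{status: directoryScanned, dir: "/src/b"})

	// stop waits for in-flight callbacks and unsubscribes the listener.
	stop()
}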
// Store stores the package info for dir.
func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
d.mu.Lock()
_, old := d.dirs[dir]
d.dirs[dir] = &info
var listeners []cacheListener
for _, l := range d.listeners {
listeners = append(listeners, l)
}
d.mu.Unlock()
if !old {
for _, l := range listeners {
l(info)
}
}
}
// Load returns a copy of the directoryPackageInfo for absolute directory dir.
func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
d.mu.Lock()
defer d.mu.Unlock()
info, ok := d.dirs[dir]
if !ok {
return directoryPackageInfo{}, false
}
return *info, true
}
// Keys returns the keys currently present in d.
func (d *dirInfoCache) Keys() (keys []string) {
d.mu.Lock()
defer d.mu.Unlock()
for key := range d.dirs {
keys = append(keys, key)
}
return keys
}
func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
if loaded, err := info.reachedStatus(nameLoaded); loaded {
return info.packageName, err
}
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
return "", fmt.Errorf("cannot read package name, scan error: %v", err)
}
info.packageName, info.err = packageDirToName(info.dir)
info.status = nameLoaded
d.Store(info.dir, info)
return info.packageName, info.err
}
func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
if reached, _ := info.reachedStatus(exportsLoaded); reached {
return info.packageName, info.exports, info.err
}
if reached, err := info.reachedStatus(nameLoaded); reached && err != nil {
return "", nil, err
}
info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false)
if info.err == context.Canceled || info.err == context.DeadlineExceeded {
return info.packageName, info.exports, info.err
}
// The cache structure wants things to proceed linearly. We can skip a
// step here, but only if we succeed.
if info.status == nameLoaded || info.err == nil {
info.status = exportsLoaded
} else {
info.status = nameLoaded
}
d.Store(info.dir, info)
return info.packageName, info.exports, info.err
}


@ -1,280 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Hacked up copy of go/ast/import.go
package imports
import (
"go/ast"
"go/token"
"sort"
"strconv"
)
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) {
for i, d := range f.Decls {
d, ok := d.(*ast.GenDecl)
if !ok || d.Tok != token.IMPORT {
// Not an import declaration, so we're done.
// Imports are always first.
break
}
if len(d.Specs) == 0 {
// Empty import block, remove it.
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
}
if !d.Lparen.IsValid() {
// Not a block: sorted by default.
continue
}
// Identify and sort runs of specs on successive lines.
i := 0
specs := d.Specs[:0]
for j, s := range d.Specs {
if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
// j begins a new run. End this one.
specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:j])...)
i = j
}
}
specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:])...)
d.Specs = specs
// Deduping can leave a blank line before the rparen; clean that up.
if len(d.Specs) > 0 {
lastSpec := d.Specs[len(d.Specs)-1]
lastLine := fset.Position(lastSpec.Pos()).Line
if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
fset.File(d.Rparen).MergeLine(rParenLine - 1)
}
}
}
}
// mergeImports merges all the import declarations into the first one.
// Taken from golang.org/x/tools/ast/astutil.
func mergeImports(fset *token.FileSet, f *ast.File) {
if len(f.Decls) <= 1 {
return
}
// Merge all the import declarations into the first one.
var first *ast.GenDecl
for i := 0; i < len(f.Decls); i++ {
decl := f.Decls[i]
gen, ok := decl.(*ast.GenDecl)
if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
continue
}
if first == nil {
first = gen
continue // Don't touch the first one.
}
// We now know there is more than one package in this import
// declaration. Ensure that it ends up parenthesized.
first.Lparen = first.Pos()
// Move the imports of the other import declaration to the first one.
for _, spec := range gen.Specs {
spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
first.Specs = append(first.Specs, spec)
}
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
i--
}
}
// declImports reports whether gen contains an import of path.
// Taken from golang.org/x/tools/ast/astutil.
func declImports(gen *ast.GenDecl, path string) bool {
if gen.Tok != token.IMPORT {
return false
}
for _, spec := range gen.Specs {
impspec := spec.(*ast.ImportSpec)
if importPath(impspec) == path {
return true
}
}
return false
}
func importPath(s ast.Spec) string {
t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value)
if err == nil {
return t
}
return ""
}
func importName(s ast.Spec) string {
n := s.(*ast.ImportSpec).Name
if n == nil {
return ""
}
return n.Name
}
func importComment(s ast.Spec) string {
c := s.(*ast.ImportSpec).Comment
if c == nil {
return ""
}
return c.Text()
}
// collapse indicates whether prev may be removed, leaving only next.
func collapse(prev, next ast.Spec) bool {
if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
return false
}
return prev.(*ast.ImportSpec).Comment == nil
}
type posSpan struct {
Start token.Pos
End token.Pos
}
func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
// Can't short-circuit here even if specs are already sorted,
// since they might yet need deduplication.
// A lone import, however, may be safely ignored.
if len(specs) <= 1 {
return specs
}
// Record positions for specs.
pos := make([]posSpan, len(specs))
for i, s := range specs {
pos[i] = posSpan{s.Pos(), s.End()}
}
// Identify comments in this range.
// Any comment from pos[0].Start to the final line counts.
lastLine := fset.Position(pos[len(pos)-1].End).Line
cstart := len(f.Comments)
cend := len(f.Comments)
for i, g := range f.Comments {
if g.Pos() < pos[0].Start {
continue
}
if i < cstart {
cstart = i
}
if fset.Position(g.End()).Line > lastLine {
cend = i
break
}
}
comments := f.Comments[cstart:cend]
// Assign each comment to the import spec preceding it.
importComment := map[*ast.ImportSpec][]*ast.CommentGroup{}
specIndex := 0
for _, g := range comments {
for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
specIndex++
}
s := specs[specIndex].(*ast.ImportSpec)
importComment[s] = append(importComment[s], g)
}
// Sort the import specs by import path.
// Remove duplicates, when possible without data loss.
// Reassign the import paths to have the same position sequence.
// Reassign each comment to abut the end of its spec.
// Sort the comments by new position.
sort.Sort(byImportSpec{localPrefix, specs})
// Dedup. Thanks to our sorting, we can just consider
// adjacent pairs of imports.
deduped := specs[:0]
for i, s := range specs {
if i == len(specs)-1 || !collapse(s, specs[i+1]) {
deduped = append(deduped, s)
} else {
p := s.Pos()
fset.File(p).MergeLine(fset.Position(p).Line)
}
}
specs = deduped
// Fix up comment positions
for i, s := range specs {
s := s.(*ast.ImportSpec)
if s.Name != nil {
s.Name.NamePos = pos[i].Start
}
s.Path.ValuePos = pos[i].Start
s.EndPos = pos[i].End
nextSpecPos := pos[i].End
for _, g := range importComment[s] {
for _, c := range g.List {
c.Slash = pos[i].End
nextSpecPos = c.End()
}
}
if i < len(specs)-1 {
pos[i+1].Start = nextSpecPos
pos[i+1].End = nextSpecPos
}
}
sort.Sort(byCommentPos(comments))
// Fixing up comment positions can insert blank lines, because import specs end up on different lines.
// We remove those blank lines here by merging each import spec's line into the first import spec's line.
firstSpecLine := fset.Position(specs[0].Pos()).Line
for _, s := range specs[1:] {
p := s.Pos()
line := fset.File(p).Line(p)
for previousLine := line - 1; previousLine >= firstSpecLine; {
fset.File(p).MergeLine(previousLine)
previousLine--
}
}
return specs
}
type byImportSpec struct {
localPrefix string
specs []ast.Spec // slice of *ast.ImportSpec
}
func (x byImportSpec) Len() int { return len(x.specs) }
func (x byImportSpec) Swap(i, j int) { x.specs[i], x.specs[j] = x.specs[j], x.specs[i] }
func (x byImportSpec) Less(i, j int) bool {
ipath := importPath(x.specs[i])
jpath := importPath(x.specs[j])
igroup := importGroup(x.localPrefix, ipath)
jgroup := importGroup(x.localPrefix, jpath)
if igroup != jgroup {
return igroup < jgroup
}
if ipath != jpath {
return ipath < jpath
}
iname := importName(x.specs[i])
jname := importName(x.specs[j])
if iname != jname {
return iname < jname
}
return importComment(x.specs[i]) < importComment(x.specs[j])
}
type byCommentPos []*ast.CommentGroup
func (x byCommentPos) Len() int { return len(x) }
func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }

File diff suppressed because it is too large

@ -1,11 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package typeparams provides functions to work indirectly with type parameter
// data stored in go/ast and go/types objects, while these APIs are guarded by a
// build constraint.
//
// This package exists to make it easier for tools to work with generic code,
// while also compiling against older Go versions.
package typeparams


@ -1,90 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !typeparams || !go1.17
// +build !typeparams !go1.17
package typeparams
import (
"go/ast"
"go/types"
)
// NOTE: doc comments must be kept in sync with typeparams.go.
// Enabled reports whether type parameters are enabled in the current build
// environment.
const Enabled = false
// UnpackIndex extracts all index expressions from e. For non-generic code this
// is always one expression: e.Index, but may be more than one expression for
// generic type instantiation.
func UnpackIndex(e *ast.IndexExpr) []ast.Expr {
return []ast.Expr{e.Index}
}
// IsListExpr reports whether n is an *ast.ListExpr, which is a new node type
// introduced to hold type arguments for generic type instantiation.
func IsListExpr(n ast.Node) bool {
return false
}
// ForTypeDecl extracts the (possibly nil) type parameter node list from n.
func ForTypeDecl(*ast.TypeSpec) *ast.FieldList {
return nil
}
// ForFuncDecl extracts the (possibly nil) type parameter node list from n.
func ForFuncDecl(*ast.FuncDecl) *ast.FieldList {
return nil
}
// ForSignature extracts the (possibly empty) type parameter object list from
// sig.
func ForSignature(*types.Signature) []*types.TypeName {
return nil
}
// HasTypeSet reports if iface has a type set.
func HasTypeSet(*types.Interface) bool {
return false
}
// IsComparable reports if iface is the comparable interface.
func IsComparable(*types.Interface) bool {
return false
}
// IsConstraint reports whether iface may only be used as a type parameter
// constraint (i.e. has a type set or is the comparable interface).
func IsConstraint(*types.Interface) bool {
return false
}
// ForNamed extracts the (possibly empty) type parameter object list from
// named.
func ForNamed(*types.Named) []*types.TypeName {
return nil
}
// NamedTArgs extracts the (possibly empty) type argument list from named.
func NamedTArgs(*types.Named) []types.Type {
return nil
}
// InitInferred initializes info to record inferred type information.
func InitInferred(*types.Info) {
}
// GetInferred extracts inferred type information from info for e.
//
// The expression e may have an inferred type if it is an *ast.IndexExpr
// representing partial instantiation of a generic function type for which type
// arguments have been inferred using constraint type inference, or if it is an
// *ast.CallExpr for which type arguments have been inferred using both
// constraint type inference and function argument inference.
func GetInferred(*types.Info, ast.Expr) ([]types.Type, *types.Signature) {
return nil, nil
}


@ -1,105 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build typeparams && go1.17
// +build typeparams,go1.17
package typeparams
import (
"go/ast"
"go/types"
)
// NOTE: doc comments must be kept in sync with notypeparams.go.
// Enabled reports whether type parameters are enabled in the current build
// environment.
const Enabled = true
// UnpackIndex extracts all index expressions from e. For non-generic code this
// is always one expression: e.Index, but may be more than one expression for
// generic type instantiation.
func UnpackIndex(e *ast.IndexExpr) []ast.Expr {
if x, _ := e.Index.(*ast.ListExpr); x != nil {
return x.ElemList
}
if e.Index != nil {
return []ast.Expr{e.Index}
}
return nil
}
// IsListExpr reports whether n is an *ast.ListExpr, which is a new node type
// introduced to hold type arguments for generic type instantiation.
func IsListExpr(n ast.Node) bool {
_, ok := n.(*ast.ListExpr)
return ok
}
// ForTypeDecl extracts the (possibly nil) type parameter node list from n.
func ForTypeDecl(n *ast.TypeSpec) *ast.FieldList {
return n.TParams
}
// ForFuncDecl extracts the (possibly nil) type parameter node list from n.
func ForFuncDecl(n *ast.FuncDecl) *ast.FieldList {
if n.Type != nil {
return n.Type.TParams
}
return nil
}
// ForSignature extracts the (possibly empty) type parameter object list from
// sig.
func ForSignature(sig *types.Signature) []*types.TypeName {
return sig.TParams()
}
// HasTypeSet reports if iface has a type set.
func HasTypeSet(iface *types.Interface) bool {
return iface.HasTypeList()
}
// IsComparable reports if iface is the comparable interface.
func IsComparable(iface *types.Interface) bool {
return iface.IsComparable()
}
// IsConstraint reports whether iface may only be used as a type parameter
// constraint (i.e. has a type set or is the comparable interface).
func IsConstraint(iface *types.Interface) bool {
return iface.IsConstraint()
}
// ForNamed extracts the (possibly empty) type parameter object list from
// named.
func ForNamed(named *types.Named) []*types.TypeName {
return named.TParams()
}
// NamedTArgs extracts the (possibly empty) type argument list from named.
func NamedTArgs(named *types.Named) []types.Type {
return named.TArgs()
}
// InitInferred initializes info to record inferred type information.
func InitInferred(info *types.Info) {
info.Inferred = make(map[ast.Expr]types.Inferred)
}
// GetInferred extracts inferred type information from info for e.
//
// The expression e may have an inferred type if it is an *ast.IndexExpr
// representing partial instantiation of a generic function type for which type
// arguments have been inferred using constraint type inference, or if it is an
// *ast.CallExpr for which type arguments have been inferred using both
// constraint type inference and function argument inference.
func GetInferred(info *types.Info, e ast.Expr) ([]types.Type, *types.Signature) {
if info.Inferred == nil {
return nil, nil
}
inf := info.Inferred[e]
return inf.TArgs, inf.Sig
}


@ -7,6 +7,7 @@ package internal
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
@ -62,56 +63,68 @@ const (
serviceAccountKey = "service_account"
)
// credentialsFromJSON returns a google.Credentials based on the input.
// credentialsFromJSON returns a google.Credentials from the JSON data
//
// - A self-signed JWT auth flow will be executed if: the data file is a service
// account, no user scopes are provided, an audience is provided, a user
// specified endpoint is not provided, and credentials will not be
// impersonated.
// - A self-signed JWT flow will be executed if the following conditions are
// met:
// (1) At least one of the following is true:
// (a) No scope is provided
// (b) Scope for self-signed JWT flow is enabled
// (c) Audiences are explicitly provided by users
// (2) No service account impersonation
//
// - Otherwise, executes a standard OAuth 2.0 flow.
// - Otherwise, executes standard OAuth 2.0 flow
// More details: google.aip.dev/auth/4111
func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) {
// By default, a standard OAuth 2.0 token source is created
cred, err := google.CredentialsFromJSON(ctx, data, ds.GetScopes()...)
if err != nil {
return nil, err
}
// Standard OAuth 2.0 Flow
if len(data) == 0 ||
len(ds.Scopes) > 0 ||
(ds.DefaultAudience == "" && len(ds.Audiences) == 0) ||
ds.ImpersonationConfig != nil ||
ds.Endpoint != "" {
return cred, nil
}
// Check if JSON is a service account and if so create a self-signed JWT.
var f struct {
Type string `json:"type"`
// The remaining JSON fields are omitted because they are not used.
}
if err := json.Unmarshal(cred.JSON, &f); err != nil {
// Override the token source to use self-signed JWT if conditions are met
isJWTFlow, err := isSelfSignedJWTFlow(data, ds)
if err != nil {
return nil, err
}
if f.Type == serviceAccountKey {
ts, err := selfSignedJWTTokenSource(data, ds.DefaultAudience, ds.Audiences)
if isJWTFlow {
ts, err := selfSignedJWTTokenSource(data, ds)
if err != nil {
return nil, err
}
cred.TokenSource = ts
}
return cred, err
}
func selfSignedJWTTokenSource(data []byte, defaultAudience string, audiences []string) (oauth2.TokenSource, error) {
audience := defaultAudience
if len(audiences) > 0 {
// TODO(shinfan): Update golang oauth to support multiple audiences.
if len(audiences) > 1 {
return nil, fmt.Errorf("multiple audiences support is not implemented")
func isSelfSignedJWTFlow(data []byte, ds *DialSettings) (bool, error) {
if (ds.EnableJwtWithScope || ds.HasCustomAudience()) &&
ds.ImpersonationConfig == nil {
// Check if JSON is a service account and if so create a self-signed JWT.
var f struct {
Type string `json:"type"`
// The remaining JSON fields are omitted because they are not used.
}
audience = audiences[0]
if err := json.Unmarshal(data, &f); err != nil {
return false, err
}
return f.Type == serviceAccountKey, nil
}
return false, nil
}
func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource, error) {
if len(ds.GetScopes()) > 0 && !ds.HasCustomAudience() {
// Scopes are preferred in self-signed JWT unless the scope is not available
// or a custom audience is used.
return google.JWTAccessTokenSourceWithScope(data, ds.GetScopes()...)
} else if ds.GetAudience() != "" {
// Fallback to audience if scope is not provided
return google.JWTAccessTokenSourceFromJSON(data, ds.GetAudience())
} else {
return nil, errors.New("neither scopes or audience are available for the self-signed JWT")
}
return google.JWTAccessTokenSourceFromJSON(data, audience)
}
// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials.
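The two constructors used above, JWTAccessTokenSourceWithScope and JWTAccessTokenSourceFromJSON, are exported from golang.org/x/oauth2/google. The standalone sketch below mirrors the scope-first preference; the key file path, audience, and scope are placeholders and the helper name is made up:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

// newSelfSignedJWTSource is a hypothetical helper: prefer scopes when they
// are available, otherwise fall back to an audience.
func newSelfSignedJWTSource(keyJSON []byte, audience string, scopes ...string) (oauth2.TokenSource, error) {
	if len(scopes) > 0 {
		return google.JWTAccessTokenSourceWithScope(keyJSON, scopes...)
	}
	if audience != "" {
		return google.JWTAccessTokenSourceFromJSON(keyJSON, audience)
	}
	return nil, fmt.Errorf("neither scopes nor an audience were provided")
}

func main() {
	keyJSON, err := ioutil.ReadFile("service-account.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	ts, err := newSelfSignedJWTSource(keyJSON, "", "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	tok, err := ts.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token type:", tok.TokenType)
}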


@ -24,6 +24,7 @@ type DialSettings struct {
DefaultMTLSEndpoint string
Scopes []string
DefaultScopes []string
EnableJwtWithScope bool
TokenSource oauth2.TokenSource
Credentials *google.Credentials
CredentialsFile string // if set, Token Source is ignored.
@ -60,6 +61,19 @@ func (ds *DialSettings) GetScopes() []string {
return ds.DefaultScopes
}
// GetAudience returns the user-provided audience, if set, or else falls back to the default audience.
func (ds *DialSettings) GetAudience() string {
if ds.HasCustomAudience() {
return ds.Audiences[0]
}
return ds.DefaultAudience
}
// HasCustomAudience returns true if a custom audience is provided by users.
func (ds *DialSettings) HasCustomAudience() bool {
return len(ds.Audiences) > 0
}
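A tiny standalone sketch of the audience fallback, using a stand-in struct since DialSettings lives in an internal package:

package main

import "fmt"

// settings is a stand-in for the DialSettings fields used by GetAudience.
type settings struct {
	Audiences       []string
	DefaultAudience string
}

// getAudience mirrors GetAudience above: a user-provided audience wins,
// otherwise the default audience is used.
func getAudience(s settings) string {
	if len(s.Audiences) > 0 {
		return s.Audiences[0]
	}
	return s.DefaultAudience
}

func main() {
	fmt.Println(getAudience(settings{DefaultAudience: "https://example.googleapis.com/"}))
	fmt.Println(getAudience(settings{
		Audiences:       []string{"https://custom.example.com/"},
		DefaultAudience: "https://example.googleapis.com/",
	}))
}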
// Validate reports an error if ds is invalid.
func (ds *DialSettings) Validate() error {
if ds.SkipValidation {


@ -94,3 +94,15 @@ func (w withDefaultScopes) Apply(o *internal.DialSettings) {
o.DefaultScopes = make([]string, len(w))
copy(o.DefaultScopes, w)
}
// EnableJwtWithScope returns a ClientOption that specifies if scope can be used
// with self-signed JWT.
func EnableJwtWithScope() option.ClientOption {
return enableJwtWithScope(true)
}
type enableJwtWithScope bool
func (w enableJwtWithScope) Apply(o *internal.DialSettings) {
o.EnableJwtWithScope = bool(w)
}
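EnableJwtWithScope is meant to be passed by the generated Google API clients themselves rather than by end users. A hedged sketch of how such a client might assemble its default options (the endpoint and audience are placeholders, and defaultClientOptions is a made-up helper):

package main

import (
	"google.golang.org/api/option"
	"google.golang.org/api/option/internaloption"
)

// defaultClientOptions shows how a generated client could opt in to
// scope-based self-signed JWTs alongside its other defaults.
func defaultClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("example.googleapis.com:443"),
		internaloption.WithDefaultAudience("https://example.googleapis.com/"),
		internaloption.EnableJwtWithScope(),
	}
}

func main() {
	_ = defaultClientOptions()
}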


@ -68,8 +68,6 @@ func GetClientCertificateSourceAndEndpoint(settings *internal.DialSettings) (cer
func getClientCertificateSource(settings *internal.DialSettings) (cert.Source, error) {
if !isClientCertificateEnabled() {
return nil, nil
} else if settings.HTTPClient != nil {
return nil, nil // HTTPClient is incompatible with ClientCertificateSource
} else if settings.ClientCertSource != nil {
return settings.ClientCertSource, nil
} else {


@ -1,168 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package internal_gengo
import (
"unicode"
"unicode/utf8"
"google.golang.org/protobuf/compiler/protogen"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/types/descriptorpb"
)
type fileInfo struct {
*protogen.File
allEnums []*enumInfo
allMessages []*messageInfo
allExtensions []*extensionInfo
allEnumsByPtr map[*enumInfo]int // value is index into allEnums
allMessagesByPtr map[*messageInfo]int // value is index into allMessages
allMessageFieldsByPtr map[*messageInfo]*structFields
// needRawDesc specifies whether the generator should emit logic to provide
// the legacy raw descriptor in GZIP'd form.
// This is updated by enum and message generation logic as necessary,
// and checked at the end of file generation.
needRawDesc bool
}
type structFields struct {
count int
unexported map[int]string
}
func (sf *structFields) append(name string) {
if r, _ := utf8.DecodeRuneInString(name); !unicode.IsUpper(r) {
if sf.unexported == nil {
sf.unexported = make(map[int]string)
}
sf.unexported[sf.count] = name
}
sf.count++
}
func newFileInfo(file *protogen.File) *fileInfo {
f := &fileInfo{File: file}
// Collect all enums, messages, and extensions in "flattened ordering".
// See filetype.TypeBuilder.
var walkMessages func([]*protogen.Message, func(*protogen.Message))
walkMessages = func(messages []*protogen.Message, f func(*protogen.Message)) {
for _, m := range messages {
f(m)
walkMessages(m.Messages, f)
}
}
initEnumInfos := func(enums []*protogen.Enum) {
for _, enum := range enums {
f.allEnums = append(f.allEnums, newEnumInfo(f, enum))
}
}
initMessageInfos := func(messages []*protogen.Message) {
for _, message := range messages {
f.allMessages = append(f.allMessages, newMessageInfo(f, message))
}
}
initExtensionInfos := func(extensions []*protogen.Extension) {
for _, extension := range extensions {
f.allExtensions = append(f.allExtensions, newExtensionInfo(f, extension))
}
}
initEnumInfos(f.Enums)
initMessageInfos(f.Messages)
initExtensionInfos(f.Extensions)
walkMessages(f.Messages, func(m *protogen.Message) {
initEnumInfos(m.Enums)
initMessageInfos(m.Messages)
initExtensionInfos(m.Extensions)
})
// Derive a reverse mapping of enum and message pointers to their index
// in allEnums and allMessages.
if len(f.allEnums) > 0 {
f.allEnumsByPtr = make(map[*enumInfo]int)
for i, e := range f.allEnums {
f.allEnumsByPtr[e] = i
}
}
if len(f.allMessages) > 0 {
f.allMessagesByPtr = make(map[*messageInfo]int)
f.allMessageFieldsByPtr = make(map[*messageInfo]*structFields)
for i, m := range f.allMessages {
f.allMessagesByPtr[m] = i
f.allMessageFieldsByPtr[m] = new(structFields)
}
}
return f
}
type enumInfo struct {
*protogen.Enum
genJSONMethod bool
genRawDescMethod bool
}
func newEnumInfo(f *fileInfo, enum *protogen.Enum) *enumInfo {
e := &enumInfo{Enum: enum}
e.genJSONMethod = true
e.genRawDescMethod = true
return e
}
type messageInfo struct {
*protogen.Message
genRawDescMethod bool
genExtRangeMethod bool
isTracked bool
hasWeak bool
}
func newMessageInfo(f *fileInfo, message *protogen.Message) *messageInfo {
m := &messageInfo{Message: message}
m.genRawDescMethod = true
m.genExtRangeMethod = true
m.isTracked = isTrackedMessage(m)
for _, field := range m.Fields {
m.hasWeak = m.hasWeak || field.Desc.IsWeak()
}
return m
}
// isTrackedMessage reports whether field tracking is enabled on the message.
func isTrackedMessage(m *messageInfo) (tracked bool) {
const trackFieldUse_fieldNumber = 37383685
// Decode the option from unknown fields to avoid a dependency on the
// annotation proto from protoc-gen-go.
b := m.Desc.Options().(*descriptorpb.MessageOptions).ProtoReflect().GetUnknown()
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
if num == trackFieldUse_fieldNumber && typ == protowire.VarintType {
v, _ := protowire.ConsumeVarint(b)
tracked = protowire.DecodeBool(v)
}
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
return tracked
}
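isTrackedMessage decodes the option directly from the descriptor's unknown fields with protowire. The standalone snippet below builds a one-field buffer with the same field number and runs the same decode loop:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Same field number as trackFieldUse_fieldNumber above.
	const fieldNumber = 37383685

	// Encode a single bool option into the wire format, the same shape that
	// would appear in a descriptor's unknown fields.
	var buf []byte
	buf = protowire.AppendTag(buf, fieldNumber, protowire.VarintType)
	buf = protowire.AppendVarint(buf, protowire.EncodeBool(true))

	// Decode it back with the same loop structure as isTrackedMessage.
	var tracked bool
	b := buf
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		b = b[n:]
		if num == fieldNumber && typ == protowire.VarintType {
			v, _ := protowire.ConsumeVarint(b)
			tracked = protowire.DecodeBool(v)
		}
		m := protowire.ConsumeFieldValue(num, typ, b)
		b = b[m:]
	}
	fmt.Println(tracked) // true
}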
type extensionInfo struct {
*protogen.Extension
}
func newExtensionInfo(f *fileInfo, extension *protogen.Extension) *extensionInfo {
x := &extensionInfo{Extension: extension}
return x
}


@ -1,884 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package internal_gengo is internal to the protobuf module.
package internal_gengo
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
"math"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"google.golang.org/protobuf/compiler/protogen"
"google.golang.org/protobuf/internal/encoding/tag"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/version"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoimpl"
"google.golang.org/protobuf/types/descriptorpb"
"google.golang.org/protobuf/types/pluginpb"
)
// SupportedFeatures reports the set of supported protobuf language features.
var SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL)
// GenerateVersionMarkers specifies whether to generate version markers.
var GenerateVersionMarkers = true
// Standard library dependencies.
const (
base64Package = protogen.GoImportPath("encoding/base64")
mathPackage = protogen.GoImportPath("math")
reflectPackage = protogen.GoImportPath("reflect")
sortPackage = protogen.GoImportPath("sort")
stringsPackage = protogen.GoImportPath("strings")
syncPackage = protogen.GoImportPath("sync")
timePackage = protogen.GoImportPath("time")
utf8Package = protogen.GoImportPath("unicode/utf8")
)
// Protobuf library dependencies.
//
// These are declared as an interface type so that they can be more easily
// patched to support unique build environments that impose restrictions
// on the dependencies of generated source code.
var (
protoPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/proto")
protoifacePackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoiface")
protoimplPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoimpl")
protojsonPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/encoding/protojson")
protoreflectPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoreflect")
protoregistryPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoregistry")
)
type goImportPath interface {
String() string
Ident(string) protogen.GoIdent
}
// GenerateFile generates the contents of a .pb.go file.
func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile {
filename := file.GeneratedFilenamePrefix + ".pb.go"
g := gen.NewGeneratedFile(filename, file.GoImportPath)
f := newFileInfo(file)
genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Syntax_field_number))
genGeneratedHeader(gen, g, f)
genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Package_field_number))
packageDoc := genPackageKnownComment(f)
g.P(packageDoc, "package ", f.GoPackageName)
g.P()
// Emit a static check that enforces a minimum version of the proto package.
if GenerateVersionMarkers {
g.P("const (")
g.P("// Verify that this generated code is sufficiently up-to-date.")
g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimpl.GenVersion, " - ", protoimplPackage.Ident("MinVersion"), ")")
g.P("// Verify that runtime/protoimpl is sufficiently up-to-date.")
g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimplPackage.Ident("MaxVersion"), " - ", protoimpl.GenVersion, ")")
g.P(")")
g.P()
}
for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ {
genImport(gen, g, f, imps.Get(i))
}
for _, enum := range f.allEnums {
genEnum(g, f, enum)
}
for _, message := range f.allMessages {
genMessage(g, f, message)
}
genExtensions(g, f)
genReflectFileDescriptor(gen, g, f)
return g
}
// genStandaloneComments prints all leading comments for a FileDescriptorProto
// location identified by the field number n.
func genStandaloneComments(g *protogen.GeneratedFile, f *fileInfo, n int32) {
loc := f.Desc.SourceLocations().ByPath(protoreflect.SourcePath{n})
for _, s := range loc.LeadingDetachedComments {
g.P(protogen.Comments(s))
g.P()
}
if s := loc.LeadingComments; s != "" {
g.P(protogen.Comments(s))
g.P()
}
}
func genGeneratedHeader(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) {
g.P("// Code generated by protoc-gen-go. DO NOT EDIT.")
if GenerateVersionMarkers {
g.P("// versions:")
protocGenGoVersion := version.String()
protocVersion := "(unknown)"
if v := gen.Request.GetCompilerVersion(); v != nil {
protocVersion = fmt.Sprintf("v%v.%v.%v", v.GetMajor(), v.GetMinor(), v.GetPatch())
if s := v.GetSuffix(); s != "" {
protocVersion += "-" + s
}
}
g.P("// \tprotoc-gen-go ", protocGenGoVersion)
g.P("// \tprotoc ", protocVersion)
}
if f.Proto.GetOptions().GetDeprecated() {
g.P("// ", f.Desc.Path(), " is a deprecated file.")
} else {
g.P("// source: ", f.Desc.Path())
}
g.P()
}
func genImport(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo, imp protoreflect.FileImport) {
impFile, ok := gen.FilesByPath[imp.Path()]
if !ok {
return
}
if impFile.GoImportPath == f.GoImportPath {
// Don't generate imports or aliases for types in the same Go package.
return
}
// Generate imports for all non-weak dependencies, even if they are not
// referenced, because other code and tools depend on having the
// full transitive closure of protocol buffer types in the binary.
if !imp.IsWeak {
g.Import(impFile.GoImportPath)
}
if !imp.IsPublic {
return
}
// Generate public imports by generating the imported file, parsing it,
// and extracting every symbol that should receive a forwarding declaration.
impGen := GenerateFile(gen, impFile)
impGen.Skip()
b, err := impGen.Content()
if err != nil {
gen.Error(err)
return
}
fset := token.NewFileSet()
astFile, err := parser.ParseFile(fset, "", b, parser.ParseComments)
if err != nil {
gen.Error(err)
return
}
genForward := func(tok token.Token, name string, expr ast.Expr) {
// Don't import unexported symbols.
r, _ := utf8.DecodeRuneInString(name)
if !unicode.IsUpper(r) {
return
}
// Don't import the FileDescriptor.
if name == impFile.GoDescriptorIdent.GoName {
return
}
// Don't import decls referencing a symbol defined in another package.
// i.e., don't import decls which are themselves public imports:
//
// type T = somepackage.T
if _, ok := expr.(*ast.SelectorExpr); ok {
return
}
g.P(tok, " ", name, " = ", impFile.GoImportPath.Ident(name))
}
g.P("// Symbols defined in public import of ", imp.Path(), ".")
g.P()
for _, decl := range astFile.Decls {
switch decl := decl.(type) {
case *ast.GenDecl:
for _, spec := range decl.Specs {
switch spec := spec.(type) {
case *ast.TypeSpec:
genForward(decl.Tok, spec.Name.Name, spec.Type)
case *ast.ValueSpec:
for i, name := range spec.Names {
var expr ast.Expr
if i < len(spec.Values) {
expr = spec.Values[i]
}
genForward(decl.Tok, name.Name, expr)
}
case *ast.ImportSpec:
default:
panic(fmt.Sprintf("can't generate forward for spec type %T", spec))
}
}
}
}
g.P()
}
func genEnum(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) {
// Enum type declaration.
g.Annotate(e.GoIdent.GoName, e.Location)
leadingComments := appendDeprecationSuffix(e.Comments.Leading,
e.Desc.Options().(*descriptorpb.EnumOptions).GetDeprecated())
g.P(leadingComments,
"type ", e.GoIdent, " int32")
// Enum value constants.
g.P("const (")
for _, value := range e.Values {
g.Annotate(value.GoIdent.GoName, value.Location)
leadingComments := appendDeprecationSuffix(value.Comments.Leading,
value.Desc.Options().(*descriptorpb.EnumValueOptions).GetDeprecated())
g.P(leadingComments,
value.GoIdent, " ", e.GoIdent, " = ", value.Desc.Number(),
trailingComment(value.Comments.Trailing))
}
g.P(")")
g.P()
// Enum value maps.
g.P("// Enum value maps for ", e.GoIdent, ".")
g.P("var (")
g.P(e.GoIdent.GoName+"_name", " = map[int32]string{")
for _, value := range e.Values {
duplicate := ""
if value.Desc != e.Desc.Values().ByNumber(value.Desc.Number()) {
duplicate = "// Duplicate value: "
}
g.P(duplicate, value.Desc.Number(), ": ", strconv.Quote(string(value.Desc.Name())), ",")
}
g.P("}")
g.P(e.GoIdent.GoName+"_value", " = map[string]int32{")
for _, value := range e.Values {
g.P(strconv.Quote(string(value.Desc.Name())), ": ", value.Desc.Number(), ",")
}
g.P("}")
g.P(")")
g.P()
// Enum method.
//
// NOTE: A pointer value is needed to represent presence in proto2.
// Since a proto2 message can reference a proto3 enum, it is useful to
// always generate this method (even on proto3 enums) to support that case.
g.P("func (x ", e.GoIdent, ") Enum() *", e.GoIdent, " {")
g.P("p := new(", e.GoIdent, ")")
g.P("*p = x")
g.P("return p")
g.P("}")
g.P()
// String method.
g.P("func (x ", e.GoIdent, ") String() string {")
g.P("return ", protoimplPackage.Ident("X"), ".EnumStringOf(x.Descriptor(), ", protoreflectPackage.Ident("EnumNumber"), "(x))")
g.P("}")
g.P()
genEnumReflectMethods(g, f, e)
// UnmarshalJSON method.
if e.genJSONMethod && e.Desc.Syntax() == protoreflect.Proto2 {
g.P("// Deprecated: Do not use.")
g.P("func (x *", e.GoIdent, ") UnmarshalJSON(b []byte) error {")
g.P("num, err := ", protoimplPackage.Ident("X"), ".UnmarshalJSONEnum(x.Descriptor(), b)")
g.P("if err != nil {")
g.P("return err")
g.P("}")
g.P("*x = ", e.GoIdent, "(num)")
g.P("return nil")
g.P("}")
g.P()
}
// EnumDescriptor method.
if e.genRawDescMethod {
var indexes []string
for i := 1; i < len(e.Location.Path); i += 2 {
indexes = append(indexes, strconv.Itoa(int(e.Location.Path[i])))
}
g.P("// Deprecated: Use ", e.GoIdent, ".Descriptor instead.")
g.P("func (", e.GoIdent, ") EnumDescriptor() ([]byte, []int) {")
g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}")
g.P("}")
g.P()
f.needRawDesc = true
}
}
func genMessage(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
if m.Desc.IsMapEntry() {
return
}
// Message type declaration.
g.Annotate(m.GoIdent.GoName, m.Location)
leadingComments := appendDeprecationSuffix(m.Comments.Leading,
m.Desc.Options().(*descriptorpb.MessageOptions).GetDeprecated())
g.P(leadingComments,
"type ", m.GoIdent, " struct {")
genMessageFields(g, f, m)
g.P("}")
g.P()
genMessageKnownFunctions(g, f, m)
genMessageDefaultDecls(g, f, m)
genMessageMethods(g, f, m)
genMessageOneofWrapperTypes(g, f, m)
}
func genMessageFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
sf := f.allMessageFieldsByPtr[m]
genMessageInternalFields(g, f, m, sf)
for _, field := range m.Fields {
genMessageField(g, f, m, field, sf)
}
}
func genMessageInternalFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, sf *structFields) {
g.P(genid.State_goname, " ", protoimplPackage.Ident("MessageState"))
sf.append(genid.State_goname)
g.P(genid.SizeCache_goname, " ", protoimplPackage.Ident("SizeCache"))
sf.append(genid.SizeCache_goname)
if m.hasWeak {
g.P(genid.WeakFields_goname, " ", protoimplPackage.Ident("WeakFields"))
sf.append(genid.WeakFields_goname)
}
g.P(genid.UnknownFields_goname, " ", protoimplPackage.Ident("UnknownFields"))
sf.append(genid.UnknownFields_goname)
if m.Desc.ExtensionRanges().Len() > 0 {
g.P(genid.ExtensionFields_goname, " ", protoimplPackage.Ident("ExtensionFields"))
sf.append(genid.ExtensionFields_goname)
}
if sf.count > 0 {
g.P()
}
}
func genMessageField(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field, sf *structFields) {
if oneof := field.Oneof; oneof != nil && !oneof.Desc.IsSynthetic() {
// It would be a bit simpler to iterate over the oneofs below,
// but generating the field here keeps the contents of the Go
// struct in the same order as the contents of the source
// .proto file.
if oneof.Fields[0] != field {
return // only generate for first appearance
}
tags := structTags{
{"protobuf_oneof", string(oneof.Desc.Name())},
}
if m.isTracked {
tags = append(tags, gotrackTags...)
}
g.Annotate(m.GoIdent.GoName+"."+oneof.GoName, oneof.Location)
leadingComments := oneof.Comments.Leading
if leadingComments != "" {
leadingComments += "\n"
}
ss := []string{fmt.Sprintf(" Types that are assignable to %s:\n", oneof.GoName)}
for _, field := range oneof.Fields {
ss = append(ss, "\t*"+field.GoIdent.GoName+"\n")
}
leadingComments += protogen.Comments(strings.Join(ss, ""))
g.P(leadingComments,
oneof.GoName, " ", oneofInterfaceName(oneof), tags)
sf.append(oneof.GoName)
return
}
goType, pointer := fieldGoType(g, f, field)
if pointer {
goType = "*" + goType
}
tags := structTags{
{"protobuf", fieldProtobufTagValue(field)},
{"json", fieldJSONTagValue(field)},
}
if field.Desc.IsMap() {
key := field.Message.Fields[0]
val := field.Message.Fields[1]
tags = append(tags, structTags{
{"protobuf_key", fieldProtobufTagValue(key)},
{"protobuf_val", fieldProtobufTagValue(val)},
}...)
}
if m.isTracked {
tags = append(tags, gotrackTags...)
}
name := field.GoName
if field.Desc.IsWeak() {
name = genid.WeakFieldPrefix_goname + name
}
g.Annotate(m.GoIdent.GoName+"."+name, field.Location)
leadingComments := appendDeprecationSuffix(field.Comments.Leading,
field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
g.P(leadingComments,
name, " ", goType, tags,
trailingComment(field.Comments.Trailing))
sf.append(field.GoName)
}
// genMessageDefaultDecls generates consts and vars holding the default
// values of fields.
func genMessageDefaultDecls(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
var consts, vars []string
for _, field := range m.Fields {
if !field.Desc.HasDefault() {
continue
}
name := "Default_" + m.GoIdent.GoName + "_" + field.GoName
goType, _ := fieldGoType(g, f, field)
defVal := field.Desc.Default()
switch field.Desc.Kind() {
case protoreflect.StringKind:
consts = append(consts, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.String()))
case protoreflect.BytesKind:
vars = append(vars, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.Bytes()))
case protoreflect.EnumKind:
idx := field.Desc.DefaultEnumValue().Index()
val := field.Enum.Values[idx]
if val.GoIdent.GoImportPath == f.GoImportPath {
consts = append(consts, fmt.Sprintf("%s = %s", name, g.QualifiedGoIdent(val.GoIdent)))
} else {
// If the enum value is declared in a different Go package,
// reference it by number since the name may not be correct.
// See https://github.com/golang/protobuf/issues/513.
consts = append(consts, fmt.Sprintf("%s = %s(%d) // %s",
name, g.QualifiedGoIdent(field.Enum.GoIdent), val.Desc.Number(), g.QualifiedGoIdent(val.GoIdent)))
}
case protoreflect.FloatKind, protoreflect.DoubleKind:
if f := defVal.Float(); math.IsNaN(f) || math.IsInf(f, 0) {
var fn, arg string
switch f := defVal.Float(); {
case math.IsInf(f, -1):
fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "-1"
case math.IsInf(f, +1):
fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "+1"
case math.IsNaN(f):
fn, arg = g.QualifiedGoIdent(mathPackage.Ident("NaN")), ""
}
vars = append(vars, fmt.Sprintf("%s = %s(%s(%s))", name, goType, fn, arg))
} else {
consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, f))
}
default:
consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, defVal.Interface()))
}
}
if len(consts) > 0 {
g.P("// Default values for ", m.GoIdent, " fields.")
g.P("const (")
for _, s := range consts {
g.P(s)
}
g.P(")")
}
if len(vars) > 0 {
g.P("// Default values for ", m.GoIdent, " fields.")
g.P("var (")
for _, s := range vars {
g.P(s)
}
g.P(")")
}
g.P()
}
func genMessageMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
genMessageBaseMethods(g, f, m)
genMessageGetterMethods(g, f, m)
genMessageSetterMethods(g, f, m)
}
func genMessageBaseMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
// Reset method.
g.P("func (x *", m.GoIdent, ") Reset() {")
g.P("*x = ", m.GoIdent, "{}")
g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " {")
g.P("mi := &", messageTypesVarName(f), "[", f.allMessagesByPtr[m], "]")
g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))")
g.P("ms.StoreMessageInfo(mi)")
g.P("}")
g.P("}")
g.P()
// String method.
g.P("func (x *", m.GoIdent, ") String() string {")
g.P("return ", protoimplPackage.Ident("X"), ".MessageStringOf(x)")
g.P("}")
g.P()
// ProtoMessage method.
g.P("func (*", m.GoIdent, ") ProtoMessage() {}")
g.P()
// ProtoReflect method.
genMessageReflectMethods(g, f, m)
// Descriptor method.
if m.genRawDescMethod {
var indexes []string
for i := 1; i < len(m.Location.Path); i += 2 {
indexes = append(indexes, strconv.Itoa(int(m.Location.Path[i])))
}
g.P("// Deprecated: Use ", m.GoIdent, ".ProtoReflect.Descriptor instead.")
g.P("func (*", m.GoIdent, ") Descriptor() ([]byte, []int) {")
g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}")
g.P("}")
g.P()
f.needRawDesc = true
}
}
func genMessageGetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
for _, field := range m.Fields {
genNoInterfacePragma(g, m.isTracked)
// Getter for parent oneof.
if oneof := field.Oneof; oneof != nil && oneof.Fields[0] == field && !oneof.Desc.IsSynthetic() {
g.Annotate(m.GoIdent.GoName+".Get"+oneof.GoName, oneof.Location)
g.P("func (m *", m.GoIdent.GoName, ") Get", oneof.GoName, "() ", oneofInterfaceName(oneof), " {")
g.P("if m != nil {")
g.P("return m.", oneof.GoName)
g.P("}")
g.P("return nil")
g.P("}")
g.P()
}
// Getter for message field.
goType, pointer := fieldGoType(g, f, field)
defaultValue := fieldDefaultValue(g, f, m, field)
g.Annotate(m.GoIdent.GoName+".Get"+field.GoName, field.Location)
leadingComments := appendDeprecationSuffix("",
field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
switch {
case field.Desc.IsWeak():
g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", protoPackage.Ident("Message"), "{")
g.P("var w ", protoimplPackage.Ident("WeakFields"))
g.P("if x != nil {")
g.P("w = x.", genid.WeakFields_goname)
if m.isTracked {
g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName)
}
g.P("}")
g.P("return ", protoimplPackage.Ident("X"), ".GetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ")")
g.P("}")
case field.Oneof != nil && !field.Oneof.Desc.IsSynthetic():
g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {")
g.P("if x, ok := x.Get", field.Oneof.GoName, "().(*", field.GoIdent, "); ok {")
g.P("return x.", field.GoName)
g.P("}")
g.P("return ", defaultValue)
g.P("}")
default:
g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {")
if !field.Desc.HasPresence() || defaultValue == "nil" {
g.P("if x != nil {")
} else {
g.P("if x != nil && x.", field.GoName, " != nil {")
}
star := ""
if pointer {
star = "*"
}
g.P("return ", star, " x.", field.GoName)
g.P("}")
g.P("return ", defaultValue)
g.P("}")
}
g.P()
}
}
func genMessageSetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
for _, field := range m.Fields {
if !field.Desc.IsWeak() {
continue
}
genNoInterfacePragma(g, m.isTracked)
g.Annotate(m.GoIdent.GoName+".Set"+field.GoName, field.Location)
leadingComments := appendDeprecationSuffix("",
field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
g.P(leadingComments, "func (x *", m.GoIdent, ") Set", field.GoName, "(v ", protoPackage.Ident("Message"), ") {")
g.P("var w *", protoimplPackage.Ident("WeakFields"))
g.P("if x != nil {")
g.P("w = &x.", genid.WeakFields_goname)
if m.isTracked {
g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName)
}
g.P("}")
g.P(protoimplPackage.Ident("X"), ".SetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ", v)")
g.P("}")
g.P()
}
}
// fieldGoType returns the Go type used for a field.
//
// If it returns pointer=true, the struct field is a pointer to the type.
func fieldGoType(g *protogen.GeneratedFile, f *fileInfo, field *protogen.Field) (goType string, pointer bool) {
if field.Desc.IsWeak() {
return "struct{}", false
}
pointer = field.Desc.HasPresence()
switch field.Desc.Kind() {
case protoreflect.BoolKind:
goType = "bool"
case protoreflect.EnumKind:
goType = g.QualifiedGoIdent(field.Enum.GoIdent)
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
goType = "int32"
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
goType = "uint32"
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
goType = "int64"
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
goType = "uint64"
case protoreflect.FloatKind:
goType = "float32"
case protoreflect.DoubleKind:
goType = "float64"
case protoreflect.StringKind:
goType = "string"
case protoreflect.BytesKind:
goType = "[]byte"
pointer = false // rely on nullability of slices for presence
case protoreflect.MessageKind, protoreflect.GroupKind:
goType = "*" + g.QualifiedGoIdent(field.Message.GoIdent)
pointer = false // pointer captured as part of the type
}
switch {
case field.Desc.IsList():
return "[]" + goType, false
case field.Desc.IsMap():
keyType, _ := fieldGoType(g, f, field.Message.Fields[0])
valType, _ := fieldGoType(g, f, field.Message.Fields[1])
return fmt.Sprintf("map[%v]%v", keyType, valType), false
}
return goType, pointer
}
func fieldProtobufTagValue(field *protogen.Field) string {
var enumName string
if field.Desc.Kind() == protoreflect.EnumKind {
enumName = protoimpl.X.LegacyEnumName(field.Enum.Desc)
}
return tag.Marshal(field.Desc, enumName)
}
func fieldDefaultValue(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field) string {
if field.Desc.IsList() {
return "nil"
}
if field.Desc.HasDefault() {
defVarName := "Default_" + m.GoIdent.GoName + "_" + field.GoName
if field.Desc.Kind() == protoreflect.BytesKind {
return "append([]byte(nil), " + defVarName + "...)"
}
return defVarName
}
switch field.Desc.Kind() {
case protoreflect.BoolKind:
return "false"
case protoreflect.StringKind:
return `""`
case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.BytesKind:
return "nil"
case protoreflect.EnumKind:
val := field.Enum.Values[0]
if val.GoIdent.GoImportPath == f.GoImportPath {
return g.QualifiedGoIdent(val.GoIdent)
} else {
// If the enum value is declared in a different Go package,
// reference it by number since the name may not be correct.
// See https://github.com/golang/protobuf/issues/513.
return g.QualifiedGoIdent(field.Enum.GoIdent) + "(" + strconv.FormatInt(int64(val.Desc.Number()), 10) + ")"
}
default:
return "0"
}
}
func fieldJSONTagValue(field *protogen.Field) string {
return string(field.Desc.Name()) + ",omitempty"
}
func genExtensions(g *protogen.GeneratedFile, f *fileInfo) {
if len(f.allExtensions) == 0 {
return
}
g.P("var ", extensionTypesVarName(f), " = []", protoimplPackage.Ident("ExtensionInfo"), "{")
for _, x := range f.allExtensions {
g.P("{")
g.P("ExtendedType: (*", x.Extendee.GoIdent, ")(nil),")
goType, pointer := fieldGoType(g, f, x.Extension)
if pointer {
goType = "*" + goType
}
g.P("ExtensionType: (", goType, ")(nil),")
g.P("Field: ", x.Desc.Number(), ",")
g.P("Name: ", strconv.Quote(string(x.Desc.FullName())), ",")
g.P("Tag: ", strconv.Quote(fieldProtobufTagValue(x.Extension)), ",")
g.P("Filename: ", strconv.Quote(f.Desc.Path()), ",")
g.P("},")
}
g.P("}")
g.P()
// Group extensions by the target message.
var orderedTargets []protogen.GoIdent
allExtensionsByTarget := make(map[protogen.GoIdent][]*extensionInfo)
allExtensionsByPtr := make(map[*extensionInfo]int)
for i, x := range f.allExtensions {
target := x.Extendee.GoIdent
if len(allExtensionsByTarget[target]) == 0 {
orderedTargets = append(orderedTargets, target)
}
allExtensionsByTarget[target] = append(allExtensionsByTarget[target], x)
allExtensionsByPtr[x] = i
}
for _, target := range orderedTargets {
g.P("// Extension fields to ", target, ".")
g.P("var (")
for _, x := range allExtensionsByTarget[target] {
xd := x.Desc
typeName := xd.Kind().String()
switch xd.Kind() {
case protoreflect.EnumKind:
typeName = string(xd.Enum().FullName())
case protoreflect.MessageKind, protoreflect.GroupKind:
typeName = string(xd.Message().FullName())
}
fieldName := string(xd.Name())
leadingComments := x.Comments.Leading
if leadingComments != "" {
leadingComments += "\n"
}
leadingComments += protogen.Comments(fmt.Sprintf(" %v %v %v = %v;\n",
xd.Cardinality(), typeName, fieldName, xd.Number()))
leadingComments = appendDeprecationSuffix(leadingComments,
x.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
g.P(leadingComments,
"E_", x.GoIdent, " = &", extensionTypesVarName(f), "[", allExtensionsByPtr[x], "]",
trailingComment(x.Comments.Trailing))
}
g.P(")")
g.P()
}
}
// genMessageOneofWrapperTypes generates the oneof wrapper types and
// associates the types with the parent message type.
func genMessageOneofWrapperTypes(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
for _, oneof := range m.Oneofs {
if oneof.Desc.IsSynthetic() {
continue
}
ifName := oneofInterfaceName(oneof)
g.P("type ", ifName, " interface {")
g.P(ifName, "()")
g.P("}")
g.P()
for _, field := range oneof.Fields {
g.Annotate(field.GoIdent.GoName, field.Location)
g.Annotate(field.GoIdent.GoName+"."+field.GoName, field.Location)
g.P("type ", field.GoIdent, " struct {")
goType, _ := fieldGoType(g, f, field)
tags := structTags{
{"protobuf", fieldProtobufTagValue(field)},
}
if m.isTracked {
tags = append(tags, gotrackTags...)
}
leadingComments := appendDeprecationSuffix(field.Comments.Leading,
field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
g.P(leadingComments,
field.GoName, " ", goType, tags,
trailingComment(field.Comments.Trailing))
g.P("}")
g.P()
}
for _, field := range oneof.Fields {
g.P("func (*", field.GoIdent, ") ", ifName, "() {}")
g.P()
}
}
}
// oneofInterfaceName returns the name of the interface type implemented by
// the oneof field value types.
func oneofInterfaceName(oneof *protogen.Oneof) string {
return "is" + oneof.GoIdent.GoName
}
// genNoInterfacePragma generates a standalone "nointerface" pragma to
// decorate methods with field-tracking support.
func genNoInterfacePragma(g *protogen.GeneratedFile, tracked bool) {
if tracked {
g.P("//go:nointerface")
g.P()
}
}
var gotrackTags = structTags{{"go", "track"}}
// structTags is a data structure for building idiomatic Go struct tags.
// Each [2]string is a key-value pair, where value is the unescaped string.
//
// Example: structTags{{"key", "value"}}.String() -> `key:"value"`
type structTags [][2]string
func (tags structTags) String() string {
if len(tags) == 0 {
return ""
}
var ss []string
for _, tag := range tags {
// NOTE: When quoting the value, we need to make sure the backtick
// character does not appear. Convert all cases to the escaped hex form.
key := tag[0]
val := strings.Replace(strconv.Quote(tag[1]), "`", `\x60`, -1)
ss = append(ss, fmt.Sprintf("%s:%s", key, val))
}
return "`" + strings.Join(ss, " ") + "`"
}
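// Illustrative sketch (not part of the vendored file): a self-contained
// demonstration of the quoting rule from the NOTE above, using a hypothetical
// helper name quoteTagValue. Values are quoted with strconv.Quote and any
// backtick is rewritten to \x60 so the whole tag can be wrapped in backticks.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func quoteTagValue(v string) string {
	// Same transformation as structTags.String above.
	return strings.Replace(strconv.Quote(v), "`", `\x60`, -1)
}

func main() {
	fmt.Println("`" + "protobuf:" + quoteTagValue("bytes,1,opt,name=name") + "`")
	// A value containing a backtick stays representable inside the tag:
	fmt.Println("`" + "doc:" + quoteTagValue("contains a ` backtick") + "`")
}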
// appendDeprecationSuffix optionally appends a deprecation notice as a suffix.
func appendDeprecationSuffix(prefix protogen.Comments, deprecated bool) protogen.Comments {
if !deprecated {
return prefix
}
if prefix != "" {
prefix += "\n"
}
return prefix + " Deprecated: Do not use.\n"
}
// trailingComment is like protogen.Comments, but lacks a trailing newline.
type trailingComment protogen.Comments
func (c trailingComment) String() string {
s := strings.TrimSuffix(protogen.Comments(c).String(), "\n")
if strings.Contains(s, "\n") {
// We don't support multi-line trailing comments as it is unclear
// how to best render them in the generated code.
return ""
}
return s
}

View file

@@ -1,351 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package internal_gengo
import (
"fmt"
"math"
"strings"
"unicode/utf8"
"google.golang.org/protobuf/compiler/protogen"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
)
func genReflectFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) {
g.P("var ", f.GoDescriptorIdent, " ", protoreflectPackage.Ident("FileDescriptor"))
g.P()
genFileDescriptor(gen, g, f)
if len(f.allEnums) > 0 {
g.P("var ", enumTypesVarName(f), " = make([]", protoimplPackage.Ident("EnumInfo"), ",", len(f.allEnums), ")")
}
if len(f.allMessages) > 0 {
g.P("var ", messageTypesVarName(f), " = make([]", protoimplPackage.Ident("MessageInfo"), ",", len(f.allMessages), ")")
}
// Generate a unique list of Go types for all declarations and dependencies,
// and the associated index into the type list for all dependencies.
var goTypes []string
var depIdxs []string
seen := map[protoreflect.FullName]int{}
genDep := func(name protoreflect.FullName, depSource string) {
if depSource != "" {
line := fmt.Sprintf("%d, // %d: %s -> %s", seen[name], len(depIdxs), depSource, name)
depIdxs = append(depIdxs, line)
}
}
genEnum := func(e *protogen.Enum, depSource string) {
if e != nil {
name := e.Desc.FullName()
if _, ok := seen[name]; !ok {
line := fmt.Sprintf("(%s)(0), // %d: %s", g.QualifiedGoIdent(e.GoIdent), len(goTypes), name)
goTypes = append(goTypes, line)
seen[name] = len(seen)
}
if depSource != "" {
genDep(name, depSource)
}
}
}
genMessage := func(m *protogen.Message, depSource string) {
if m != nil {
name := m.Desc.FullName()
if _, ok := seen[name]; !ok {
line := fmt.Sprintf("(*%s)(nil), // %d: %s", g.QualifiedGoIdent(m.GoIdent), len(goTypes), name)
if m.Desc.IsMapEntry() {
// Map entry messages have no associated Go type.
line = fmt.Sprintf("nil, // %d: %s", len(goTypes), name)
}
goTypes = append(goTypes, line)
seen[name] = len(seen)
}
if depSource != "" {
genDep(name, depSource)
}
}
}
// This ordering is significant.
// See filetype.TypeBuilder.DependencyIndexes.
type offsetEntry struct {
start int
name string
}
var depOffsets []offsetEntry
for _, enum := range f.allEnums {
genEnum(enum.Enum, "")
}
for _, message := range f.allMessages {
genMessage(message.Message, "")
}
depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "field type_name"})
for _, message := range f.allMessages {
for _, field := range message.Fields {
if field.Desc.IsWeak() {
continue
}
source := string(field.Desc.FullName())
genEnum(field.Enum, source+":type_name")
genMessage(field.Message, source+":type_name")
}
}
depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension extendee"})
for _, extension := range f.allExtensions {
source := string(extension.Desc.FullName())
genMessage(extension.Extendee, source+":extendee")
}
depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension type_name"})
for _, extension := range f.allExtensions {
source := string(extension.Desc.FullName())
genEnum(extension.Enum, source+":type_name")
genMessage(extension.Message, source+":type_name")
}
depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method input_type"})
for _, service := range f.Services {
for _, method := range service.Methods {
source := string(method.Desc.FullName())
genMessage(method.Input, source+":input_type")
}
}
depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method output_type"})
for _, service := range f.Services {
for _, method := range service.Methods {
source := string(method.Desc.FullName())
genMessage(method.Output, source+":output_type")
}
}
depOffsets = append(depOffsets, offsetEntry{len(depIdxs), ""})
for i := len(depOffsets) - 2; i >= 0; i-- {
curr, next := depOffsets[i], depOffsets[i+1]
depIdxs = append(depIdxs, fmt.Sprintf("%d, // [%d:%d] is the sub-list for %s",
curr.start, curr.start, next.start, curr.name))
}
if len(depIdxs) > math.MaxInt32 {
panic("too many dependencies") // sanity check
}
g.P("var ", goTypesVarName(f), " = []interface{}{")
for _, s := range goTypes {
g.P(s)
}
g.P("}")
g.P("var ", depIdxsVarName(f), " = []int32{")
for _, s := range depIdxs {
g.P(s)
}
g.P("}")
g.P("func init() { ", initFuncName(f.File), "() }")
g.P("func ", initFuncName(f.File), "() {")
g.P("if ", f.GoDescriptorIdent, " != nil {")
g.P("return")
g.P("}")
// Ensure that initialization functions for different files in the same Go
// package run in the correct order: Call the init funcs for every .proto file
// imported by this one that is in the same Go package.
for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ {
impFile := gen.FilesByPath[imps.Get(i).Path()]
if impFile.GoImportPath != f.GoImportPath {
continue
}
g.P(initFuncName(impFile), "()")
}
if len(f.allMessages) > 0 {
// Populate MessageInfo.Exporters.
g.P("if !", protoimplPackage.Ident("UnsafeEnabled"), " {")
for _, message := range f.allMessages {
if sf := f.allMessageFieldsByPtr[message]; len(sf.unexported) > 0 {
idx := f.allMessagesByPtr[message]
typesVar := messageTypesVarName(f)
g.P(typesVar, "[", idx, "].Exporter = func(v interface{}, i int) interface{} {")
g.P("switch v := v.(*", message.GoIdent, "); i {")
for i := 0; i < sf.count; i++ {
if name := sf.unexported[i]; name != "" {
g.P("case ", i, ": return &v.", name)
}
}
g.P("default: return nil")
g.P("}")
g.P("}")
}
}
g.P("}")
// Populate MessageInfo.OneofWrappers.
for _, message := range f.allMessages {
if len(message.Oneofs) > 0 {
idx := f.allMessagesByPtr[message]
typesVar := messageTypesVarName(f)
// Associate the wrapper types by directly passing them to the MessageInfo.
g.P(typesVar, "[", idx, "].OneofWrappers = []interface{} {")
for _, oneof := range message.Oneofs {
if !oneof.Desc.IsSynthetic() {
for _, field := range oneof.Fields {
g.P("(*", field.GoIdent, ")(nil),")
}
}
}
g.P("}")
}
}
}
g.P("type x struct{}")
g.P("out := ", protoimplPackage.Ident("TypeBuilder"), "{")
g.P("File: ", protoimplPackage.Ident("DescBuilder"), "{")
g.P("GoPackagePath: ", reflectPackage.Ident("TypeOf"), "(x{}).PkgPath(),")
g.P("RawDescriptor: ", rawDescVarName(f), ",")
g.P("NumEnums: ", len(f.allEnums), ",")
g.P("NumMessages: ", len(f.allMessages), ",")
g.P("NumExtensions: ", len(f.allExtensions), ",")
g.P("NumServices: ", len(f.Services), ",")
g.P("},")
g.P("GoTypes: ", goTypesVarName(f), ",")
g.P("DependencyIndexes: ", depIdxsVarName(f), ",")
if len(f.allEnums) > 0 {
g.P("EnumInfos: ", enumTypesVarName(f), ",")
}
if len(f.allMessages) > 0 {
g.P("MessageInfos: ", messageTypesVarName(f), ",")
}
if len(f.allExtensions) > 0 {
g.P("ExtensionInfos: ", extensionTypesVarName(f), ",")
}
g.P("}.Build()")
g.P(f.GoDescriptorIdent, " = out.File")
// Set inputs to nil to allow GC to reclaim resources.
g.P(rawDescVarName(f), " = nil")
g.P(goTypesVarName(f), " = nil")
g.P(depIdxsVarName(f), " = nil")
g.P("}")
}
func genFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) {
descProto := proto.Clone(f.Proto).(*descriptorpb.FileDescriptorProto)
descProto.SourceCodeInfo = nil // drop source code information
b, err := proto.MarshalOptions{AllowPartial: true, Deterministic: true}.Marshal(descProto)
if err != nil {
gen.Error(err)
return
}
g.P("var ", rawDescVarName(f), " = []byte{")
for len(b) > 0 {
n := 16
if n > len(b) {
n = len(b)
}
s := ""
for _, c := range b[:n] {
s += fmt.Sprintf("0x%02x,", c)
}
g.P(s)
b = b[n:]
}
g.P("}")
g.P()
if f.needRawDesc {
onceVar := rawDescVarName(f) + "Once"
dataVar := rawDescVarName(f) + "Data"
g.P("var (")
g.P(onceVar, " ", syncPackage.Ident("Once"))
g.P(dataVar, " = ", rawDescVarName(f))
g.P(")")
g.P()
g.P("func ", rawDescVarName(f), "GZIP() []byte {")
g.P(onceVar, ".Do(func() {")
g.P(dataVar, " = ", protoimplPackage.Ident("X"), ".CompressGZIP(", dataVar, ")")
g.P("})")
g.P("return ", dataVar)
g.P("}")
g.P()
}
}
func genEnumReflectMethods(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) {
idx := f.allEnumsByPtr[e]
typesVar := enumTypesVarName(f)
// Descriptor method.
g.P("func (", e.GoIdent, ") Descriptor() ", protoreflectPackage.Ident("EnumDescriptor"), " {")
g.P("return ", typesVar, "[", idx, "].Descriptor()")
g.P("}")
g.P()
// Type method.
g.P("func (", e.GoIdent, ") Type() ", protoreflectPackage.Ident("EnumType"), " {")
g.P("return &", typesVar, "[", idx, "]")
g.P("}")
g.P()
// Number method.
g.P("func (x ", e.GoIdent, ") Number() ", protoreflectPackage.Ident("EnumNumber"), " {")
g.P("return ", protoreflectPackage.Ident("EnumNumber"), "(x)")
g.P("}")
g.P()
}
func genMessageReflectMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
idx := f.allMessagesByPtr[m]
typesVar := messageTypesVarName(f)
// ProtoReflect method.
g.P("func (x *", m.GoIdent, ") ProtoReflect() ", protoreflectPackage.Ident("Message"), " {")
g.P("mi := &", typesVar, "[", idx, "]")
g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " && x != nil {")
g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))")
g.P("if ms.LoadMessageInfo() == nil {")
g.P("ms.StoreMessageInfo(mi)")
g.P("}")
g.P("return ms")
g.P("}")
g.P("return mi.MessageOf(x)")
g.P("}")
g.P()
}
func fileVarName(f *protogen.File, suffix string) string {
prefix := f.GoDescriptorIdent.GoName
_, n := utf8.DecodeRuneInString(prefix)
prefix = strings.ToLower(prefix[:n]) + prefix[n:]
return prefix + "_" + suffix
}
func rawDescVarName(f *fileInfo) string {
return fileVarName(f.File, "rawDesc")
}
func goTypesVarName(f *fileInfo) string {
return fileVarName(f.File, "goTypes")
}
func depIdxsVarName(f *fileInfo) string {
return fileVarName(f.File, "depIdxs")
}
func enumTypesVarName(f *fileInfo) string {
return fileVarName(f.File, "enumTypes")
}
func messageTypesVarName(f *fileInfo) string {
return fileVarName(f.File, "msgTypes")
}
func extensionTypesVarName(f *fileInfo) string {
return fileVarName(f.File, "extTypes")
}
func initFuncName(f *protogen.File) string {
return fileVarName(f, "init")
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -1,653 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: kenton@google.com (Kenton Varda)
//
// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
// change.
//
// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
// just a program that reads a CodeGeneratorRequest from stdin and writes a
// CodeGeneratorResponse to stdout.
//
// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
// of dealing with the raw protocol defined here.
//
// A plugin executable needs only to be placed somewhere in the path. The
// plugin should be named "protoc-gen-$NAME", and will then be used when the
// flag "--${NAME}_out" is passed to protoc.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/compiler/plugin.proto
package pluginpb
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
reflect "reflect"
sync "sync"
)
// Sync with code_generator.h.
type CodeGeneratorResponse_Feature int32
const (
CodeGeneratorResponse_FEATURE_NONE CodeGeneratorResponse_Feature = 0
CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL CodeGeneratorResponse_Feature = 1
)
// Enum value maps for CodeGeneratorResponse_Feature.
var (
CodeGeneratorResponse_Feature_name = map[int32]string{
0: "FEATURE_NONE",
1: "FEATURE_PROTO3_OPTIONAL",
}
CodeGeneratorResponse_Feature_value = map[string]int32{
"FEATURE_NONE": 0,
"FEATURE_PROTO3_OPTIONAL": 1,
}
)
func (x CodeGeneratorResponse_Feature) Enum() *CodeGeneratorResponse_Feature {
p := new(CodeGeneratorResponse_Feature)
*p = x
return p
}
func (x CodeGeneratorResponse_Feature) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (CodeGeneratorResponse_Feature) Descriptor() protoreflect.EnumDescriptor {
return file_google_protobuf_compiler_plugin_proto_enumTypes[0].Descriptor()
}
func (CodeGeneratorResponse_Feature) Type() protoreflect.EnumType {
return &file_google_protobuf_compiler_plugin_proto_enumTypes[0]
}
func (x CodeGeneratorResponse_Feature) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Do not use.
func (x *CodeGeneratorResponse_Feature) UnmarshalJSON(b []byte) error {
num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
if err != nil {
return err
}
*x = CodeGeneratorResponse_Feature(num)
return nil
}
// Deprecated: Use CodeGeneratorResponse_Feature.Descriptor instead.
func (CodeGeneratorResponse_Feature) EnumDescriptor() ([]byte, []int) {
return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0}
}
// The version number of protocol compiler.
type Version struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
// be empty for mainline stable releases.
Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
}
func (x *Version) Reset() {
*x = Version{}
if protoimpl.UnsafeEnabled {
mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Version) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Version) ProtoMessage() {}
func (x *Version) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Version.ProtoReflect.Descriptor instead.
func (*Version) Descriptor() ([]byte, []int) {
return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{0}
}
func (x *Version) GetMajor() int32 {
if x != nil && x.Major != nil {
return *x.Major
}
return 0
}
func (x *Version) GetMinor() int32 {
if x != nil && x.Minor != nil {
return *x.Minor
}
return 0
}
func (x *Version) GetPatch() int32 {
if x != nil && x.Patch != nil {
return *x.Patch
}
return 0
}
func (x *Version) GetSuffix() string {
if x != nil && x.Suffix != nil {
return *x.Suffix
}
return ""
}
// An encoded CodeGeneratorRequest is written to the plugin's stdin.
type CodeGeneratorRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The .proto files that were explicitly listed on the command-line. The
// code generator should generate code only for these files. Each file's
// descriptor will be included in proto_file, below.
FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
// The generator parameter passed on the command-line.
Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
// FileDescriptorProtos for all files in files_to_generate and everything
// they import. The files will appear in topological order, so each file
// appears before any file that imports it.
//
// protoc guarantees that all proto_files will be written after
// the fields above, even though this is not technically guaranteed by the
// protobuf wire format. This theoretically could allow a plugin to stream
// in the FileDescriptorProtos and handle them one by one rather than read
// the entire set into memory at once. However, as of this writing, this
// is not similarly optimized on protoc's end -- it will store all fields in
// memory at once before sending them to the plugin.
//
// Type names of fields and extensions in the FileDescriptorProto are always
// fully qualified.
ProtoFile []*descriptorpb.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
// The version number of protocol compiler.
CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
}
func (x *CodeGeneratorRequest) Reset() {
*x = CodeGeneratorRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CodeGeneratorRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CodeGeneratorRequest) ProtoMessage() {}
func (x *CodeGeneratorRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CodeGeneratorRequest.ProtoReflect.Descriptor instead.
func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) {
return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{1}
}
func (x *CodeGeneratorRequest) GetFileToGenerate() []string {
if x != nil {
return x.FileToGenerate
}
return nil
}
func (x *CodeGeneratorRequest) GetParameter() string {
if x != nil && x.Parameter != nil {
return *x.Parameter
}
return ""
}
func (x *CodeGeneratorRequest) GetProtoFile() []*descriptorpb.FileDescriptorProto {
if x != nil {
return x.ProtoFile
}
return nil
}
func (x *CodeGeneratorRequest) GetCompilerVersion() *Version {
if x != nil {
return x.CompilerVersion
}
return nil
}
// The plugin writes an encoded CodeGeneratorResponse to stdout.
type CodeGeneratorResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Error message. If non-empty, code generation failed. The plugin process
// should exit with status code zero even if it reports an error in this way.
//
// This should be used to indicate errors in .proto files which prevent the
// code generator from generating correct code. Errors which indicate a
// problem in protoc itself -- such as the input CodeGeneratorRequest being
// unparseable -- should be reported by writing a message to stderr and
// exiting with a non-zero status code.
Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
// A bitmask of supported features that the code generator supports.
// This is a bitwise "or" of values from the Feature enum.
SupportedFeatures *uint64 `protobuf:"varint,2,opt,name=supported_features,json=supportedFeatures" json:"supported_features,omitempty"`
File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
}
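// Illustrative sketch (not part of the vendored file): the raw plugin contract
// described above, without any helper library. The plugin reads an encoded
// CodeGeneratorRequest from stdin, writes an encoded CodeGeneratorResponse to
// stdout, and reports .proto-level problems via the Error field while still
// exiting with status zero; only protoc-level problems go to stderr with a
// non-zero exit.
package main

import (
	"fmt"
	"io"
	"os"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/pluginpb"
)

func main() {
	in, err := io.ReadAll(os.Stdin)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	req := &pluginpb.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	resp := &pluginpb.CodeGeneratorResponse{}
	if len(req.GetFileToGenerate()) == 0 {
		// A problem with the .proto input: report it in-band, not via exit code.
		resp.Error = proto.String("no files to generate")
	}
	out, err := proto.Marshal(resp)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	os.Stdout.Write(out)
}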
func (x *CodeGeneratorResponse) Reset() {
*x = CodeGeneratorResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CodeGeneratorResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CodeGeneratorResponse) ProtoMessage() {}
func (x *CodeGeneratorResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CodeGeneratorResponse.ProtoReflect.Descriptor instead.
func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) {
return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2}
}
func (x *CodeGeneratorResponse) GetError() string {
if x != nil && x.Error != nil {
return *x.Error
}
return ""
}
func (x *CodeGeneratorResponse) GetSupportedFeatures() uint64 {
if x != nil && x.SupportedFeatures != nil {
return *x.SupportedFeatures
}
return 0
}
func (x *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
if x != nil {
return x.File
}
return nil
}
// Represents a single generated file.
type CodeGeneratorResponse_File struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The file name, relative to the output directory. The name must not
// contain "." or ".." components and must be relative, not be absolute (so,
// the file cannot lie outside the output directory). "/" must be used as
// the path separator, not "\".
//
// If the name is omitted, the content will be appended to the previous
// file. This allows the generator to break large files into small chunks,
// and allows the generated text to be streamed back to protoc so that large
// files need not reside completely in memory at one time. Note that as of
// this writing protoc does not optimize for this -- it will read the entire
// CodeGeneratorResponse before writing files to disk.
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// If non-empty, indicates that the named file should already exist, and the
// content here is to be inserted into that file at a defined insertion
// point. This feature allows a code generator to extend the output
// produced by another code generator. The original generator may provide
// insertion points by placing special annotations in the file that look
// like:
// @@protoc_insertion_point(NAME)
// The annotation can have arbitrary text before and after it on the line,
// which allows it to be placed in a comment. NAME should be replaced with
// an identifier naming the point -- this is what other generators will use
// as the insertion_point. Code inserted at this point will be placed
// immediately above the line containing the insertion point (thus multiple
// insertions to the same point will come out in the order they were added).
// The double-@ is intended to make it unlikely that the generated code
// could contain things that look like insertion points by accident.
//
// For example, the C++ code generator places the following line in the
// .pb.h files that it generates:
// // @@protoc_insertion_point(namespace_scope)
// This line appears within the scope of the file's package namespace, but
// outside of any particular class. Another plugin can then specify the
// insertion_point "namespace_scope" to generate additional classes or
// other declarations that should be placed in this scope.
//
// Note that if the line containing the insertion point begins with
// whitespace, the same whitespace will be added to every line of the
// inserted text. This is useful for languages like Python, where
// indentation matters. In these languages, the insertion point comment
// should be indented the same amount as any inserted code will need to be
// in order to work correctly in that context.
//
// The code generator that generates the initial file and the one which
// inserts into it must both run as part of a single invocation of protoc.
// Code generators are executed in the order in which they appear on the
// command line.
//
// If |insertion_point| is present, |name| must also be present.
InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
// The file contents.
Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
// Information describing the file content being inserted. If an insertion
// point is used, this information will be appropriately offset and inserted
// into the code generation metadata for the generated files.
GeneratedCodeInfo *descriptorpb.GeneratedCodeInfo `protobuf:"bytes,16,opt,name=generated_code_info,json=generatedCodeInfo" json:"generated_code_info,omitempty"`
}
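// Illustrative sketch (not part of the vendored file): how a second code
// generator might target the insertion point mechanism documented above. The
// file name and insertion point here are hypothetical and must match an
// annotation emitted by the generator that produced the original file.
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/pluginpb"
)

func main() {
	f := &pluginpb.CodeGeneratorResponse_File{
		// Existing generated file to modify, relative to the output directory.
		Name: proto.String("example.pb.h"),
		// Must correspond to an "@@protoc_insertion_point(namespace_scope)"
		// annotation already present in that file.
		InsertionPoint: proto.String("namespace_scope"),
		Content:        proto.String("// declaration injected by a second plugin\n"),
	}
	fmt.Println(f.GetName(), f.GetInsertionPoint())
}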
func (x *CodeGeneratorResponse_File) Reset() {
*x = CodeGeneratorResponse_File{}
if protoimpl.UnsafeEnabled {
mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CodeGeneratorResponse_File) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CodeGeneratorResponse_File) ProtoMessage() {}
func (x *CodeGeneratorResponse_File) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CodeGeneratorResponse_File.ProtoReflect.Descriptor instead.
func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) {
return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0}
}
func (x *CodeGeneratorResponse_File) GetName() string {
if x != nil && x.Name != nil {
return *x.Name
}
return ""
}
func (x *CodeGeneratorResponse_File) GetInsertionPoint() string {
if x != nil && x.InsertionPoint != nil {
return *x.InsertionPoint
}
return ""
}
func (x *CodeGeneratorResponse_File) GetContent() string {
if x != nil && x.Content != nil {
return *x.Content
}
return ""
}
func (x *CodeGeneratorResponse_File) GetGeneratedCodeInfo() *descriptorpb.GeneratedCodeInfo {
if x != nil {
return x.GeneratedCodeInfo
}
return nil
}
var File_google_protobuf_compiler_plugin_proto protoreflect.FileDescriptor
var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{
0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69,
0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65,
0x72, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14,
0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d,
0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20,
0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61,
0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68,
0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xf1, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x64,
0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e,
0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c,
0x65, 0x54, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70,
0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
0x6f, 0x74, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x4c,
0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69,
0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69,
0x6c, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d,
0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x03, 0x0a,
0x15, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12,
0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72,
0x74, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x04, 0x66,
0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70,
0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52,
0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0xb1, 0x01, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12,
0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73,
0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63,
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f,
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x52, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
0x65, 0x64, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x10, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f,
0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x38, 0x0a, 0x07, 0x46, 0x65, 0x61,
0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f,
0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52,
0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41,
0x4c, 0x10, 0x01, 0x42, 0x57, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69,
0x6c, 0x65, 0x72, 0x42, 0x0c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f,
0x73, 0x5a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x70, 0x62,
}
var (
file_google_protobuf_compiler_plugin_proto_rawDescOnce sync.Once
file_google_protobuf_compiler_plugin_proto_rawDescData = file_google_protobuf_compiler_plugin_proto_rawDesc
)
func file_google_protobuf_compiler_plugin_proto_rawDescGZIP() []byte {
file_google_protobuf_compiler_plugin_proto_rawDescOnce.Do(func() {
file_google_protobuf_compiler_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_compiler_plugin_proto_rawDescData)
})
return file_google_protobuf_compiler_plugin_proto_rawDescData
}
var file_google_protobuf_compiler_plugin_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_google_protobuf_compiler_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_google_protobuf_compiler_plugin_proto_goTypes = []interface{}{
(CodeGeneratorResponse_Feature)(0), // 0: google.protobuf.compiler.CodeGeneratorResponse.Feature
(*Version)(nil), // 1: google.protobuf.compiler.Version
(*CodeGeneratorRequest)(nil), // 2: google.protobuf.compiler.CodeGeneratorRequest
(*CodeGeneratorResponse)(nil), // 3: google.protobuf.compiler.CodeGeneratorResponse
(*CodeGeneratorResponse_File)(nil), // 4: google.protobuf.compiler.CodeGeneratorResponse.File
(*descriptorpb.FileDescriptorProto)(nil), // 5: google.protobuf.FileDescriptorProto
(*descriptorpb.GeneratedCodeInfo)(nil), // 6: google.protobuf.GeneratedCodeInfo
}
var file_google_protobuf_compiler_plugin_proto_depIdxs = []int32{
5, // 0: google.protobuf.compiler.CodeGeneratorRequest.proto_file:type_name -> google.protobuf.FileDescriptorProto
1, // 1: google.protobuf.compiler.CodeGeneratorRequest.compiler_version:type_name -> google.protobuf.compiler.Version
4, // 2: google.protobuf.compiler.CodeGeneratorResponse.file:type_name -> google.protobuf.compiler.CodeGeneratorResponse.File
6, // 3: google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info:type_name -> google.protobuf.GeneratedCodeInfo
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_google_protobuf_compiler_plugin_proto_init() }
func file_google_protobuf_compiler_plugin_proto_init() {
if File_google_protobuf_compiler_plugin_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_protobuf_compiler_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Version); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_protobuf_compiler_plugin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CodeGeneratorRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_protobuf_compiler_plugin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CodeGeneratorResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_google_protobuf_compiler_plugin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*CodeGeneratorResponse_File); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_protobuf_compiler_plugin_proto_rawDesc,
NumEnums: 1,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_google_protobuf_compiler_plugin_proto_goTypes,
DependencyIndexes: file_google_protobuf_compiler_plugin_proto_depIdxs,
EnumInfos: file_google_protobuf_compiler_plugin_proto_enumTypes,
MessageInfos: file_google_protobuf_compiler_plugin_proto_msgTypes,
}.Build()
File_google_protobuf_compiler_plugin_proto = out.File
file_google_protobuf_compiler_plugin_proto_rawDesc = nil
file_google_protobuf_compiler_plugin_proto_goTypes = nil
file_google_protobuf_compiler_plugin_proto_depIdxs = nil
}

vendor/modules.txt vendored
View file

@@ -1,4 +1,4 @@
# cloud.google.com/go v0.81.0
# cloud.google.com/go v0.92.0
## explicit; go 1.11
cloud.google.com/go
cloud.google.com/go/compute/metadata
@@ -400,10 +400,8 @@ github.com/golang/gddo/httputil/header
github.com/golang/groupcache/lru
# github.com/golang/protobuf v1.5.2
## explicit; go 1.9
github.com/golang/protobuf/internal/gengogrpc
github.com/golang/protobuf/jsonpb
github.com/golang/protobuf/proto
github.com/golang/protobuf/protoc-gen-go
github.com/golang/protobuf/protoc-gen-go/descriptor
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
@@ -493,11 +491,6 @@ github.com/ishidawataru/sctp
# github.com/jmespath/go-jmespath v0.3.0
## explicit; go 1.14
github.com/jmespath/go-jmespath
# github.com/jstemmer/go-junit-report v0.9.1
## explicit; go 1.2
github.com/jstemmer/go-junit-report
github.com/jstemmer/go-junit-report/formatter
github.com/jstemmer/go-junit-report/parser
# github.com/klauspost/compress v1.14.3
## explicit; go 1.15
github.com/klauspost/compress
@@ -820,14 +813,6 @@ golang.org/x/crypto/pkcs12/internal/rc2
golang.org/x/crypto/salsa20/salsa
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
# golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
## explicit; go 1.11
golang.org/x/lint
golang.org/x/lint/golint
# golang.org/x/mod v0.4.2
## explicit; go 1.12
golang.org/x/mod/module
golang.org/x/mod/semver
# golang.org/x/net v0.0.0-20211216030914-fe4d6282115f
## explicit; go 1.17
golang.org/x/net/bpf
@@ -882,26 +867,11 @@ golang.org/x/text/unicode/norm
# golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11
## explicit
golang.org/x/time/rate
# golang.org/x/tools v0.1.5
## explicit; go 1.17
golang.org/x/tools/cmd/goimports
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/go/gcexportdata
golang.org/x/tools/go/internal/gcimporter
golang.org/x/tools/internal/event
golang.org/x/tools/internal/event/core
golang.org/x/tools/internal/event/keys
golang.org/x/tools/internal/event/label
golang.org/x/tools/internal/fastwalk
golang.org/x/tools/internal/gocommand
golang.org/x/tools/internal/gopathwalk
golang.org/x/tools/internal/imports
golang.org/x/tools/internal/typeparams
# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
## explicit; go 1.11
golang.org/x/xerrors
golang.org/x/xerrors/internal
# google.golang.org/api v0.46.0
# google.golang.org/api v0.54.0
## explicit; go 1.11
google.golang.org/api/internal
google.golang.org/api/internal/impersonate
@@ -1000,8 +970,6 @@ google.golang.org/grpc/status
google.golang.org/grpc/tap
# google.golang.org/protobuf v1.27.1
## explicit; go 1.9
google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo
google.golang.org/protobuf/compiler/protogen
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire
@@ -1038,7 +1006,6 @@ google.golang.org/protobuf/types/known/fieldmaskpb
google.golang.org/protobuf/types/known/structpb
google.golang.org/protobuf/types/known/timestamppb
google.golang.org/protobuf/types/known/wrapperspb
google.golang.org/protobuf/types/pluginpb
# gotest.tools/v3 v3.1.0
## explicit; go 1.11
gotest.tools/v3/assert