diff --git a/vendor.conf b/vendor.conf index 5a6b4c8284..484d320c1d 100644 --- a/vendor.conf +++ b/vendor.conf @@ -115,10 +115,11 @@ github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a # gcplogs deps golang.org/x/oauth2 bf48bf16ab8d622ce64ec6ce98d2c98f916b6303 -google.golang.org/api de943baf05a022a8f921b544b7827bacaba1aed5 -go.opencensus.io c3ed530f775d85e577ca652cb052a52c078aad26 # v0.11.0 -cloud.google.com/go 0fd7230b2a7505833d5f69b75cbd6c9582401479 # v0.23.0 -github.com/googleapis/gax-go 317e0006254c44a0ac427cc52a0e083ff0b9622f # v2.0.0 +google.golang.org/api dec2ee309f5b09fc59bc40676447c15736284d78 # v0.8.0 +github.com/golang/groupcache 869f871628b6baa9cfbc11732cdf6546b17c1298 +go.opencensus.io d835ff86be02193d324330acdb7d65546b05f814 # v0.22.3 +cloud.google.com/go ceeb313ad77b789a7fa5287b36a1d127b69b7093 # v0.44.3 +github.com/googleapis/gax-go bd5b16380fd03dc758d11cef74ba2e3bc8b0e8c2 # v2.0.5 google.golang.org/genproto 3f1135a288c9a07e340ae8ba4cc6c7065a3160e8 # containerd diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index ac66000bc7..f3a0c3c5e8 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -8,7 +8,7 @@ Go packages for [Google Cloud Platform](https://cloud.google.com) services. import "cloud.google.com/go" ``` -To install the packages on your system, +To install the packages on your system, *do not clone the repo*. Instead use ``` $ go get -u cloud.google.com/go/... @@ -19,263 +19,44 @@ make backwards-incompatible changes. **NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). - * [News](#news) - * [Supported APIs](#supported-apis) - * [Go Versions Supported](#go-versions-supported) - * [Authorization](#authorization) - * [Cloud Datastore](#cloud-datastore-) - * [Cloud Storage](#cloud-storage-) - * [Cloud Pub/Sub](#cloud-pub-sub-) - * [Cloud BigQuery](#cloud-bigquery-) - * [Stackdriver Logging](#stackdriver-logging-) - * [Cloud Spanner](#cloud-spanner-) - - -## News - -_May 18, 2018_ - -*v0.23.0* - -- bigquery: Add DDL stats to query statistics. -- bigtable: - - cbt: Add cells-per-column limit for row lookup. - - cbt: Make it possible to combine read filters. -- dlp: v2beta2 client removed. Use the v2 client instead. -- firestore, spanner: Fix compilation errors due to protobuf changes. - -_May 8, 2018_ - -*v0.22.0* - -- bigtable: - - cbt: Support cells per column limit for row read. - - bttest: Correctly handle empty RowSet. - - Fix ReadModifyWrite operation in emulator. - - Fix API path in GetCluster. - -- bigquery: - - BEHAVIOR CHANGE: Retry on 503 status code. - - Add dataset.DeleteWithContents. - - Add SchemaUpdateOptions for query jobs. - - Add Timeline to QueryStatistics. - - Add more stats to ExplainQueryStage. - - Support Parquet data format. - -- datastore: - - Support omitempty for times. - -- dlp: - - **BREAKING CHANGE:** Remove v1beta1 client. Please migrate to the v2 client, - which is now out of beta. - - Add v2 client. - -- firestore: - - BEHAVIOR CHANGE: Treat set({}, MergeAll) as valid. - -- iam: - - Support JWT signing via SignJwt callopt. - -- profiler: - - BEHAVIOR CHANGE: PollForSerialOutput returns an error when context.Done. - - BEHAVIOR CHANGE: Increase the initial backoff to 1 minute. - - Avoid returning empty serial port output. - -- pubsub: - - BEHAVIOR CHANGE: Don't backoff during next retryable error once stream is healthy. - - BEHAVIOR CHANGE: Don't backoff on EOF. 
- - pstest: Support Acknowledge and ModifyAckDeadline RPCs. - -- redis: - - Add v1 beta Redis client. - -- spanner: - - Support SessionLabels. - -- speech: - - Add api v1 beta1 client. - -- storage: - - BEHAVIOR CHANGE: Retry reads when retryable error occurs. - - Fix delete of object in requester-pays bucket. - - Support KMS integration. - -_April 9, 2018_ - -*v0.21.0* - -- bigquery: - - Add OpenCensus tracing. - -- firestore: - - **BREAKING CHANGE:** If a document does not exist, return a DocumentSnapshot - whose Exists method returns false. DocumentRef.Get and Transaction.Get - return the non-nil DocumentSnapshot in addition to a NotFound error. - **DocumentRef.GetAll and Transaction.GetAll return a non-nil - DocumentSnapshot instead of nil.** - - Add DocumentIterator.Stop. **Call Stop whenever you are done with a - DocumentIterator.** - - Added Query.Snapshots and DocumentRef.Snapshots, which provide realtime - notification of updates. See https://cloud.google.com/firestore/docs/query-data/listen. - - Canceling an RPC now always returns a grpc.Status with codes.Canceled. - -- spanner: - - Add `CommitTimestamp`, which supports inserting the commit timestamp of a - transaction into a column. - -_March 22, 2018_ - -*v0.20.0* - -- bigquery: Support SchemaUpdateOptions for load jobs. - -- bigtable: - - Add SampleRowKeys. - - cbt: Support union, intersection GCPolicy. - - Retry admin RPCS. - - Add trace spans to retries. - -- datastore: Add OpenCensus tracing. - -- firestore: - - Fix queries involving Null and NaN. - - Allow Timestamp protobuffers for time values. - -- logging: Add a WriteTimeout option. - -- spanner: Support Batch API. - -- storage: Add OpenCensus tracing. - - -_February 26, 2018_ - -*v0.19.0* - -- bigquery: - - Support customer-managed encryption keys. - -- bigtable: - - Improved emulator support. - - Support GetCluster. - -- datastore: - - Add general mutations. - - Support pointer struct fields. - - Support transaction options. - -- firestore: - - Add Transaction.GetAll. - - Support document cursors. - -- logging: - - Support concurrent RPCs to the service. - - Support per-entry resources. - -- profiler: - - Add config options to disable heap and thread profiling. - - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set. - -- pubsub: - - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the - callback returns). - - Add SubscriptionInProject. - - Add OpenCensus instrumentation for streaming pull. - -- storage: - - Support CORS. - - -_January 18, 2018_ - -*v0.18.0* - -- bigquery: - - Marked stable. - - Schema inference of nullable fields supported. - - Added TimePartitioning to QueryConfig. - -- firestore: Data provided to DocumentRef.Set with a Merge option can contain - Delete sentinels. - -- logging: Clients can accept parent resources other than projects. - -- pubsub: - - pubsub/pstest: A lighweight fake for pubsub. Experimental; feedback welcome. - - Support updating more subscription metadata: AckDeadline, - RetainAckedMessages and RetentionDuration. - -- oslogin/apiv1beta: New client for the Cloud OS Login API. - -- rpcreplay: A package for recording and replaying gRPC traffic. - -- spanner: - - Add a ReadWithOptions that supports a row limit, as well as an index. - - Support query plan and execution statistics. - - Added [OpenCensus](http://opencensus.io) support. - -- storage: Clarify checksum validation for gzipped files (it is not validated - when the file is served uncompressed). 
- - -_December 11, 2017_ - -*v0.17.0* - -- firestore BREAKING CHANGES: - - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update. - Change - `docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})` - to - `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})` - - Change - `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)` - to - `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})` - - Rename MergePaths to Merge; require args to be FieldPaths - - A value stored as an integer can be read into a floating-point field, and vice versa. -- bigtable/cmd/cbt: - - Support deleting a column. - - Add regex option for row read. -- spanner: Mark stable. -- storage: - - Add Reader.ContentEncoding method. - - Fix handling of SignedURL headers. -- bigquery: - - If Uploader.Put is called with no rows, it returns nil without making a - call. - - Schema inference supports the "nullable" option in struct tags for - non-required fields. - - TimePartitioning supports "Field". - - -[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md) - ## Supported APIs -Google API | Status | Package ----------------------------------|--------------|----------------------------------------------------------- -[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] -[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] -[Container][cloud-container] | alpha | [`cloud.google.com/go/container/apiv1`][cloud-container-ref] -[Data Loss Prevention][cloud-dlp]| alpha | [`cloud.google.com/go/dlp/apiv2beta1`][cloud-dlp-ref] -[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref] -[Debugger][cloud-debugger] | alpha | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref] -[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref] -[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref] -[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref] -[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref] -[Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref] -[OS Login][cloud-oslogin] | alpha | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref] -[Pub/Sub][cloud-pubsub] | beta | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] -[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref] -[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref] -[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref] -[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref] -[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref] -[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref] - +Google API | Status | Package +------------------------------------------------|--------------|----------------------------------------------------------- +[Asset][cloud-asset] | alpha | [`cloud.google.com/go/asset/v1beta`][cloud-asset-ref] +[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] +[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] +[Cloudtasks][cloud-tasks] | 
stable | [`cloud.google.com/go/cloudtasks/apiv2`][cloud-tasks-ref]
+[Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`][cloud-container-ref]
+[ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1beta1`][cloud-containeranalysis-ref]
+[Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`][cloud-dataproc-ref]
+[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
+[Debugger][cloud-debugger] | alpha | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref]
+[Dialogflow][cloud-dialogflow] | alpha | [`cloud.google.com/go/dialogflow/apiv2`][cloud-dialogflow-ref]
+[Data Loss Prevention][cloud-dlp] | alpha | [`cloud.google.com/go/dlp/apiv2`][cloud-dlp-ref]
+[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
+[Firestore][cloud-firestore] | stable | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
+[IAM][cloud-iam] | stable | [`cloud.google.com/go/iam`][cloud-iam-ref]
+[IoT][cloud-iot] | alpha | [`cloud.google.com/go/iot/apiv1`][cloud-iot-ref]
+[KMS][cloud-kms] | stable | [`cloud.google.com/go/kms`][cloud-kms-ref]
+[Natural Language][cloud-natural-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-natural-language-ref]
+[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref]
+[Monitoring][cloud-monitoring] | alpha | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
+[OS Login][cloud-oslogin] | alpha | [`cloud.google.com/go/oslogin/apiv1`][cloud-oslogin-ref]
+[Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
+[Phishing Protection][cloud-phishingprotection] | alpha | [`cloud.google.com/go/phishingprotection/apiv1beta1`][cloud-phishingprotection-ref]
+[reCAPTCHA Enterprise][cloud-recaptcha] | alpha | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`][cloud-recaptcha-ref]
+[Memorystore][cloud-memorystore] | alpha | [`cloud.google.com/go/redis/apiv1`][cloud-memorystore-ref]
+[Scheduler][cloud-scheduler] | stable | [`cloud.google.com/go/scheduler/apiv1`][cloud-scheduler-ref]
+[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
+[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
+[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
+[Talent][cloud-talent] | alpha | [`cloud.google.com/go/talent/apiv4beta1`][cloud-talent-ref]
+[Text To Speech][cloud-texttospeech] | alpha | [`cloud.google.com/go/texttospeech/apiv1`][cloud-texttospeech-ref]
+[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace/apiv2`][cloud-trace-ref]
+[Translate][cloud-translate] | stable | [`cloud.google.com/go/translate`][cloud-translate-ref]
+[Video Intelligence][cloud-video] | alpha | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
+[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]

> **Alpha status**: the API is still being actively developed. As a
> result, it might change in backward-incompatible ways and is not recommended
@@ -288,23 +69,16 @@ Google API | Status | Package
> **Stable status**: the API is mature and ready for production use. We will
> continue addressing bugs and feature requests.
-Documentation and examples are available at
-https://godoc.org/cloud.google.com/go
-
-Visit or join the
-[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce)
-for updates on these packages.
+Documentation and examples are available at [godoc.org/cloud.google.com/go](https://godoc.org/cloud.google.com/go)

## Go Versions Supported

We support the two most recent major versions of Go. If Google App Engine uses
-an older version, we support that as well. You can see which versions are
-currently supported by looking at the lines following `go:` in
-[`.travis.yml`](.travis.yml).
+an older version, we support that as well.

## Authorization

-By default, each API will use [Google Application Default Credentials][default-creds]
+By default, each API will use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials)
for authorization credentials used in calling the API endpoints. This will allow your
application to run in many environments without requiring explicit configuration.

@@ -316,12 +90,12 @@ client, err := storage.NewClient(ctx)
```

To authorize using a
[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
pass
-[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile)
+[`option.WithCredentialsFile`](https://godoc.org/google.golang.org/api/option#WithCredentialsFile)
to the `NewClient` function of the desired package. For example:

[snip]:# (auth-JSON)
```go
-client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json"))
+client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json"))
```

You can exert more control over authorization by using the
@@ -335,249 +109,6 @@ tokenSource := ...
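// For illustration only (an editorial sketch; it assumes the
// golang.org/x/oauth2/google package is imported): one way to obtain the
// TokenSource above is from Application Default Credentials.
//
//	creds, err := google.FindDefaultCredentials(ctx, storage.ScopeReadOnly)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	tokenSource := creds.TokenSource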
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) ``` -## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore) - -- [About Cloud Datastore][cloud-datastore] -- [Activating the API for your project][cloud-datastore-activation] -- [API documentation][cloud-datastore-docs] -- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore) -- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks) - -### Example Usage - -First create a `datastore.Client` to use throughout your application: - -[snip]:# (datastore-1) -```go -client, err := datastore.NewClient(ctx, "my-project-id") -if err != nil { - log.Fatal(err) -} -``` - -Then use that client to interact with the API: - -[snip]:# (datastore-2) -```go -type Post struct { - Title string - Body string `datastore:",noindex"` - PublishedAt time.Time -} -keys := []*datastore.Key{ - datastore.NameKey("Post", "post1", nil), - datastore.NameKey("Post", "post2", nil), -} -posts := []*Post{ - {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, - {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, -} -if _, err := client.PutMulti(ctx, keys, posts); err != nil { - log.Fatal(err) -} -``` - -## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) - -- [About Cloud Storage][cloud-storage] -- [API documentation][cloud-storage-docs] -- [Go client documentation](https://godoc.org/cloud.google.com/go/storage) -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage) - -### Example Usage - -First create a `storage.Client` to use throughout your application: - -[snip]:# (storage-1) -```go -client, err := storage.NewClient(ctx) -if err != nil { - log.Fatal(err) -} -``` - -[snip]:# (storage-2) -```go -// Read the object1 from bucket. -rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) -if err != nil { - log.Fatal(err) -} -defer rc.Close() -body, err := ioutil.ReadAll(rc) -if err != nil { - log.Fatal(err) -} -``` - -## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) - -- [About Cloud Pubsub][cloud-pubsub] -- [API documentation][cloud-pubsub-docs] -- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub) -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub) - -### Example Usage - -First create a `pubsub.Client` to use throughout your application: - -[snip]:# (pubsub-1) -```go -client, err := pubsub.NewClient(ctx, "project-id") -if err != nil { - log.Fatal(err) -} -``` - -Then use the client to publish and subscribe: - -[snip]:# (pubsub-2) -```go -// Publish "hello world" on topic1. -topic := client.Topic("topic1") -res := topic.Publish(ctx, &pubsub.Message{ - Data: []byte("hello world"), -}) -// The publish happens asynchronously. -// Later, you can get the result from res: -... -msgID, err := res.Get(ctx) -if err != nil { - log.Fatal(err) -} - -// Use a callback to receive messages via subscription1. -sub := client.Subscription("subscription1") -err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { - fmt.Println(m.Data) - m.Ack() // Acknowledge that we've consumed the message. 
-}) -if err != nil { - log.Println(err) -} -``` - -## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery) - -- [About Cloud BigQuery][cloud-bigquery] -- [API documentation][cloud-bigquery-docs] -- [Go client documentation][cloud-bigquery-ref] -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery) - -### Example Usage - -First create a `bigquery.Client` to use throughout your application: -[snip]:# (bq-1) -```go -c, err := bigquery.NewClient(ctx, "my-project-ID") -if err != nil { - // TODO: Handle error. -} -``` - -Then use that client to interact with the API: -[snip]:# (bq-2) -```go -// Construct a query. -q := c.Query(` - SELECT year, SUM(number) - FROM [bigquery-public-data:usa_names.usa_1910_2013] - WHERE name = "William" - GROUP BY year - ORDER BY year -`) -// Execute the query. -it, err := q.Read(ctx) -if err != nil { - // TODO: Handle error. -} -// Iterate through the results. -for { - var values []bigquery.Value - err := it.Next(&values) - if err == iterator.Done { - break - } - if err != nil { - // TODO: Handle error. - } - fmt.Println(values) -} -``` - - -## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging) - -- [About Stackdriver Logging][cloud-logging] -- [API documentation][cloud-logging-docs] -- [Go client documentation][cloud-logging-ref] -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging) - -### Example Usage - -First create a `logging.Client` to use throughout your application: -[snip]:# (logging-1) -```go -ctx := context.Background() -client, err := logging.NewClient(ctx, "my-project") -if err != nil { - // TODO: Handle error. -} -``` - -Usually, you'll want to add log entries to a buffer to be periodically flushed -(automatically and asynchronously) to the Stackdriver Logging service. -[snip]:# (logging-2) -```go -logger := client.Logger("my-log") -logger.Log(logging.Entry{Payload: "something happened!"}) -``` - -Close your client before your program exits, to flush any buffered log entries. -[snip]:# (logging-3) -```go -err = client.Close() -if err != nil { - // TODO: Handle error. -} -``` - -## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner) - -- [About Cloud Spanner][cloud-spanner] -- [API documentation][cloud-spanner-docs] -- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner) - -### Example Usage - -First create a `spanner.Client` to use throughout your application: - -[snip]:# (spanner-1) -```go -client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") -if err != nil { - log.Fatal(err) -} -``` - -[snip]:# (spanner-2) -```go -// Simple Reads And Writes -_, err = client.Apply(ctx, []*spanner.Mutation{ - spanner.Insert("Users", - []string{"name", "email"}, - []interface{}{"alice", "a@example.com"})}) -if err != nil { - log.Fatal(err) -} -row, err := client.Single().ReadRow(ctx, "Users", - spanner.Key{"alice"}, []string{"email"}) -if err != nil { - log.Fatal(err) -} -``` - - ## Contributing Contributions are welcome. Please, see the @@ -592,32 +123,23 @@ for more information. 
[cloud-datastore]: https://cloud.google.com/datastore/
[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore
-[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
-[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate

[cloud-firestore]: https://cloud.google.com/firestore/
[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore
-[cloud-firestore-docs]: https://cloud.google.com/firestore/docs
-[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate

[cloud-pubsub]: https://cloud.google.com/pubsub/
[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
-[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs

[cloud-storage]: https://cloud.google.com/storage/
[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
-[cloud-storage-docs]: https://cloud.google.com/storage/docs
-[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets

[cloud-bigtable]: https://cloud.google.com/bigtable/
[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable

[cloud-bigquery]: https://cloud.google.com/bigquery/
-[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs
[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery

[cloud-logging]: https://cloud.google.com/logging/
-[cloud-logging-docs]: https://cloud.google.com/logging/docs
[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging

[cloud-monitoring]: https://cloud.google.com/monitoring/
@@ -630,17 +152,16 @@ for more information.
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1

[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest
-[cloud-oslogin-ref]: https://cloud.google.com/compute/docs/oslogin/rest
+[cloud-oslogin-ref]: https://godoc.org/cloud.google.com/go/oslogin/apiv1

[cloud-speech]: https://cloud.google.com/speech
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1

[cloud-spanner]: https://cloud.google.com/spanner/
[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner
-[cloud-spanner-docs]: https://cloud.google.com/spanner/docs

-[cloud-translation]: https://cloud.google.com/translation
-[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation
+[cloud-translate]: https://cloud.google.com/translate
+[cloud-translate-ref]: https://godoc.org/cloud.google.com/go/translate

[cloud-video]: https://cloud.google.com/video-intelligence/
[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1
@@ -657,4 +178,50 @@ for more information.
[cloud-dlp]: https://cloud.google.com/dlp/
[cloud-dlp-ref]: https://godoc.org/cloud.google.com/go/dlp/apiv2beta1

-[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
+[cloud-dataproc]: https://cloud.google.com/dataproc/
+[cloud-dataproc-ref]: https://godoc.org/cloud.google.com/go/dataproc/apiv1
+
+[cloud-iam]: https://cloud.google.com/iam/
+[cloud-iam-ref]: https://godoc.org/cloud.google.com/go/iam
+
+[cloud-kms]: https://cloud.google.com/kms/
+[cloud-kms-ref]: https://godoc.org/cloud.google.com/go/kms/apiv1
+
+[cloud-natural-language]: https://cloud.google.com/natural-language/
+[cloud-natural-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
+
+[cloud-memorystore]: https://cloud.google.com/memorystore/
+[cloud-memorystore-ref]: https://godoc.org/cloud.google.com/go/redis/apiv1
+
+[cloud-texttospeech]: https://cloud.google.com/texttospeech/
+[cloud-texttospeech-ref]: https://godoc.org/cloud.google.com/go/texttospeech/apiv1
+
+[cloud-trace]: https://cloud.google.com/trace/
+[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace/apiv2
+
+[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/
+[cloud-dialogflow-ref]: https://godoc.org/cloud.google.com/go/dialogflow/apiv2
+
+[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis
+[cloud-containeranalysis-ref]: https://godoc.org/cloud.google.com/go/devtools/containeranalysis/apiv1beta1
+
+[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory
+[cloud-asset-ref]: https://godoc.org/cloud.google.com/go/asset/apiv1
+
+[cloud-tasks]: https://cloud.google.com/tasks/
+[cloud-tasks-ref]: https://godoc.org/cloud.google.com/go/cloudtasks/apiv2
+
+[cloud-scheduler]: https://cloud.google.com/scheduler
+[cloud-scheduler-ref]: https://godoc.org/cloud.google.com/go/scheduler/apiv1
+
+[cloud-iot]: https://cloud.google.com/iot-core/
+[cloud-iot-ref]: https://godoc.org/cloud.google.com/go/iot/apiv1
+
+[cloud-phishingprotection]: https://cloud.google.com/phishing-protection/
+[cloud-phishingprotection-ref]: https://godoc.org/cloud.google.com/go/phishingprotection/apiv1beta1
+
+[cloud-recaptcha]: https://cloud.google.com/recaptcha-enterprise/
+[cloud-recaptcha-ref]: https://godoc.org/cloud.google.com/go/recaptchaenterprise/apiv1beta1
+
+[cloud-talent]: https://cloud.google.com/solutions/talent-solution/
+[cloud-talent-ref]: https://godoc.org/cloud.google.com/go/talent/apiv4beta1
diff --git a/vendor/cloud.google.com/go/cloud.go b/vendor/cloud.google.com/go/cloud.go
new file mode 100644
index 0000000000..237d84561c
--- /dev/null
+++ b/vendor/cloud.google.com/go/cloud.go
@@ -0,0 +1,100 @@
+// Copyright 2014 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package cloud is the root of the packages used to access Google Cloud
+Services. See https://godoc.org/cloud.google.com/go for a full list
+of sub-packages.
+
+
+Client Options
+
+All clients in sub-packages are configurable via client options.
These options are +described here: https://godoc.org/google.golang.org/api/option. + + +Authentication and Authorization + +All the clients in sub-packages support authentication via Google Application Default +Credentials (see https://cloud.google.com/docs/authentication/production), or +by providing a JSON key file for a Service Account. See the authentication examples +in this package for details. + + +Timeouts and Cancellation + +By default, all requests in sub-packages will run indefinitely, retrying on transient +errors when correctness allows. To set timeouts or arrange for cancellation, use +contexts. See the examples for details. + +Do not attempt to control the initial connection (dialing) of a service by setting a +timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts +would be ineffective and would only interfere with credential refreshing, which uses +the same context. + + +Connection Pooling + +Connection pooling differs in clients based on their transport. Cloud +clients either rely on HTTP or gRPC transports to communicate +with Google Cloud. + +Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the +underlying HTTP transport to cache connections for later re-use. These are cached to +the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in +http.DefaultTransport. + +For gRPC clients (all others in this repo), connection pooling is configurable. Users +of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client +option to NewClient calls. This configures the underlying gRPC connections to be +pooled and addressed in a round robin fashion. + + +Using the Libraries with Docker + +Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to +hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928 +for more information. + + +Debugging + +To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See +https://godoc.org/google.golang.org/grpc/grpclog for more information. + +For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2". + + +Client Stability + +Clients in this repository are considered alpha or beta unless otherwise +marked as stable in the README.md. Semver is not used to communicate stability +of clients. + +Alpha and beta clients may change or go away without notice. + +Clients marked stable will maintain compatibility with future versions for as +long as we can reasonably sustain. Incompatible changes might be made in some +situations, including: + +- Security bugs may prompt backwards-incompatible changes. + +- Situations in which components are no longer feasible to maintain without +making breaking changes, including removal. + +- Parts of the client surface may be outright unstable and subject to change. +These parts of the surface will be labeled with the note, "It is EXPERIMENTAL +and subject to change or removal without notice." 
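+
+As an editorial sketch of the timeout guidance above (assuming the storage
+sub-package; any sub-package behaves the same way), a deadline belongs on the
+context of the individual call, never on the context passed to NewClient:
+
+	client, err := storage.NewClient(context.Background())
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// Cancel just this call if it takes longer than five seconds.
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	attrs, err := client.Bucket("my-bucket").Attrs(ctx)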
+*/
+package cloud // import "cloud.google.com/go"
diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/gosym/pclinetest.h b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/gosym/pclinetest.h
new file mode 100644
index 0000000000..156c0b87b0
--- /dev/null
+++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/gosym/pclinetest.h
@@ -0,0 +1,9 @@
+// +build ignore
+
+// Empty include file to generate z symbols
+
+
+
+
+
+// EOF
diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/gosym/pclntab.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/gosym/pclntab.go
new file mode 100644
index 0000000000..dcd7015bb7
--- /dev/null
+++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/gosym/pclntab.go
@@ -0,0 +1,472 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+ * Line tables
+ */
+
+package gosym
+
+import (
+	"encoding/binary"
+	"sync"
+)
+
+// A LineTable is a data structure mapping program counters to line numbers.
+//
+// In Go 1.1 and earlier, each function (represented by a Func) had its own LineTable,
+// and the line number corresponded to a numbering of all source lines in the
+// program, across all files. That absolute line number would then have to be
+// converted separately to a file name and line number within the file.
+//
+// In Go 1.2, the format of the data changed so that there is a single LineTable
+// for the entire program, shared by all Funcs, and there are no absolute line
+// numbers, just line numbers within specific files.
+//
+// For the most part, LineTable's methods should be treated as an internal
+// detail of the package; callers should use the methods on Table instead.
+type LineTable struct {
+	Data []byte
+	PC   uint64
+	Line int
+
+	// Go 1.2 state
+	mu       sync.Mutex
+	go12     int // is this in Go 1.2 format? -1 no, 0 unknown, 1 yes
+	binary   binary.ByteOrder
+	quantum  uint32
+	ptrsize  uint32
+	functab  []byte
+	nfunctab uint32
+	filetab  []byte
+	nfiletab uint32
+	fileMap  map[string]uint32
+}
+
+// NOTE(rsc): This is wrong for GOARCH=arm, which uses a quantum of 4,
+// but we have no idea whether we're using arm or not. This only
+// matters in the old (pre-Go 1.2) symbol table format, so it's not worth
+// fixing.
+const oldQuantum = 1
+
+func (t *LineTable) parse(targetPC uint64, targetLine int) (b []byte, pc uint64, line int) {
+	// The PC/line table can be thought of as a sequence of
+	//  <pc update>* <line update>
+	// batches. Each update batch results in a (pc, line) pair,
+	// where line applies to every PC from pc up to but not
+	// including the pc of the next pair.
+	//
+	// Here we process each update individually, which simplifies
+	// the code, but makes the corner cases more confusing.
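+	//
+	// Opcode meanings in this pre-Go 1.2 encoding, as handled by the switch
+	// below (an editorial summary of the code, not original commentary):
+	//   0:        a 4-byte big-endian line delta follows
+	//   1..64:    line += code
+	//   65..128:  line -= code - 64
+	//   129..255: pc advances by oldQuantum * (code - 128), no line change
+	// Codes 0..128 also advance pc by one quantum after their line update.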
+ b, pc, line = t.Data, t.PC, t.Line + for pc <= targetPC && line != targetLine && len(b) > 0 { + code := b[0] + b = b[1:] + switch { + case code == 0: + if len(b) < 4 { + b = b[0:0] + break + } + val := binary.BigEndian.Uint32(b) + b = b[4:] + line += int(val) + case code <= 64: + line += int(code) + case code <= 128: + line -= int(code - 64) + default: + pc += oldQuantum * uint64(code-128) + continue + } + pc += oldQuantum + } + return b, pc, line +} + +func (t *LineTable) slice(pc uint64) *LineTable { + data, pc, line := t.parse(pc, -1) + return &LineTable{Data: data, PC: pc, Line: line} +} + +// PCToLine returns the line number for the given program counter. +// Callers should use Table's PCToLine method instead. +func (t *LineTable) PCToLine(pc uint64) int { + if t.isGo12() { + return t.go12PCToLine(pc) + } + _, _, line := t.parse(pc, -1) + return line +} + +// LineToPC returns the program counter for the given line number, +// considering only program counters before maxpc. +// Callers should use Table's LineToPC method instead. +func (t *LineTable) LineToPC(line int, maxpc uint64) uint64 { + if t.isGo12() { + return 0 + } + _, pc, line1 := t.parse(maxpc, line) + if line1 != line { + return 0 + } + // Subtract quantum from PC to account for post-line increment + return pc - oldQuantum +} + +// NewLineTable returns a new PC/line table +// corresponding to the encoded data. +// Text must be the start address of the +// corresponding text segment. +func NewLineTable(data []byte, text uint64) *LineTable { + return &LineTable{Data: data, PC: text, Line: 0} +} + +// Go 1.2 symbol table format. +// See golang.org/s/go12symtab. +// +// A general note about the methods here: rather than try to avoid +// index out of bounds errors, we trust Go to detect them, and then +// we recover from the panics and treat them as indicative of a malformed +// or incomplete table. +// +// The methods called by symtab.go, which begin with "go12" prefixes, +// are expected to have that recovery logic. + +// isGo12 reports whether this is a Go 1.2 (or later) symbol table. +func (t *LineTable) isGo12() bool { + t.go12Init() + return t.go12 == 1 +} + +const go12magic = 0xfffffffb + +// uintptr returns the pointer-sized value encoded at b. +// The pointer size is dictated by the table being read. +func (t *LineTable) uintptr(b []byte) uint64 { + if t.ptrsize == 4 { + return uint64(t.binary.Uint32(b)) + } + return t.binary.Uint64(b) +} + +// go12init initializes the Go 1.2 metadata if t is a Go 1.2 symbol table. +func (t *LineTable) go12Init() { + t.mu.Lock() + defer t.mu.Unlock() + if t.go12 != 0 { + return + } + + defer func() { + // If we panic parsing, assume it's not a Go 1.2 symbol table. + recover() + }() + + // Check header: 4-byte magic, two zeros, pc quantum, pointer size. 
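+	// (Editorial summary of the checks below: bytes 0-3 hold the magic
+	// 0xFFFFFFFB, whose byte order identifies endianness; bytes 4-5 are
+	// zero; byte 6 is the instruction quantum, 1 or 4; byte 7 is the
+	// pointer size, 4 or 8.)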
+ t.go12 = -1 // not Go 1.2 until proven otherwise + if len(t.Data) < 16 || t.Data[4] != 0 || t.Data[5] != 0 || + (t.Data[6] != 1 && t.Data[6] != 4) || // pc quantum + (t.Data[7] != 4 && t.Data[7] != 8) { // pointer size + return + } + + switch uint32(go12magic) { + case binary.LittleEndian.Uint32(t.Data): + t.binary = binary.LittleEndian + case binary.BigEndian.Uint32(t.Data): + t.binary = binary.BigEndian + default: + return + } + + t.quantum = uint32(t.Data[6]) + t.ptrsize = uint32(t.Data[7]) + + t.nfunctab = uint32(t.uintptr(t.Data[8:])) + t.functab = t.Data[8+t.ptrsize:] + functabsize := t.nfunctab*2*t.ptrsize + t.ptrsize + fileoff := t.binary.Uint32(t.functab[functabsize:]) + t.functab = t.functab[:functabsize] + t.filetab = t.Data[fileoff:] + t.nfiletab = t.binary.Uint32(t.filetab) + t.filetab = t.filetab[:t.nfiletab*4] + + t.go12 = 1 // so far so good +} + +// go12Funcs returns a slice of Funcs derived from the Go 1.2 pcln table. +func (t *LineTable) go12Funcs() []Func { + // Assume it is malformed and return nil on error. + defer func() { + recover() + }() + + n := len(t.functab) / int(t.ptrsize) / 2 + funcs := make([]Func, n) + for i := range funcs { + f := &funcs[i] + f.Entry = uint64(t.uintptr(t.functab[2*i*int(t.ptrsize):])) + f.End = uint64(t.uintptr(t.functab[(2*i+2)*int(t.ptrsize):])) + info := t.Data[t.uintptr(t.functab[(2*i+1)*int(t.ptrsize):]):] + f.LineTable = t + f.FrameSize = int(t.binary.Uint32(info[t.ptrsize+2*4:])) + f.Sym = &Sym{ + Value: f.Entry, + Type: 'T', + Name: t.string(t.binary.Uint32(info[t.ptrsize:])), + GoType: 0, + Func: f, + } + } + return funcs +} + +// findFunc returns the func corresponding to the given program counter. +func (t *LineTable) findFunc(pc uint64) []byte { + if pc < t.uintptr(t.functab) || pc >= t.uintptr(t.functab[len(t.functab)-int(t.ptrsize):]) { + return nil + } + + // The function table is a list of 2*nfunctab+1 uintptrs, + // alternating program counters and offsets to func structures. + f := t.functab + nf := t.nfunctab + for nf > 0 { + m := nf / 2 + fm := f[2*t.ptrsize*m:] + if t.uintptr(fm) <= pc && pc < t.uintptr(fm[2*t.ptrsize:]) { + return t.Data[t.uintptr(fm[t.ptrsize:]):] + } else if pc < t.uintptr(fm) { + nf = m + } else { + f = f[(m+1)*2*t.ptrsize:] + nf -= m + 1 + } + } + return nil +} + +// readvarint reads, removes, and returns a varint from *pp. +func (t *LineTable) readvarint(pp *[]byte) uint32 { + var v, shift uint32 + p := *pp + for shift = 0; ; shift += 7 { + b := p[0] + p = p[1:] + v |= (uint32(b) & 0x7F) << shift + if b&0x80 == 0 { + break + } + } + *pp = p + return v +} + +// string returns a Go string found at off. +func (t *LineTable) string(off uint32) string { + for i := off; ; i++ { + if t.Data[i] == 0 { + return string(t.Data[off:i]) + } + } +} + +// step advances to the next pc, value pair in the encoded table. +func (t *LineTable) step(p *[]byte, pc *uint64, val *int32, first bool) bool { + uvdelta := t.readvarint(p) + if uvdelta == 0 && !first { + return false + } + if uvdelta&1 != 0 { + uvdelta = ^(uvdelta >> 1) + } else { + uvdelta >>= 1 + } + vdelta := int32(uvdelta) + pcdelta := t.readvarint(p) * t.quantum + *pc += uint64(pcdelta) + *val += vdelta + return true +} + +// pcvalue reports the value associated with the target pc. +// off is the offset to the beginning of the pc-value table, +// and entry is the start PC for the corresponding function. 
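+// (Editorial note: the table is a sequence of (value delta, pc delta) varint
+// pairs decoded by step; the result is the value in effect for the pc range
+// containing targetpc, or -1 when the table is absent or pc is not covered.)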
+func (t *LineTable) pcvalue(off uint32, entry, targetpc uint64) int32 { + if off == 0 { + return -1 + } + p := t.Data[off:] + + val := int32(-1) + pc := entry + for t.step(&p, &pc, &val, pc == entry) { + if targetpc < pc { + return val + } + } + return -1 +} + +// findFileLine scans one function in the binary looking for a +// program counter in the given file on the given line. +// It does so by running the pc-value tables mapping program counter +// to file number. Since most functions come from a single file, these +// are usually short and quick to scan. If a file match is found, then the +// code goes to the expense of looking for a simultaneous line number match. +func (t *LineTable) findFileLine(entry uint64, filetab, linetab uint32, filenum, line int32) uint64 { + if filetab == 0 || linetab == 0 { + return 0 + } + + fp := t.Data[filetab:] + fl := t.Data[linetab:] + fileVal := int32(-1) + filePC := entry + lineVal := int32(-1) + linePC := entry + fileStartPC := filePC + for t.step(&fp, &filePC, &fileVal, filePC == entry) { + if fileVal == filenum && fileStartPC < filePC { + // fileVal is in effect starting at fileStartPC up to + // but not including filePC, and it's the file we want. + // Run the PC table looking for a matching line number + // or until we reach filePC. + lineStartPC := linePC + for linePC < filePC && t.step(&fl, &linePC, &lineVal, linePC == entry) { + // lineVal is in effect until linePC, and lineStartPC < filePC. + if lineVal == line { + if fileStartPC <= lineStartPC { + return lineStartPC + } + if fileStartPC < linePC { + return fileStartPC + } + } + lineStartPC = linePC + } + } + fileStartPC = filePC + } + return 0 +} + +// go12PCToLine maps program counter to line number for the Go 1.2 pcln table. +func (t *LineTable) go12PCToLine(pc uint64) (line int) { + return t.go12PCToVal(pc, t.ptrsize+5*4) +} + +// go12PCToSPAdj maps program counter to Stack Pointer adjustment for the Go 1.2 pcln table. +func (t *LineTable) go12PCToSPAdj(pc uint64) (spadj int) { + return t.go12PCToVal(pc, t.ptrsize+3*4) +} + +func (t *LineTable) go12PCToVal(pc uint64, fOffset uint32) (val int) { + defer func() { + if recover() != nil { + val = -1 + } + }() + + f := t.findFunc(pc) + if f == nil { + return -1 + } + entry := t.uintptr(f) + linetab := t.binary.Uint32(f[fOffset:]) + return int(t.pcvalue(linetab, entry, pc)) +} + +// go12PCToFile maps program counter to file name for the Go 1.2 pcln table. +func (t *LineTable) go12PCToFile(pc uint64) (file string) { + defer func() { + if recover() != nil { + file = "" + } + }() + + f := t.findFunc(pc) + if f == nil { + return "" + } + entry := t.uintptr(f) + filetab := t.binary.Uint32(f[t.ptrsize+4*4:]) + fno := t.pcvalue(filetab, entry, pc) + if fno <= 0 { + return "" + } + return t.string(t.binary.Uint32(t.filetab[4*fno:])) +} + +// go12LineToPC maps a (file, line) pair to a program counter for the Go 1.2 pcln table. +func (t *LineTable) go12LineToPC(file string, line int) (pc uint64) { + defer func() { + if recover() != nil { + pc = 0 + } + }() + + t.initFileMap() + filenum := t.fileMap[file] + if filenum == 0 { + return 0 + } + + // Scan all functions. + // If this turns out to be a bottleneck, we could build a map[int32][]int32 + // mapping file number to a list of functions with code from that file. 
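+	// (Each functab entry pairs an entry PC with the offset of the function's
+	// metadata, from which the file and line pc-value tables are located.)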
+	for i := uint32(0); i < t.nfunctab; i++ {
+		f := t.Data[t.uintptr(t.functab[2*t.ptrsize*i+t.ptrsize:]):]
+		entry := t.uintptr(f)
+		filetab := t.binary.Uint32(f[t.ptrsize+4*4:])
+		linetab := t.binary.Uint32(f[t.ptrsize+5*4:])
+		pc := t.findFileLine(entry, filetab, linetab, int32(filenum), int32(line))
+		if pc != 0 {
+			return pc
+		}
+	}
+	return 0
+}
+
+// initFileMap initializes the map from file name to file number.
+func (t *LineTable) initFileMap() {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if t.fileMap != nil {
+		return
+	}
+	m := make(map[string]uint32)
+
+	for i := uint32(1); i < t.nfiletab; i++ {
+		s := t.string(t.binary.Uint32(t.filetab[4*i:]))
+		m[s] = i
+	}
+	t.fileMap = m
+}
+
+// go12MapFiles adds to m a key for every file in the Go 1.2 LineTable.
+// Every key maps to obj. That's not a very interesting map, but it provides
+// a way for callers to obtain the list of files in the program.
+func (t *LineTable) go12MapFiles(m map[string]*Obj, obj *Obj) {
+	defer func() {
+		recover()
+	}()
+
+	t.initFileMap()
+	for file := range t.fileMap {
+		m[file] = obj
+	}
+}
diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/gosym/symtab.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/gosym/symtab.go
new file mode 100644
index 0000000000..2c83b841cf
--- /dev/null
+++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/gosym/symtab.go
@@ -0,0 +1,731 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package gosym implements access to the Go symbol
+// and line number tables embedded in Go binaries generated
+// by the gc compilers.
+package gosym
+
+// The table format is a variant of the format used in Plan 9's a.out
+// format, documented at http://plan9.bell-labs.com/magic/man2html/6/a.out.
+// The best reference for the differences between the Plan 9 format
+// and the Go format is the runtime source, specifically ../../runtime/symtab.c.
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+/*
+ * Symbols
+ */
+
+// A Sym represents a single symbol table entry.
+type Sym struct {
+	Value  uint64
+	Type   byte
+	Name   string
+	GoType uint64
+	// If this symbol is a function symbol, the corresponding Func
+	Func *Func
+}
+
+// Static reports whether this symbol is static (not visible outside its file).
+func (s *Sym) Static() bool { return s.Type >= 'a' }
+
+// PackageName returns the package part of the symbol name,
+// or the empty string if there is none.
+func (s *Sym) PackageName() string {
+	if i := strings.Index(s.Name, "."); i != -1 {
+		return s.Name[0:i]
+	}
+	return ""
+}
+
+// ReceiverName returns the receiver type name of this symbol,
+// or the empty string if there is none.
+func (s *Sym) ReceiverName() string { + l := strings.Index(s.Name, ".") + r := strings.LastIndex(s.Name, ".") + if l == -1 || r == -1 || l == r { + return "" + } + return s.Name[l+1 : r] +} + +// BaseName returns the symbol name without the package or receiver name. +func (s *Sym) BaseName() string { + if i := strings.LastIndex(s.Name, "."); i != -1 { + return s.Name[i+1:] + } + return s.Name +} + +// A Func collects information about a single function. +type Func struct { + Entry uint64 + *Sym + End uint64 + Params []*Sym + Locals []*Sym + FrameSize int + LineTable *LineTable + Obj *Obj +} + +// An Obj represents a collection of functions in a symbol table. +// +// The exact method of division of a binary into separate Objs is an internal detail +// of the symbol table format. +// +// In early versions of Go each source file became a different Obj. +// +// In Go 1 and Go 1.1, each package produced one Obj for all Go sources +// and one Obj per C source file. +// +// In Go 1.2, there is a single Obj for the entire program. +type Obj struct { + // Funcs is a list of functions in the Obj. + Funcs []Func + + // In Go 1.1 and earlier, Paths is a list of symbols corresponding + // to the source file names that produced the Obj. + // In Go 1.2, Paths is nil. + // Use the keys of Table.Files to obtain a list of source files. + Paths []Sym // meta +} + +/* + * Symbol tables + */ + +// Table represents a Go symbol table. It stores all of the +// symbols decoded from the program and provides methods to translate +// between symbols, names, and addresses. +type Table struct { + Syms []Sym + Funcs []Func + Files map[string]*Obj // nil for Go 1.2 and later binaries + Objs []Obj // nil for Go 1.2 and later binaries + + go12line *LineTable // Go 1.2 line number table +} + +type sym struct { + value uint64 + gotype uint64 + typ byte + name []byte +} + +var ( + littleEndianSymtab = []byte{0xFD, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00} + bigEndianSymtab = []byte{0xFF, 0xFF, 0xFF, 0xFD, 0x00, 0x00, 0x00} + oldLittleEndianSymtab = []byte{0xFE, 0xFF, 0xFF, 0xFF, 0x00, 0x00} +) + +func walksymtab(data []byte, fn func(sym) error) error { + if len(data) == 0 { // missing symtab is okay + return nil + } + var order binary.ByteOrder = binary.BigEndian + newTable := false + switch { + case bytes.HasPrefix(data, oldLittleEndianSymtab): + // Same as Go 1.0, but little endian. + // Format was used during interim development between Go 1.0 and Go 1.1. + // Should not be widespread, but easy to support. + data = data[6:] + order = binary.LittleEndian + case bytes.HasPrefix(data, bigEndianSymtab): + newTable = true + case bytes.HasPrefix(data, littleEndianSymtab): + newTable = true + order = binary.LittleEndian + } + var ptrsz int + if newTable { + if len(data) < 8 { + return &DecodingError{len(data), "unexpected EOF", nil} + } + ptrsz = int(data[7]) + if ptrsz != 4 && ptrsz != 8 { + return &DecodingError{7, "invalid pointer size", ptrsz} + } + data = data[8:] + } + var s sym + p := data + for len(p) >= 4 { + var typ byte + if newTable { + // Symbol type, value, Go type. 
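+			// Editorial note on the first byte: bits 0-5 hold the symbol
+			// type (0-25 map to 'A'..'Z', 26-51 to 'a'..'z'), bit 6 marks a
+			// fixed-width value, and bit 7 marks a trailing Go type address.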
+ typ = p[0] & 0x3F + wideValue := p[0]&0x40 != 0 + goType := p[0]&0x80 != 0 + if typ < 26 { + typ += 'A' + } else { + typ += 'a' - 26 + } + s.typ = typ + p = p[1:] + if wideValue { + if len(p) < ptrsz { + return &DecodingError{len(data), "unexpected EOF", nil} + } + // fixed-width value + if ptrsz == 8 { + s.value = order.Uint64(p[0:8]) + p = p[8:] + } else { + s.value = uint64(order.Uint32(p[0:4])) + p = p[4:] + } + } else { + // varint value + s.value = 0 + shift := uint(0) + for len(p) > 0 && p[0]&0x80 != 0 { + s.value |= uint64(p[0]&0x7F) << shift + shift += 7 + p = p[1:] + } + if len(p) == 0 { + return &DecodingError{len(data), "unexpected EOF", nil} + } + s.value |= uint64(p[0]) << shift + p = p[1:] + } + if goType { + if len(p) < ptrsz { + return &DecodingError{len(data), "unexpected EOF", nil} + } + // fixed-width go type + if ptrsz == 8 { + s.gotype = order.Uint64(p[0:8]) + p = p[8:] + } else { + s.gotype = uint64(order.Uint32(p[0:4])) + p = p[4:] + } + } + } else { + // Value, symbol type. + s.value = uint64(order.Uint32(p[0:4])) + if len(p) < 5 { + return &DecodingError{len(data), "unexpected EOF", nil} + } + typ = p[4] + if typ&0x80 == 0 { + return &DecodingError{len(data) - len(p) + 4, "bad symbol type", typ} + } + typ &^= 0x80 + s.typ = typ + p = p[5:] + } + + // Name. + var i int + var nnul int + for i = 0; i < len(p); i++ { + if p[i] == 0 { + nnul = 1 + break + } + } + switch typ { + case 'z', 'Z': + p = p[i+nnul:] + for i = 0; i+2 <= len(p); i += 2 { + if p[i] == 0 && p[i+1] == 0 { + nnul = 2 + break + } + } + } + if len(p) < i+nnul { + return &DecodingError{len(data), "unexpected EOF", nil} + } + s.name = p[0:i] + i += nnul + p = p[i:] + + if !newTable { + if len(p) < 4 { + return &DecodingError{len(data), "unexpected EOF", nil} + } + // Go type. + s.gotype = uint64(order.Uint32(p[:4])) + p = p[4:] + } + fn(s) + } + return nil +} + +// NewTable decodes the Go symbol table in data, +// returning an in-memory representation. +func NewTable(symtab []byte, pcln *LineTable) (*Table, error) { + var n int + err := walksymtab(symtab, func(s sym) error { + n++ + return nil + }) + if err != nil { + return nil, err + } + + var t Table + if pcln.isGo12() { + t.go12line = pcln + } + fname := make(map[uint16]string) + t.Syms = make([]Sym, 0, n) + nf := 0 + nz := 0 + lasttyp := uint8(0) + err = walksymtab(symtab, func(s sym) error { + n := len(t.Syms) + t.Syms = t.Syms[0 : n+1] + ts := &t.Syms[n] + ts.Type = s.typ + ts.Value = uint64(s.value) + ts.GoType = uint64(s.gotype) + switch s.typ { + default: + // rewrite name to use . instead of ยท (c2 b7) + w := 0 + b := s.name + for i := 0; i < len(b); i++ { + if b[i] == 0xc2 && i+1 < len(b) && b[i+1] == 0xb7 { + i++ + b[i] = '.' + } + b[w] = b[i] + w++ + } + ts.Name = string(s.name[0:w]) + case 'z', 'Z': + if lasttyp != 'z' && lasttyp != 'Z' { + nz++ + } + for i := 0; i < len(s.name); i += 2 { + eltIdx := binary.BigEndian.Uint16(s.name[i : i+2]) + elt, ok := fname[eltIdx] + if !ok { + return &DecodingError{-1, "bad filename code", eltIdx} + } + if n := len(ts.Name); n > 0 && ts.Name[n-1] != '/' { + ts.Name += "/" + } + ts.Name += elt + } + } + switch s.typ { + case 'T', 't', 'L', 'l': + nf++ + case 'f': + fname[uint16(s.value)] = ts.Name + } + lasttyp = s.typ + return nil + }) + if err != nil { + return nil, err + } + + t.Funcs = make([]Func, 0, nf) + t.Files = make(map[string]*Obj) + + var obj *Obj + if t.go12line != nil { + // Put all functions into one Obj. 
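+		// (Go 1.2 binaries carry no per-file Obj boundaries, so the line
+		// table's file map is attached to this single Obj below.)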
+ t.Objs = make([]Obj, 1) + obj = &t.Objs[0] + t.go12line.go12MapFiles(t.Files, obj) + } else { + t.Objs = make([]Obj, 0, nz) + } + + // Count text symbols and attach frame sizes, parameters, and + // locals to them. Also, find object file boundaries. + lastf := 0 + for i := 0; i < len(t.Syms); i++ { + sym := &t.Syms[i] + switch sym.Type { + case 'Z', 'z': // path symbol + if t.go12line != nil { + // Go 1.2 binaries have the file information elsewhere. Ignore. + break + } + // Finish the current object + if obj != nil { + obj.Funcs = t.Funcs[lastf:] + } + lastf = len(t.Funcs) + + // Start new object + n := len(t.Objs) + t.Objs = t.Objs[0 : n+1] + obj = &t.Objs[n] + + // Count & copy path symbols + var end int + for end = i + 1; end < len(t.Syms); end++ { + if c := t.Syms[end].Type; c != 'Z' && c != 'z' { + break + } + } + obj.Paths = t.Syms[i:end] + i = end - 1 // loop will i++ + + // Record file names + depth := 0 + for j := range obj.Paths { + s := &obj.Paths[j] + if s.Name == "" { + depth-- + } else { + if depth == 0 { + t.Files[s.Name] = obj + } + depth++ + } + } + + case 'T', 't', 'L', 'l': // text symbol + if n := len(t.Funcs); n > 0 { + t.Funcs[n-1].End = sym.Value + } + if sym.Name == "etext" { + continue + } + + // Count parameter and local (auto) syms + var np, na int + var end int + countloop: + for end = i + 1; end < len(t.Syms); end++ { + switch t.Syms[end].Type { + case 'T', 't', 'L', 'l', 'Z', 'z': + break countloop + case 'p': + np++ + case 'a': + na++ + } + } + + // Fill in the function symbol + n := len(t.Funcs) + t.Funcs = t.Funcs[0 : n+1] + fn := &t.Funcs[n] + sym.Func = fn + fn.Params = make([]*Sym, 0, np) + fn.Locals = make([]*Sym, 0, na) + fn.Sym = sym + fn.Entry = sym.Value + fn.Obj = obj + if t.go12line != nil { + // All functions share the same line table. + // It knows how to narrow down to a specific + // function quickly. + fn.LineTable = t.go12line + } else if pcln != nil { + fn.LineTable = pcln.slice(fn.Entry) + pcln = fn.LineTable + } + for j := i; j < end; j++ { + s := &t.Syms[j] + switch s.Type { + case 'm': + fn.FrameSize = int(s.Value) + case 'p': + n := len(fn.Params) + fn.Params = fn.Params[0 : n+1] + fn.Params[n] = s + case 'a': + n := len(fn.Locals) + fn.Locals = fn.Locals[0 : n+1] + fn.Locals[n] = s + } + } + i = end - 1 // loop will i++ + } + } + + if t.go12line != nil && nf == 0 { + t.Funcs = t.go12line.go12Funcs() + } + if obj != nil { + obj.Funcs = t.Funcs[lastf:] + } + return &t, nil +} + +// PCToFunc returns the function containing the program counter pc, +// or nil if there is no such function. +func (t *Table) PCToFunc(pc uint64) *Func { + funcs := t.Funcs + for len(funcs) > 0 { + m := len(funcs) / 2 + fn := &funcs[m] + switch { + case pc < fn.Entry: + funcs = funcs[0:m] + case fn.Entry <= pc && pc < fn.End: + return fn + default: + funcs = funcs[m+1:] + } + } + return nil +} + +// PCToLine looks up line number information for a program counter. +// If there is no information, it returns fn == nil. +func (t *Table) PCToLine(pc uint64) (file string, line int, fn *Func) { + if fn = t.PCToFunc(pc); fn == nil { + return + } + if t.go12line != nil { + file = t.go12line.go12PCToFile(pc) + line = t.go12line.go12PCToLine(pc) + } else { + file, line = fn.Obj.lineFromAline(fn.LineTable.PCToLine(pc)) + } + return +} + +// PCToSPAdj returns the stack pointer adjustment for a program counter. 
+func (t *Table) PCToSPAdj(pc uint64) (spadj int) {
+	if fn := t.PCToFunc(pc); fn == nil {
+		return 0
+	}
+	if t.go12line != nil {
+		return t.go12line.go12PCToSPAdj(pc)
+	}
+	return 0
+}
+
+// LineToPC looks up the first program counter on the given line in
+// the named file. It returns UnknownFileError or UnknownLineError if
+// there is an error looking up this line.
+func (t *Table) LineToPC(file string, line int) (pc uint64, fn *Func, err error) {
+	obj, ok := t.Files[file]
+	if !ok {
+		return 0, nil, UnknownFileError(file)
+	}
+
+	if t.go12line != nil {
+		pc := t.go12line.go12LineToPC(file, line)
+		if pc == 0 {
+			return 0, nil, &UnknownLineError{file, line}
+		}
+		return pc, t.PCToFunc(pc), nil
+	}
+
+	abs, err := obj.alineFromLine(file, line)
+	if err != nil {
+		return
+	}
+	for i := range obj.Funcs {
+		f := &obj.Funcs[i]
+		pc := f.LineTable.LineToPC(abs, f.End)
+		if pc != 0 {
+			return pc, f, nil
+		}
+	}
+	return 0, nil, &UnknownLineError{file, line}
+}
+
+// LookupSym returns the text, data, or bss symbol with the given name,
+// or nil if no such symbol is found.
+func (t *Table) LookupSym(name string) *Sym {
+	// TODO(austin) Maybe make a map
+	for i := range t.Syms {
+		s := &t.Syms[i]
+		switch s.Type {
+		case 'T', 't', 'L', 'l', 'D', 'd', 'B', 'b':
+			if s.Name == name {
+				return s
+			}
+		}
+	}
+	return nil
+}
+
+// LookupFunc returns the function with the given name,
+// or nil if no such function is found.
+func (t *Table) LookupFunc(name string) *Func {
+	for i := range t.Funcs {
+		f := &t.Funcs[i]
+		if f.Sym.Name == name {
+			return f
+		}
+	}
+	return nil
+}
+
+// SymByAddr returns the text, data, or bss symbol starting at the given address.
+func (t *Table) SymByAddr(addr uint64) *Sym {
+	for i := range t.Syms {
+		s := &t.Syms[i]
+		switch s.Type {
+		case 'T', 't', 'L', 'l', 'D', 'd', 'B', 'b':
+			if s.Value == addr {
+				return s
+			}
+		}
+	}
+	return nil
+}
+
+/*
+ * Object files
+ */
+
+// This is legacy code for Go 1.1 and earlier, which used the
+// Plan 9 format for pc-line tables. This code was never quite
+// correct. It's probably very close, and it's usually correct, but
+// we never quite found all the corner cases.
+//
+// Go 1.2 and later use a simpler format, documented at golang.org/s/go12symtab.
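The Table API assembled above (NewTable, PCToFunc, PCToLine, LookupFunc) mirrors the standard library's debug/gosym package, from which this vendored copy derives. A minimal sketch of how such a table is typically built from an ELF binary and queried, written against the stdlib package of the same shape (the binary path and function name are illustrative):

```go
package main

import (
	"debug/elf"
	"debug/gosym"
	"fmt"
	"log"
)

func main() {
	// "./mybinary" is an illustrative path to any Go-built ELF binary.
	f, err := elf.Open("./mybinary")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	text := f.Section(".text")
	pcln := f.Section(".gopclntab")
	if text == nil || pcln == nil {
		log.Fatal("not a Go ELF binary")
	}
	pclndat, err := pcln.Data()
	if err != nil {
		log.Fatal(err)
	}

	// .gosymtab is empty in modern Go binaries; NewTable tolerates that
	// and falls back to the Go 1.2+ pcln table.
	var symdat []byte
	if s := f.Section(".gosymtab"); s != nil {
		symdat, _ = s.Data()
	}

	tab, err := gosym.NewTable(symdat, gosym.NewLineTable(pclndat, text.Addr))
	if err != nil {
		log.Fatal(err)
	}

	// Resolve a function by name, then map its entry PC back to file:line,
	// exercising the same LookupFunc/PCToLine pair defined above.
	if fn := tab.LookupFunc("main.main"); fn != nil {
		file, line, _ := tab.PCToLine(fn.Entry)
		fmt.Printf("main.main at %s:%d (entry %#x)\n", file, line, fn.Entry)
	}
}
```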
+ +func (o *Obj) lineFromAline(aline int) (string, int) { + type stackEnt struct { + path string + start int + offset int + prev *stackEnt + } + + noPath := &stackEnt{"", 0, 0, nil} + tos := noPath + +pathloop: + for _, s := range o.Paths { + val := int(s.Value) + switch { + case val > aline: + break pathloop + + case val == 1: + // Start a new stack + tos = &stackEnt{s.Name, val, 0, noPath} + + case s.Name == "": + // Pop + if tos == noPath { + return "", 0 + } + tos.prev.offset += val - tos.start + tos = tos.prev + + default: + // Push + tos = &stackEnt{s.Name, val, 0, tos} + } + } + + if tos == noPath { + return "", 0 + } + return tos.path, aline - tos.start - tos.offset + 1 +} + +func (o *Obj) alineFromLine(path string, line int) (int, error) { + if line < 1 { + return 0, &UnknownLineError{path, line} + } + + for i, s := range o.Paths { + // Find this path + if s.Name != path { + continue + } + + // Find this line at this stack level + depth := 0 + var incstart int + line += int(s.Value) + pathloop: + for _, s := range o.Paths[i:] { + val := int(s.Value) + switch { + case depth == 1 && val >= line: + return line - 1, nil + + case s.Name == "": + depth-- + if depth == 0 { + break pathloop + } else if depth == 1 { + line += val - incstart + } + + default: + if depth == 1 { + incstart = val + } + depth++ + } + } + return 0, &UnknownLineError{path, line} + } + return 0, UnknownFileError(path) +} + +/* + * Errors + */ + +// UnknownFileError represents a failure to find the specific file in +// the symbol table. +type UnknownFileError string + +func (e UnknownFileError) Error() string { return "unknown file: " + string(e) } + +// UnknownLineError represents a failure to map a line to a program +// counter, either because the line is beyond the bounds of the file +// or because there is no code on the given line. +type UnknownLineError struct { + File string + Line int +} + +func (e *UnknownLineError) Error() string { + return "no code at " + e.File + ":" + strconv.Itoa(e.Line) +} + +// DecodingError represents an error during the decoding of +// the symbol table. +type DecodingError struct { + off int + msg string + val interface{} +} + +func (e *DecodingError) Error() string { + msg := e.msg + if e.val != nil { + msg += fmt.Sprintf(" '%v'", e.val) + } + msg += fmt.Sprintf(" at byte %#x", e.off) + return msg +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index e708c031b9..125b7033c9 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -1,4 +1,4 @@ -// Copyright 2014 Google Inc. All Rights Reserved. +// Copyright 2014 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
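The metadata.go hunks that follow drop the pre-Go 1.7 golang.org/x/net/context and ctxhttp imports in favor of the standard library's context package; each ctxhttp.Do(ctx, client, req) call becomes client.Do(req.WithContext(ctx)). A minimal sketch of that stdlib pattern (URL and timeout are illustrative):

```go
package main

import (
	"context"
	"net/http"
	"time"
)

// ctxGet issues a GET whose lifetime is bounded by ctx, using only the
// standard library. This is the same substitution the hunks below make:
// ctxhttp.Do(ctx, client, req) becomes client.Do(req.WithContext(ctx)).
func ctxGet(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	return client.Do(req.WithContext(ctx))
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	// example.com is illustrative; the metadata code targets 169.254.169.254.
	if res, err := ctxGet(ctx, http.DefaultClient, "http://example.com"); err == nil {
		res.Body.Close()
	}
}
```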
@@ -20,6 +20,7 @@ package metadata // import "cloud.google.com/go/compute/metadata" import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -31,9 +32,6 @@ import ( "strings" "sync" "time" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" ) const ( @@ -64,7 +62,7 @@ var ( ) var ( - metaClient = &http.Client{ + defaultClient = &Client{hc: &http.Client{ Transport: &http.Transport{ Dial: (&net.Dialer{ Timeout: 2 * time.Second, @@ -72,15 +70,15 @@ var ( }).Dial, ResponseHeaderTimeout: 2 * time.Second, }, - } - subscribeClient = &http.Client{ + }} + subscribeClient = &Client{hc: &http.Client{ Transport: &http.Transport{ Dial: (&net.Dialer{ Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, }, - } + }} ) // NotDefinedError is returned when requested metadata is not defined. @@ -95,74 +93,16 @@ func (suffix NotDefinedError) Error() string { return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) } -// Get returns a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. -func Get(suffix string) (string, error) { - val, _, err := getETag(metaClient, suffix) - return val, err -} - -// getETag returns a value from the metadata service as well as the associated -// ETag using the provided client. This func is otherwise equivalent to Get. -func getETag(client *http.Client, suffix string) (value, etag string, err error) { - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. - host := os.Getenv(metadataHostEnv) - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. - host = metadataIP - } - url := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", url, nil) - req.Header.Set("Metadata-Flavor", "Google") - req.Header.Set("User-Agent", userAgent) - res, err := client.Do(req) - if err != nil { - return "", "", err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - if res.StatusCode != 200 { - return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - return string(all), res.Header.Get("Etag"), nil -} - -func getTrimmed(suffix string) (s string, err error) { - s, err = Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *cachedValue) get() (v string, err error) { +func (c *cachedValue) get(cl *Client) (v string, err error) { defer c.mu.Unlock() c.mu.Lock() if c.v != "" { return c.v, nil } if c.trim { - v, err = getTrimmed(c.k) + v, err = cl.getTrimmed(c.k) } else { - v, err = Get(c.k) + v, err = cl.Get(c.k) } if err == nil { c.v = v @@ -197,11 +137,11 @@ func testOnGCE() bool { resc := make(chan bool, 2) // Try two strategies in parallel. 
-	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
+	// See https://github.com/googleapis/google-cloud-go/issues/194
 	go func() {
 		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
 		req.Header.Set("User-Agent", userAgent)
-		res, err := ctxhttp.Do(ctx, metaClient, req)
+		res, err := defaultClient.hc.Do(req.WithContext(ctx))
 		if err != nil {
 			resc <- false
 			return
@@ -266,6 +206,255 @@ func systemInfoSuggestsGCE() bool {
 	return name == "Google" || name == "Google Compute Engine"
 }
 
+// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
+// ResponseHeaderTimeout).
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+	return subscribeClient.Subscribe(suffix, fn)
+}
+
+// Get calls Client.Get on the default client.
+func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return defaultClient.ProjectID() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) { return defaultClient.InternalIP() }
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) { return defaultClient.Hostname() }
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) { return defaultClient.InstanceID() }
+
+// InstanceName returns the current VM's instance ID string.
+func InstanceName() (string, error) { return defaultClient.InstanceName() }
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) { return defaultClient.Zone() }
+
+// InstanceAttributes calls Client.InstanceAttributes on the default client.
+func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
+
+// ProjectAttributes calls Client.ProjectAttributes on the default client.
+func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
+
+// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
+func InstanceAttributeValue(attr string) (string, error) {
+	return defaultClient.InstanceAttributeValue(attr)
+}
+
+// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
+func ProjectAttributeValue(attr string) (string, error) {
+	return defaultClient.ProjectAttributeValue(attr)
+}
+
+// Scopes calls Client.Scopes on the default client.
+func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
+
+func strsContains(ss []string, s string) bool {
+	for _, v := range ss {
+		if v == s {
+			return true
+		}
+	}
+	return false
+}
+
+// A Client provides metadata.
+type Client struct {
+	hc *http.Client
+}
+
+// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
+// will use the given http.Client instead of the default client.
+func NewClient(c *http.Client) *Client {
+	return &Client{hc: c}
+}
+
+// getETag returns a value from the metadata service as well as the associated ETag.
+// This func is otherwise equivalent to Get.
+func (c *Client) getETag(suffix string) (value, etag string, err error) {
+	// Using a fixed IP makes it very difficult to spoof the metadata service in
+	// a container, which is an important use-case for local testing of cloud
+	// deployments. To enable spoofing of the metadata service, the environment
+	// variable GCE_METADATA_HOST is first inspected to decide where metadata
+	// requests shall go.
+	host := os.Getenv(metadataHostEnv)
+	if host == "" {
+		// Using 169.254.169.254 instead of "metadata" here because Go
+		// binaries built with the "netgo" tag and without cgo won't
+		// know the search suffix for "metadata" is
+		// ".google.internal", and this IP address is documented as
+		// being stable anyway.
+		host = metadataIP
+	}
+	u := "http://" + host + "/computeMetadata/v1/" + suffix
+	req, _ := http.NewRequest("GET", u, nil)
+	req.Header.Set("Metadata-Flavor", "Google")
+	req.Header.Set("User-Agent", userAgent)
+	res, err := c.hc.Do(req)
+	if err != nil {
+		return "", "", err
+	}
+	defer res.Body.Close()
+	if res.StatusCode == http.StatusNotFound {
+		return "", "", NotDefinedError(suffix)
+	}
+	all, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return "", "", err
+	}
+	if res.StatusCode != 200 {
+		return "", "", &Error{Code: res.StatusCode, Message: string(all)}
+	}
+	return string(all), res.Header.Get("Etag"), nil
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
+// 169.254.169.254 will be used instead.
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func (c *Client) Get(suffix string) (string, error) {
+	val, _, err := c.getETag(suffix)
+	return val, err
+}
+
+func (c *Client) getTrimmed(suffix string) (s string, err error) {
+	s, err = c.Get(suffix)
+	s = strings.TrimSpace(s)
+	return
+}
+
+func (c *Client) lines(suffix string) ([]string, error) {
+	j, err := c.Get(suffix)
+	if err != nil {
+		return nil, err
+	}
+	s := strings.Split(strings.TrimSpace(j), "\n")
+	for i := range s {
+		s[i] = strings.TrimSpace(s[i])
+	}
+	return s, nil
+}
+
+// ProjectID returns the current instance's project ID string.
+func (c *Client) ProjectID() (string, error) { return projID.get(c) }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
+
+// InstanceID returns the current VM's numeric instance ID.
+func (c *Client) InstanceID() (string, error) { return instID.get(c) }
+
+// InternalIP returns the instance's primary internal IP address.
+func (c *Client) InternalIP() (string, error) {
+	return c.getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func (c *Client) ExternalIP() (string, error) {
+	return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func (c *Client) Hostname() (string, error) {
+	return c.getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func (c *Client) InstanceTags() ([]string, error) {
+	var s []string
+	j, err := c.Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceName returns the current VM's instance ID string.
+func (c *Client) InstanceName() (string, error) {
+	host, err := c.Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func (c *Client) Zone() (string, error) {
+	zone, err := c.getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+	if err != nil {
+		return "", err
+	}
+	return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func (c *Client) InstanceAttributeValue(attr string) (string, error) {
+	return c.Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func (c *Client) ProjectAttributeValue(attr string) (string, error) {
+	return c.Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func (c *Client) Scopes(serviceAccount string) ([]string, error) {
+	if serviceAccount == "" {
+		serviceAccount = "default"
+	}
+	return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}
+
 // Subscribe subscribes to a value from the metadata service.
 // The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
 // The suffix may contain query parameters.
@@ -275,11 +464,11 @@ func systemInfoSuggestsGCE() bool {
 // and ok false. Subscribe blocks until fn returns a non-nil error or the value
 // is deleted. Subscribe returns the error value returned from the last call to
 // fn, which may be nil when ok == false.
-func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
 	const failedSubscribeSleep = time.Second * 5
 
 	// First check to see if the metadata value exists at all.
-	val, lastETag, err := getETag(subscribeClient, suffix)
+	val, lastETag, err := c.getETag(suffix)
 	if err != nil {
 		return err
 	}
@@ -295,7 +484,7 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
 		suffix += "?wait_for_change=true&last_etag="
 	}
 	for {
-		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+		val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
 		if err != nil {
 			if _, deleted := err.(NotDefinedError); !deleted {
 				time.Sleep(failedSubscribeSleep)
@@ -311,127 +500,14 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
 	}
 }
 
-// ProjectID returns the current instance's project ID string.
-func ProjectID() (string, error) { return projID.get() }
-
-// NumericProjectID returns the current instance's numeric project ID.
-func NumericProjectID() (string, error) { return projNum.get() }
-
-// InternalIP returns the instance's primary internal IP address.
-func InternalIP() (string, error) {
-	return getTrimmed("instance/network-interfaces/0/ip")
+// Error contains an error response from the server.
+type Error struct {
+	// Code is the HTTP response status code.
+	Code int
+	// Message is the server response message.
+	Message string
 }
 
-// ExternalIP returns the instance's primary external (public) IP address.
-func ExternalIP() (string, error) {
-	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
-}
-
-// Hostname returns the instance's hostname. This will be of the form
-// "<instanceID>.c.<projID>.internal".
-func Hostname() (string, error) {
-	return getTrimmed("instance/hostname")
-}
-
-// InstanceTags returns the list of user-defined instance tags,
-// assigned when initially creating a GCE instance.
-func InstanceTags() ([]string, error) {
-	var s []string
-	j, err := Get("instance/tags")
-	if err != nil {
-		return nil, err
-	}
-	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
-		return nil, err
-	}
-	return s, nil
-}
-
-// InstanceID returns the current VM's numeric instance ID.
-func InstanceID() (string, error) {
-	return instID.get()
-}
-
-// InstanceName returns the current VM's instance ID string.
-func InstanceName() (string, error) {
-	host, err := Hostname()
-	if err != nil {
-		return "", err
-	}
-	return strings.Split(host, ".")[0], nil
-}
-
-// Zone returns the current VM's zone, such as "us-central1-b".
-func Zone() (string, error) {
-	zone, err := getTrimmed("instance/zone")
-	// zone is of the form "projects/<projNum>/zones/<zoneName>".
-	if err != nil {
-		return "", err
-	}
-	return zone[strings.LastIndex(zone, "/")+1:], nil
-}
-
-// InstanceAttributes returns the list of user-defined attributes,
-// assigned when initially creating a GCE VM instance. The value of an
-// attribute can be obtained with InstanceAttributeValue.
-func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
-
-// ProjectAttributes returns the list of user-defined attributes
-// applying to the project as a whole, not just this VM. The value of
-// an attribute can be obtained with ProjectAttributeValue.
-func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
-
-func lines(suffix string) ([]string, error) {
-	j, err := Get(suffix)
-	if err != nil {
-		return nil, err
-	}
-	s := strings.Split(strings.TrimSpace(j), "\n")
-	for i := range s {
-		s[i] = strings.TrimSpace(s[i])
-	}
-	return s, nil
-}
-
-// InstanceAttributeValue returns the value of the provided VM
-// instance attribute.
-// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// InstanceAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func InstanceAttributeValue(attr string) (string, error) { - return Get("instance/attributes/" + attr) -} - -// ProjectAttributeValue returns the value of the provided -// project attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// ProjectAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func ProjectAttributeValue(attr string) (string, error) { - return Get("project/attributes/" + attr) -} - -// Scopes returns the service account scopes for the given account. -// The account may be empty or the string "default" to use the instance's -// main account. -func Scopes(serviceAccount string) ([]string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return lines("instance/service-accounts/" + serviceAccount + "/scopes") -} - -func strsContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false +func (e *Error) Error() string { + return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) } diff --git a/vendor/cloud.google.com/go/go.mod b/vendor/cloud.google.com/go/go.mod new file mode 100644 index 0000000000..e3d7d7e623 --- /dev/null +++ b/vendor/cloud.google.com/go/go.mod @@ -0,0 +1,29 @@ +module cloud.google.com/go + +go 1.9 + +require ( + cloud.google.com/go/datastore v1.0.0 + github.com/golang/mock v1.3.1 + github.com/golang/protobuf v1.3.2 + github.com/google/btree v1.0.0 + github.com/google/go-cmp v0.3.0 + github.com/google/martian v2.1.0+incompatible + github.com/google/pprof v0.0.0-20190515194954-54271f7e092f + github.com/googleapis/gax-go/v2 v2.0.5 + github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 + go.opencensus.io v0.22.0 + golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522 + golang.org/x/lint v0.0.0-20190409202823-959b441ac422 + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 + golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + golang.org/x/sync v0.0.0-20190423024810-112230192c58 + golang.org/x/text v0.3.2 + golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 + golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 + google.golang.org/api v0.8.0 + google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 + google.golang.org/grpc v1.21.1 + honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a + rsc.io/binaryregexp v0.2.0 +) diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go index f5c23a564c..d291921b18 100644 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ import ( // Repo is the current version of the client libraries in this // repo. It should be a date in YYYYMMDD format. -const Repo = "20180226" +const Repo = "20190802" // Go returns the Go runtime version. The returned string // has no whitespace. 
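The next hunk replaces a `strings.IndexRune(...) < 0` test with the clearer `!strings.ContainsRune(...)`. The two predicates are equivalent; a small check illustrating that assumption:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, r := range "1.9beta" {
		before := strings.IndexRune("0123456789.", r) < 0 // old spelling
		after := !strings.ContainsRune("0123456789.", r)  // new spelling
		fmt.Printf("%q is not a semver rune: %v (both agree: %v)\n", r, after, before == after)
	}
}
```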
@@ -67,5 +67,5 @@ func goVer(s string) string { } func notSemverRune(r rune) bool { - return strings.IndexRune("0123456789.", r) < 0 + return !strings.ContainsRune("0123456789.", r) } diff --git a/vendor/cloud.google.com/go/logging/README.md b/vendor/cloud.google.com/go/logging/README.md new file mode 100644 index 0000000000..06050110e4 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/README.md @@ -0,0 +1,35 @@ +## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging) + +- [About Stackdriver Logging](https://cloud.google.com/logging/) +- [API documentation](https://cloud.google.com/logging/docs) +- [Go client documentation](https://godoc.org/cloud.google.com/go/logging) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging) + +### Example Usage + +First create a `logging.Client` to use throughout your application: +[snip]:# (logging-1) +```go +ctx := context.Background() +client, err := logging.NewClient(ctx, "my-project") +if err != nil { + // TODO: Handle error. +} +``` + +Usually, you'll want to add log entries to a buffer to be periodically flushed +(automatically and asynchronously) to the Stackdriver Logging service. +[snip]:# (logging-2) +```go +logger := client.Logger("my-log") +logger.Log(logging.Entry{Payload: "something happened!"}) +``` + +Close your client before your program exits, to flush any buffered log entries. +[snip]:# (logging-3) +```go +err = client.Close() +if err != nil { + // TODO: Handle error. +} +``` \ No newline at end of file diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client.go b/vendor/cloud.google.com/go/logging/apiv2/config_client.go index bad7a3a483..747158714f 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/config_client.go +++ b/vendor/cloud.google.com/go/logging/apiv2/config_client.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC +// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,17 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -// AUTO-GENERATED CODE. DO NOT EDIT. +// Code generated by gapic-generator. DO NOT EDIT. 
package logging import ( + "context" + "fmt" "math" + "net/url" "time" - "cloud.google.com/go/internal/version" - gax "github.com/googleapis/gax-go" - "golang.org/x/net/context" + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" @@ -63,8 +65,8 @@ func defaultConfigCallOptions() *ConfigCallOptions { codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, - Max: 1000 * time.Millisecond, - Multiplier: 1.2, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, }) }), }, @@ -73,7 +75,7 @@ func defaultConfigCallOptions() *ConfigCallOptions { ListSinks: retry[[2]string{"default", "idempotent"}], GetSink: retry[[2]string{"default", "idempotent"}], CreateSink: retry[[2]string{"default", "non_idempotent"}], - UpdateSink: retry[[2]string{"default", "non_idempotent"}], + UpdateSink: retry[[2]string{"default", "idempotent"}], DeleteSink: retry[[2]string{"default", "idempotent"}], ListExclusions: retry[[2]string{"default", "idempotent"}], GetExclusion: retry[[2]string{"default", "idempotent"}], @@ -84,6 +86,8 @@ func defaultConfigCallOptions() *ConfigCallOptions { } // ConfigClient is a client for interacting with Stackdriver Logging API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. type ConfigClient struct { // The connection to the service. conn *grpc.ClientConn @@ -100,8 +104,8 @@ type ConfigClient struct { // NewConfigClient creates a new config service v2 client. // -// Service for configuring sinks used to export log entries outside of -// Stackdriver Logging. +// Service for configuring sinks used to export log entries out of +// Logging. func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultConfigClientOptions(), opts...)...) if err != nil { @@ -132,16 +136,18 @@ func (c *ConfigClient) Close() error { // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *ConfigClient) SetGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", version.Go()}, keyval...) - kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } // ListSinks lists sinks. func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest, opts ...gax.CallOption) *LogSinkIterator { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.ListSinks[0:len(c.CallOptions.ListSinks):len(c.CallOptions.ListSinks)], opts...) 
it := &LogSinkIterator{} + req = proto.Clone(req).(*loggingpb.ListSinksRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogSink, string, error) { var resp *loggingpb.ListSinksResponse req.PageToken = pageToken @@ -169,12 +175,15 @@ func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRe return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken return it } // GetSink gets a sink. func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.GetSink[0:len(c.CallOptions.GetSink):len(c.CallOptions.GetSink)], opts...) var resp *loggingpb.LogSink err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -193,7 +202,8 @@ func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkReques // writer_identity is not permitted to write to the destination. A sink can // export log entries only from the resource owning the sink. func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.CreateSink[0:len(c.CallOptions.CreateSink):len(c.CallOptions.CreateSink)], opts...) var resp *loggingpb.LogSink err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -212,7 +222,8 @@ func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSink // The updated sink might also have a new writer_identity; see the // unique_writer_identity field. func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...) var resp *loggingpb.LogSink err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -229,7 +240,8 @@ func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSink // DeleteSink deletes a sink. If the sink has a unique writer_identity, then that // service account is also deleted. func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.DeleteSink[0:len(c.CallOptions.DeleteSink):len(c.CallOptions.DeleteSink)], opts...) 
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -241,9 +253,11 @@ func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSink // ListExclusions lists all the exclusions in a parent resource. func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListExclusionsRequest, opts ...gax.CallOption) *LogExclusionIterator { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.ListExclusions[0:len(c.CallOptions.ListExclusions):len(c.CallOptions.ListExclusions)], opts...) it := &LogExclusionIterator{} + req = proto.Clone(req).(*loggingpb.ListExclusionsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogExclusion, string, error) { var resp *loggingpb.ListExclusionsResponse req.PageToken = pageToken @@ -271,12 +285,15 @@ func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListEx return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken return it } // GetExclusion gets the description of an exclusion. func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.GetExclusion[0:len(c.CallOptions.GetExclusion):len(c.CallOptions.GetExclusion)], opts...) var resp *loggingpb.LogExclusion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -294,7 +311,8 @@ func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclu // Only log entries belonging to that resource can be excluded. // You can have up to 10 exclusions in a resource. func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.CreateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.CreateExclusion[0:len(c.CallOptions.CreateExclusion):len(c.CallOptions.CreateExclusion)], opts...) var resp *loggingpb.LogExclusion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -310,7 +328,8 @@ func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.Creat // UpdateExclusion changes one or more properties of an existing exclusion. func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.UpdateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.UpdateExclusion[0:len(c.CallOptions.UpdateExclusion):len(c.CallOptions.UpdateExclusion)], opts...) 
var resp *loggingpb.LogExclusion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -326,7 +345,8 @@ func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.Updat // DeleteExclusion deletes an exclusion. func (c *ConfigClient) DeleteExclusion(ctx context.Context, req *loggingpb.DeleteExclusionRequest, opts ...gax.CallOption) error { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.DeleteExclusion[0:len(c.CallOptions.DeleteExclusion):len(c.CallOptions.DeleteExclusion)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error diff --git a/vendor/cloud.google.com/go/logging/apiv2/doc.go b/vendor/cloud.google.com/go/logging/apiv2/doc.go index b8087b020f..cd6d8f7421 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/doc.go +++ b/vendor/cloud.google.com/go/logging/apiv2/doc.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC +// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,20 +12,35 @@ // See the License for the specific language governing permissions and // limitations under the License. -// AUTO-GENERATED CODE. DO NOT EDIT. +// Code generated by gapic-generator. DO NOT EDIT. // Package logging is an auto-generated package for the // Stackdriver Logging API. // // NOTE: This package is in alpha. It is not stable, and is likely to change. // -// Writes log entries and manages your Stackdriver Logging configuration. +// Writes log entries and manages your Logging configuration. +// +// Use of Context +// +// The ctx passed to NewClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +// +// For information about setting deadlines, reusing contexts, and more +// please visit godoc.org/cloud.google.com/go. // // Use the client at cloud.google.com/go/logging in preference to this. package logging // import "cloud.google.com/go/logging/apiv2" import ( - "golang.org/x/net/context" + "context" + "runtime" + "strings" + "unicode" + "google.golang.org/grpc/metadata" ) @@ -50,3 +65,42 @@ func DefaultAuthScopes() []string { "https://www.googleapis.com/auth/logging.write", } } + +// versionGo returns the Go runtime version. The returned string +// has no whitespace, suitable for reporting in header. 
+func versionGo() string { + const develPrefix = "devel +" + + s := runtime.Version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + notSemverRune := func(r rune) bool { + return strings.IndexRune("0123456789.", r) < 0 + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "UNKNOWN" +} + +const versionClient = "20190801" diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go index 90dae0ab4a..f39bdce457 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go +++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC +// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,17 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -// AUTO-GENERATED CODE. DO NOT EDIT. +// Code generated by gapic-generator. DO NOT EDIT. package logging import ( + "context" + "fmt" "math" + "net/url" "time" - "cloud.google.com/go/internal/version" - gax "github.com/googleapis/gax-go" - "golang.org/x/net/context" + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" @@ -59,35 +61,24 @@ func defaultCallOptions() *CallOptions { codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, - Max: 1000 * time.Millisecond, - Multiplier: 1.2, - }) - }), - }, - {"list", "idempotent"}: { - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Internal, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 1000 * time.Millisecond, - Multiplier: 1.2, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, }) }), }, } return &CallOptions{ DeleteLog: retry[[2]string{"default", "idempotent"}], - WriteLogEntries: retry[[2]string{"default", "non_idempotent"}], - ListLogEntries: retry[[2]string{"list", "idempotent"}], + WriteLogEntries: retry[[2]string{"default", "idempotent"}], + ListLogEntries: retry[[2]string{"default", "idempotent"}], ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], - ListLogs: retry[[2]string{"default", "idempotent"}], + ListLogs: retry[[2]string{"default", "idempotent"}], } } // Client is a client for interacting with Stackdriver Logging API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. type Client struct { // The connection to the service. conn *grpc.ClientConn @@ -135,8 +126,8 @@ func (c *Client) Close() error { // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *Client) SetGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", version.Go()}, keyval...) - kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + kv := append([]string{"gl-go", versionGo()}, keyval...) 
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } @@ -145,7 +136,8 @@ func (c *Client) SetGoogleClientInfo(keyval ...string) { // Log entries written shortly before the delete operation might not be // deleted. func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, opts ...gax.CallOption) error { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "log_name", url.QueryEscape(req.GetLogName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.DeleteLog[0:len(c.CallOptions.DeleteLog):len(c.CallOptions.DeleteLog)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -155,13 +147,13 @@ func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, return err } -// WriteLogEntries ## Log entry resources -// -// Writes log entries to Stackdriver Logging. This API method is the -// only way to send log entries to Stackdriver Logging. This method -// is used, directly or indirectly, by the Stackdriver Logging agent -// (fluentd) and all logging libraries configured to use Stackdriver -// Logging. +// WriteLogEntries writes log entries to Logging. This API method is the +// only way to send log entries to Logging. This method +// is used, directly or indirectly, by the Logging agent +// (fluentd) and all logging libraries configured to use Logging. +// A single request may contain log entries for a maximum of 1000 +// different resources (projects, organizations, billing accounts or +// folders) func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest, opts ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) { ctx = insertMetadata(ctx, c.xGoogMetadata) opts = append(c.CallOptions.WriteLogEntries[0:len(c.CallOptions.WriteLogEntries):len(c.CallOptions.WriteLogEntries)], opts...) @@ -178,12 +170,13 @@ func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEnt } // ListLogEntries lists log entries. Use this method to retrieve log entries from -// Stackdriver Logging. For ways to export log entries, see +// Logging. For ways to export log entries, see // Exporting Logs (at /logging/docs/export). func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator { ctx = insertMetadata(ctx, c.xGoogMetadata) opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...) it := &LogEntryIterator{} + req = proto.Clone(req).(*loggingpb.ListLogEntriesRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogEntry, string, error) { var resp *loggingpb.ListLogEntriesResponse req.PageToken = pageToken @@ -211,15 +204,17 @@ func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntri return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken return it } -// ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Stackdriver -// Logging. +// ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Logging. 
func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { ctx = insertMetadata(ctx, c.xGoogMetadata) opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...) it := &MonitoredResourceDescriptorIterator{} + req = proto.Clone(req).(*loggingpb.ListMonitoredResourceDescriptorsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { var resp *loggingpb.ListMonitoredResourceDescriptorsResponse req.PageToken = pageToken @@ -247,15 +242,19 @@ func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *logg return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken return it } // ListLogs lists the logs in projects, organizations, folders, or billing accounts. // Only logs that have entries are listed. func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, opts ...gax.CallOption) *StringIterator { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.ListLogs[0:len(c.CallOptions.ListLogs):len(c.CallOptions.ListLogs)], opts...) it := &StringIterator{} + req = proto.Clone(req).(*loggingpb.ListLogsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { var resp *loggingpb.ListLogsResponse req.PageToken = pageToken @@ -283,6 +282,8 @@ func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, o return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken return it } diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go index e2f96612b3..3b70ab741f 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go +++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC +// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,17 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -// AUTO-GENERATED CODE. DO NOT EDIT. +// Code generated by gapic-generator. DO NOT EDIT. 
package logging import ( + "context" + "fmt" "math" + "net/url" "time" - "cloud.google.com/go/internal/version" - gax "github.com/googleapis/gax-go" - "golang.org/x/net/context" + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" @@ -58,8 +60,8 @@ func defaultMetricsCallOptions() *MetricsCallOptions { codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, - Max: 1000 * time.Millisecond, - Multiplier: 1.2, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, }) }), }, @@ -68,12 +70,14 @@ func defaultMetricsCallOptions() *MetricsCallOptions { ListLogMetrics: retry[[2]string{"default", "idempotent"}], GetLogMetric: retry[[2]string{"default", "idempotent"}], CreateLogMetric: retry[[2]string{"default", "non_idempotent"}], - UpdateLogMetric: retry[[2]string{"default", "non_idempotent"}], + UpdateLogMetric: retry[[2]string{"default", "idempotent"}], DeleteLogMetric: retry[[2]string{"default", "idempotent"}], } } // MetricsClient is a client for interacting with Stackdriver Logging API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. type MetricsClient struct { // The connection to the service. conn *grpc.ClientConn @@ -121,16 +125,18 @@ func (c *MetricsClient) Close() error { // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *MetricsClient) SetGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", version.Go()}, keyval...) - kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } // ListLogMetrics lists logs-based metrics. func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest, opts ...gax.CallOption) *LogMetricIterator { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.ListLogMetrics[0:len(c.CallOptions.ListLogMetrics):len(c.CallOptions.ListLogMetrics)], opts...) it := &LogMetricIterator{} + req = proto.Clone(req).(*loggingpb.ListLogMetricsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogMetric, string, error) { var resp *loggingpb.ListLogMetricsResponse req.PageToken = pageToken @@ -158,12 +164,15 @@ func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListL return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken return it } // GetLogMetric gets a logs-based metric. func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.GetLogMetric[0:len(c.CallOptions.GetLogMetric):len(c.CallOptions.GetLogMetric)], opts...) 
var resp *loggingpb.LogMetric err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -179,7 +188,8 @@ func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogM // CreateLogMetric creates a logs-based metric. func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.CreateLogMetric[0:len(c.CallOptions.CreateLogMetric):len(c.CallOptions.CreateLogMetric)], opts...) var resp *loggingpb.LogMetric err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -195,7 +205,8 @@ func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.Crea // UpdateLogMetric creates or updates a logs-based metric. func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.UpdateLogMetric[0:len(c.CallOptions.UpdateLogMetric):len(c.CallOptions.UpdateLogMetric)], opts...) var resp *loggingpb.LogMetric err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -211,7 +222,8 @@ func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.Upda // DeleteLogMetric deletes a logs-based metric. func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest, opts ...gax.CallOption) error { - ctx = insertMetadata(ctx, c.xGoogMetadata) + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) opts = append(c.CallOptions.DeleteLogMetric[0:len(c.CallOptions.DeleteLogMetric):len(c.CallOptions.DeleteLogMetric)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error diff --git a/vendor/cloud.google.com/go/logging/doc.go b/vendor/cloud.google.com/go/logging/doc.go index d5f425e557..2a2a4ce478 100644 --- a/vendor/cloud.google.com/go/logging/doc.go +++ b/vendor/cloud.google.com/go/logging/doc.go @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -21,9 +21,6 @@ This client uses Logging API v2. See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API. -Note: This package is in beta. Some backwards-incompatible changes may occur. - - Creating a Client Use a Client to interact with the Stackdriver Logging API. @@ -65,7 +62,10 @@ For critical errors, you may want to send your log entries immediately. LogSync is slow and will block until the log entry has been sent, so it is not recommended for normal use. - lg.LogSync(ctx, logging.Entry{Payload: "ALERT! Something critical happened!"}) + err = lg.LogSync(ctx, logging.Entry{Payload: "ALERT! Something critical happened!"}) + if err != nil { + // TODO: Handle error. 
+ } Payloads @@ -85,11 +85,11 @@ If you have a []byte of JSON, wrap it in json.RawMessage: lg.Log(logging.Entry{Payload: json.RawMessage(j)}) -The Standard Logger Interface +The Standard Logger You may want to use a standard log.Logger in your program. - // stdlg implements log.Logger + // stdlg is an instance of *log.Logger. stdlg := lg.StandardLogger(logging.Info) stdlg.Println("some info") @@ -113,5 +113,22 @@ running from a Google Cloud Platform VM, select "GCE VM Instance". Otherwise, se accounts can be viewed on the command line with the "gcloud logging read" command. +Grouping Logs by Request + +To group all the log entries written during a single HTTP request, create two +Loggers, a "parent" and a "child," with different log IDs. Both should be in the same +project, and have the same MonitoredResource type and labels. + +- Parent entries must have HTTPRequest.Request populated. (Strictly speaking, only the URL is necessary.) + +- A child entry's timestamp must be within the time interval covered by the parent request (i.e., older +than parent.Timestamp, and newer than parent.Timestamp - parent.HTTPRequest.Latency, assuming the +parent timestamp marks the end of the request). + +- The trace field must be populated in all of the entries and match exactly. + +You should observe the child log entries grouped under the parent on the console. The +parent entry will not inherit the severity of its children; you must update the +parent severity yourself. */ package logging // import "cloud.google.com/go/logging" diff --git a/vendor/cloud.google.com/go/logging/go.mod b/vendor/cloud.google.com/go/logging/go.mod new file mode 100644 index 0000000000..8739fdf496 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/go.mod @@ -0,0 +1,15 @@ +module cloud.google.com/go/logging + +go 1.9 + +require ( + cloud.google.com/go v0.43.0 + github.com/golang/protobuf v1.3.1 + github.com/google/go-cmp v0.3.0 + github.com/googleapis/gax-go/v2 v2.0.5 + go.opencensus.io v0.22.0 + golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + google.golang.org/api v0.7.0 + google.golang.org/genproto v0.0.0-20190708153700-3bdd9d9f5532 + google.golang.org/grpc v1.21.1 +) diff --git a/vendor/google.golang.org/api/transport/grpc/go18.go b/vendor/cloud.google.com/go/logging/go_mod_tidy_hack.go similarity index 61% rename from vendor/google.golang.org/api/transport/grpc/go18.go rename to vendor/cloud.google.com/go/logging/go_mod_tidy_hack.go index a4b4a99453..a932c70c41 100644 --- a/vendor/google.golang.org/api/transport/grpc/go18.go +++ b/vendor/cloud.google.com/go/logging/go_mod_tidy_hack.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All Rights Reserved. +// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,15 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build go1.8 +// This file, and the cloud.google.com/go import, won't actually become part of +// the resultant binary. +// +build modhack -package grpc +package logging -import ( - "go.opencensus.io/plugin/ocgrpc" - "google.golang.org/grpc" -) - -func addOCStatsHandler(opts []grpc.DialOption) []grpc.DialOption { - return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) -} +// Necessary for safely adding multi-module repo.
See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/logging/internal/common.go b/vendor/cloud.google.com/go/logging/internal/common.go index 38cfbb5fa2..c5788feb0b 100644 --- a/vendor/cloud.google.com/go/logging/internal/common.go +++ b/vendor/cloud.google.com/go/logging/internal/common.go @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,15 +20,17 @@ import ( ) const ( + // ProdAddr is the production address. ProdAddr = "logging.googleapis.com:443" - Version = "0.2.0" ) +// LogPath creates a formatted path from a parent and a logID. func LogPath(parent, logID string) string { logID = strings.Replace(logID, "/", "%2F", -1) return fmt.Sprintf("%s/logs/%s", parent, logID) } +// LogIDFromPath parses and returns the ID from a log path. func LogIDFromPath(parent, path string) string { start := len(parent) + len("/logs/") if len(path) < start { diff --git a/vendor/cloud.google.com/go/logging/logging.go b/vendor/cloud.google.com/go/logging/logging.go index b341f61f7f..c537675f12 100644 --- a/vendor/cloud.google.com/go/logging/logging.go +++ b/vendor/cloud.google.com/go/logging/logging.go @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -25,16 +25,19 @@ package logging import ( + "bytes" + "context" "encoding/json" "errors" "fmt" "log" - "math" "net/http" + "regexp" "strconv" "strings" "sync" "time" + "unicode/utf8" "cloud.google.com/go/compute/metadata" "cloud.google.com/go/internal/version" @@ -44,7 +47,6 @@ import ( "github.com/golang/protobuf/ptypes" structpb "github.com/golang/protobuf/ptypes/struct" tspb "github.com/golang/protobuf/ptypes/timestamp" - "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/support/bundler" mrpb "google.golang.org/genproto/googleapis/api/monitoredres" @@ -53,13 +55,13 @@ import ( ) const ( - // Scope for reading from the logging service. + // ReadScope is the scope for reading from the logging service. ReadScope = "https://www.googleapis.com/auth/logging.read" - // Scope for writing to the logging service. + // WriteScope is the scope for writing to the logging service. WriteScope = "https://www.googleapis.com/auth/logging.write" - // Scope for administrative actions on the logging service. + // AdminScope is the scope for administrative actions on the logging service. AdminScope = "https://www.googleapis.com/auth/logging.admin" ) @@ -234,7 +236,7 @@ type Logger struct { // Options commonResource *mrpb.MonitoredResource commonLabels map[string]string - writeTimeout time.Duration + ctxFunc func() (context.Context, func()) } // A LoggerOption is a configuration option for a Logger. 
@@ -274,12 +276,17 @@ func detectResource() *mrpb.MonitoredResource { if err != nil { return } + name, err := metadata.InstanceName() + if err != nil { + return + } detectedResource.pb = &mrpb.MonitoredResource{ Type: "gce_instance", Labels: map[string]string{ - "project_id": projectID, - "instance_id": id, - "zone": zone, + "project_id": projectID, + "instance_id": id, + "instance_name": name, + "zone": zone, }, } }) @@ -398,6 +405,23 @@ type bufferedByteLimit int func (b bufferedByteLimit) set(l *Logger) { l.bundler.BufferedByteLimit = int(b) } +// ContextFunc is a function that will be called to obtain a context.Context for the +// WriteLogEntries RPC executed in the background for calls to Logger.Log. The +// default is a function that always returns context.Background. The second return +// value of the function is a function to call after the RPC completes. +// +// The function is not used for calls to Logger.LogSync, since the caller can pass +// in the context directly. +// +// This option is EXPERIMENTAL. It may be changed or removed. +func ContextFunc(f func() (ctx context.Context, afterCall func())) LoggerOption { + return contextFunc(f) +} + +type contextFunc func() (ctx context.Context, afterCall func()) + +func (c contextFunc) set(l *Logger) { l.ctxFunc = c } + // Logger returns a Logger that will write entries with the given log ID, such as // "syslog". A log ID must be less than 512 characters long and can only // include the following characters: upper and lower case alphanumeric @@ -412,6 +436,7 @@ func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { client: c, logName: internal.LogPath(c.parent, logID), commonResource: r, + ctxFunc: func() (context.Context, func()) { return context.Background(), nil }, } l.bundler = bundler.NewBundler(&logpb.LogEntry{}, func(entries interface{}) { l.writeLogEntries(entries.([]*logpb.LogEntry)) @@ -578,6 +603,17 @@ type Entry struct { // if any. If it contains a relative resource name, the name is assumed to // be relative to //tracing.googleapis.com. Trace string + + // ID of the span within the trace associated with the log entry. + // The ID is a 16-character hexadecimal encoding of an 8-byte array. + SpanID string + + // If set, indicates that this request was sampled. + TraceSampled bool + + // Optional. Source code location information associated with the log entry, + // if any. + SourceLocation *logpb.LogEntrySourceLocation } // HTTPRequest contains an http.Request as well as additional @@ -631,7 +667,7 @@ func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest { u.Fragment = "" pb := &logtypepb.HttpRequest{ RequestMethod: r.Request.Method, - RequestUrl: u.String(), + RequestUrl: fixUTF8(u.String()), RequestSize: r.RequestSize, Status: int32(r.Status), ResponseSize: r.ResponseSize, @@ -648,6 +684,27 @@ return pb } +// fixUTF8 is a helper that fixes an invalid UTF-8 string by replacing +// invalid UTF-8 runes with the Unicode replacement character (U+FFFD). +// See Issue https://github.com/googleapis/google-cloud-go/issues/1383. +func fixUTF8(s string) string { + if utf8.ValidString(s) { + return s + } + + // Otherwise time to build the sequence. + buf := new(bytes.Buffer) + buf.Grow(len(s)) + for _, r := range s { + if utf8.ValidRune(r) { + buf.WriteRune(r) + } else { + buf.WriteRune('\uFFFD') + } + } + return buf.String() +} + // toProtoStruct converts v, which must marshal into a JSON object, // into a Google Struct proto.
func toProtoStruct(v interface{}) (*structpb.Struct, error) { @@ -713,7 +770,7 @@ func jsonValueToStructValue(v interface{}) *structpb.Value { // Prefer Log for most uses. // TODO(jba): come up with a better name (LogNow?) or eliminate. func (l *Logger) LogSync(ctx context.Context, e Entry) error { - ent, err := toLogEntry(e) + ent, err := l.toLogEntry(e) if err != nil { return err } @@ -728,7 +785,7 @@ func (l *Logger) LogSync(ctx context.Context, e Entry) error { // Log buffers the Entry for output to the logging service. It never blocks. func (l *Logger) Log(e Entry) { - ent, err := toLogEntry(e) + ent, err := l.toLogEntry(e) if err != nil { l.client.error(err) return @@ -756,12 +813,16 @@ func (l *Logger) writeLogEntries(entries []*logpb.LogEntry) { Labels: l.commonLabels, Entries: entries, } - ctx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout) + ctx, afterCall := l.ctxFunc() + ctx, cancel := context.WithTimeout(ctx, defaultWriteTimeout) defer cancel() _, err := l.client.client.WriteLogEntries(ctx, req) if err != nil { l.client.error(err) } + if afterCall != nil { + afterCall() + } } // StandardLogger returns a *log.Logger for the provided severity. @@ -771,14 +832,38 @@ func (l *Logger) writeLogEntries(entries []*logpb.LogEntry) { // (for example by calling SetFlags or SetPrefix). func (l *Logger) StandardLogger(s Severity) *log.Logger { return l.stdLoggers[s] } -func trunc32(i int) int32 { - if i > math.MaxInt32 { - i = math.MaxInt32 +var reCloudTraceContext = regexp.MustCompile(`([a-f\d]+)/([a-f\d]+);o=(\d)`) + +func deconstructXCloudTraceContext(s string) (traceID, spanID string, traceSampled bool) { + // As per the format described at https://cloud.google.com/trace/docs/troubleshooting#force-trace + // "X-Cloud-Trace-Context: TRACE_ID/SPAN_ID;o=TRACE_TRUE" + // for example: + // "X-Cloud-Trace-Context: 105445aa7843bc8bf206b120001000/0;o=1" + // + // We expect: + // * traceID: "105445aa7843bc8bf206b120001000" + // * spanID: "" + // * traceSampled: true + matches := reCloudTraceContext.FindAllStringSubmatch(s, -1) + if len(matches) != 1 { + return } - return int32(i) + + sub := matches[0] + if len(sub) != 4 { + return + } + + traceID, spanID = sub[1], sub[2] + if spanID == "0" { + spanID = "" + } + traceSampled = sub[3] == "1" + + return } -func toLogEntry(e Entry) (*logpb.LogEntry, error) { +func (l *Logger) toLogEntry(e Entry) (*logpb.LogEntry, error) { if e.LogName != "" { return nil, errors.New("logging: Entry.LogName should be not be set when writing") } @@ -790,15 +875,37 @@ func toLogEntry(e Entry) (*logpb.LogEntry, error) { if err != nil { return nil, err } + if e.Trace == "" && e.HTTPRequest != nil && e.HTTPRequest.Request != nil { + traceHeader := e.HTTPRequest.Request.Header.Get("X-Cloud-Trace-Context") + if traceHeader != "" { + // Set to a relative resource name, as described at + // https://cloud.google.com/appengine/docs/flexible/go/writing-application-logs. 
+ traceID, spanID, traceSampled := deconstructXCloudTraceContext(traceHeader) + if traceID != "" { + e.Trace = fmt.Sprintf("%s/traces/%s", l.client.parent, traceID) + } + if e.SpanID == "" { + e.SpanID = spanID + } + + // If we previously hadn't set TraceSampled, let's retrieve it + // from the HTTP request's header, as per: + // https://cloud.google.com/trace/docs/troubleshooting#force-trace + e.TraceSampled = e.TraceSampled || traceSampled + } + } ent := &logpb.LogEntry{ - Timestamp: ts, - Severity: logtypepb.LogSeverity(e.Severity), - InsertId: e.InsertID, - HttpRequest: fromHTTPRequest(e.HTTPRequest), - Operation: e.Operation, - Labels: e.Labels, - Trace: e.Trace, - Resource: e.Resource, + Timestamp: ts, + Severity: logtypepb.LogSeverity(e.Severity), + InsertId: e.InsertID, + HttpRequest: fromHTTPRequest(e.HTTPRequest), + Operation: e.Operation, + Labels: e.Labels, + Trace: e.Trace, + SpanId: e.SpanID, + Resource: e.Resource, + SourceLocation: e.SourceLocation, + TraceSampled: e.TraceSampled, } switch p := e.Payload.(type) { case string: diff --git a/vendor/cloud.google.com/go/tools.go b/vendor/cloud.google.com/go/tools.go new file mode 100644 index 0000000000..fa01cc44cc --- /dev/null +++ b/vendor/cloud.google.com/go/tools.go @@ -0,0 +1,33 @@ +// +build tools + +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This package exists to cause `go mod` and `go get` to believe these tools +// are dependencies, even though they are not runtime dependencies of any +// package (these are tools used by our CI builds). This means they will appear +// in our `go.mod` file, but will not be a part of the build. Also, since the +// build target is something non-existent, these should not be included in any +// binaries. + +package cloud + +import ( + _ "github.com/golang/protobuf/protoc-gen-go" + _ "github.com/jstemmer/go-junit-report" + _ "golang.org/x/exp/cmd/apidiff" + _ "golang.org/x/lint/golint" + _ "golang.org/x/tools/cmd/goimports" + _ "honnef.co/go/tools/cmd/staticcheck" +) diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/vendor/github.com/golang/groupcache/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. 
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
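The groupcache tree vendored below is pulled in for its lru package, a dependency of the newer go.opencensus.io. A minimal usage sketch of that cache, based only on the exported API in the vendored lru.go (the two-entry limit and the keys here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	// Cache at most two entries; adding a third evicts the least
	// recently used one.
	c := lru.New(2)
	c.OnEvicted = func(key lru.Key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	c.Add("a", 1)
	c.Add("b", 2)
	if v, ok := c.Get("a"); ok { // Get moves "a" to the front, so "b" is now oldest
		fmt.Println("a =", v)
	}
	c.Add("c", 3) // evicts "b"

	_, ok := c.Get("b")
	fmt.Println("b present:", ok) // false
}
```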
diff --git a/vendor/github.com/golang/groupcache/README.md b/vendor/github.com/golang/groupcache/README.md new file mode 100644 index 0000000000..70c29da160 --- /dev/null +++ b/vendor/github.com/golang/groupcache/README.md @@ -0,0 +1,73 @@ +# groupcache + +## Summary + +groupcache is a caching and cache-filling library, intended as a +replacement for memcached in many cases. + +For API docs and examples, see http://godoc.org/github.com/golang/groupcache + +## Comparison to memcached + +### **Like memcached**, groupcache: + + * shards by key to select which peer is responsible for that key + +### **Unlike memcached**, groupcache: + + * does not require running a separate set of servers, thus massively + reducing deployment/configuration pain. groupcache is a client + library as well as a server. It connects to its own peers. + + * comes with a cache filling mechanism. Whereas memcached just says + "Sorry, cache miss", often resulting in a thundering herd of + database (or whatever) loads from an unbounded number of clients + (which has resulted in several fun outages), groupcache coordinates + cache fills such that only one load in one process of an entire + replicated set of processes populates the cache, then multiplexes + the loaded value to all callers. + + * does not support versioned values. If key "foo" is value "bar", + key "foo" must always be "bar". There are neither cache expiration + times, nor explicit cache evictions. Thus there is also no CAS, + nor Increment/Decrement. This also means that groupcache.... + + * ... supports automatic mirroring of super-hot items to multiple + processes. This prevents memcached hot spotting where a machine's + CPU and/or NIC are overloaded by very popular keys/values. + + * is currently only available for Go. It's very unlikely that I + (bradfitz@) will port the code to any other language. + +## Loading process + +In a nutshell, a groupcache lookup of **Get("foo")** looks like: + +(On machine #5 of a set of N machines running the same code) + + 1. Is the value of "foo" in local memory because it's super hot? If so, use it. + + 2. Is the value of "foo" in local memory because peer #5 (the current + peer) is the owner of it? If so, use it. + + 3. Amongst all the peers in my set of N, am I the owner of the key + "foo"? (e.g. does it consistent hash to 5?) If so, load it. If + other callers come in, via the same process or via RPC requests + from peers, they block waiting for the load to finish and get the + same answer. If not, RPC to the peer that's the owner and get + the answer. If the RPC fails, just load it locally (still with + local dup suppression). + +## Users + +groupcache is in production use by dl.google.com (its original user), +parts of Blogger, parts of Google Code, parts of Google Fiber, parts +of Google production monitoring systems, etc. + +## Presentations + +See http://talks.golang.org/2013/oscon-dl.slide + +## Help + +Use the golang-nuts mailing list for any discussion or questions. diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go new file mode 100644 index 0000000000..eac1c7664f --- /dev/null +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -0,0 +1,133 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package lru + +import "container/list" + +// Cache is an LRU cache. It is not safe for concurrent access. +type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key Key, value interface{}) + + ll *list.List + cache map[interface{}]*list.Element +} + +// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators +type Key interface{} + +type entry struct { + key Key + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[interface{}]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + if c.cache == nil { + c.cache = make(map[interface{}]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + } +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} + +// Clear purges all stored items from the cache. +func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go new file mode 100644 index 0000000000..63b8ca0eed --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go @@ -0,0 +1,2808 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* + The code generator for the plugin for the Google protocol buffer compiler. + It generates Go code from the protocol buffer description files read by the + main routine. +*/ +package generator + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/hex" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/printer" + "go/token" + "log" + "os" + "path" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap" + + "github.com/golang/protobuf/protoc-gen-go/descriptor" + plugin "github.com/golang/protobuf/protoc-gen-go/plugin" +) + +// generatedCodeVersion indicates a version of the generated code. +// It is incremented whenever an incompatibility between the generated code and +// proto package is introduced; the generated code references +// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion). +const generatedCodeVersion = 3 + +// A Plugin provides functionality to add to the output during Go code generation, +// such as to produce RPC stubs. +type Plugin interface { + // Name identifies the plugin. + Name() string + // Init is called once after data structures are built but before + // code generation begins. + Init(g *Generator) + // Generate produces the code generated by the plugin for this file, + // except for the imports, by calling the generator's methods P, In, and Out. + Generate(file *FileDescriptor) + // GenerateImports produces the import declarations for this file. + // It is called after Generate. + GenerateImports(file *FileDescriptor) +} + +var plugins []Plugin + +// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated. +// It is typically called during initialization. +func RegisterPlugin(p Plugin) { + plugins = append(plugins, p) +} + +// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf". 
+type GoImportPath string + +func (p GoImportPath) String() string { return strconv.Quote(string(p)) } + +// A GoPackageName is the name of a Go package. e.g., "protobuf". +type GoPackageName string + +// Each type we import as a protocol buffer (other than FileDescriptorProto) needs +// a pointer to the FileDescriptorProto that represents it. These types achieve that +// wrapping by placing each Proto inside a struct with the pointer to its File. The +// structs have the same names as their contents, with "Proto" removed. +// FileDescriptor is used to store the things that it points to. + +// The file and package name methods are common to messages and enums. +type common struct { + file *FileDescriptor // File this object comes from. +} + +// GoImportPath is the import path of the Go package containing the type. +func (c *common) GoImportPath() GoImportPath { + return c.file.importPath +} + +func (c *common) File() *FileDescriptor { return c.file } + +func fileIsProto3(file *descriptor.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) } + +// Descriptor represents a protocol buffer message. +type Descriptor struct { + common + *descriptor.DescriptorProto + parent *Descriptor // The containing message, if any. + nested []*Descriptor // Inner messages, if any. + enums []*EnumDescriptor // Inner enums, if any. + ext []*ExtensionDescriptor // Extensions, if any. + typename []string // Cached typename vector. + index int // The index into the container, whether the file or another message. + path string // The SourceCodeInfo path as comma-separated integers. + group bool +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name. +func (d *Descriptor) TypeName() []string { + if d.typename != nil { + return d.typename + } + n := 0 + for parent := d; parent != nil; parent = parent.parent { + n++ + } + s := make([]string, n) + for parent := d; parent != nil; parent = parent.parent { + n-- + s[n] = parent.GetName() + } + d.typename = s + return s +} + +// EnumDescriptor describes an enum. If it's at top level, its parent will be nil. +// Otherwise it will be the descriptor of the message in which it is defined. +type EnumDescriptor struct { + common + *descriptor.EnumDescriptorProto + parent *Descriptor // The containing message, if any. + typename []string // Cached typename vector. + index int // The index into the container, whether the file or a message. + path string // The SourceCodeInfo path as comma-separated integers. +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name. +func (e *EnumDescriptor) TypeName() (s []string) { + if e.typename != nil { + return e.typename + } + name := e.GetName() + if e.parent == nil { + s = make([]string, 1) + } else { + pname := e.parent.TypeName() + s = make([]string, len(pname)+1) + copy(s, pname) + } + s[len(s)-1] = name + e.typename = s + return s +} + +// Everything but the last element of the full type name, CamelCased. +// The values of type Foo.Bar are called Foo_value1... not Foo_Bar_value1... . +func (e *EnumDescriptor) prefix() string { + if e.parent == nil { + // If the enum is not part of a message, the prefix is just the type name. + return CamelCase(*e.Name) + "_" + } + typeName := e.TypeName() + return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_" +} + +// The integer value of the named constant in this enumerated type.
+func (e *EnumDescriptor) integerValueAsString(name string) string { + for _, c := range e.Value { + if c.GetName() == name { + return fmt.Sprint(c.GetNumber()) + } + } + log.Fatal("cannot find value for enum constant") + return "" +} + +// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil. +// Otherwise it will be the descriptor of the message in which it is defined. +type ExtensionDescriptor struct { + common + *descriptor.FieldDescriptorProto + parent *Descriptor // The containing message, if any. +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name. +func (e *ExtensionDescriptor) TypeName() (s []string) { + name := e.GetName() + if e.parent == nil { + // top-level extension + s = make([]string, 1) + } else { + pname := e.parent.TypeName() + s = make([]string, len(pname)+1) + copy(s, pname) + } + s[len(s)-1] = name + return s +} + +// DescName returns the variable name used for the generated descriptor. +func (e *ExtensionDescriptor) DescName() string { + // The full type name. + typeName := e.TypeName() + // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix. + for i, s := range typeName { + typeName[i] = CamelCase(s) + } + return "E_" + strings.Join(typeName, "_") +} + +// ImportedDescriptor describes a type that has been publicly imported from another file. +type ImportedDescriptor struct { + common + o Object +} + +func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() } + +// FileDescriptor describes a protocol buffer descriptor file (.proto). +// It includes slices of all the messages and enums defined within it. +// Those slices are constructed by WrapTypes. +type FileDescriptor struct { + *descriptor.FileDescriptorProto + desc []*Descriptor // All the messages defined in this file. + enum []*EnumDescriptor // All the enums defined in this file. + ext []*ExtensionDescriptor // All the top-level extensions defined in this file. + imp []*ImportedDescriptor // All types defined in files publicly imported by this file. + + // Comments, stored as a map of path (comma-separated integers) to the comment. + comments map[string]*descriptor.SourceCodeInfo_Location + + // The full list of symbols that are exported, + // as a map from the exported object to its symbols. + // This is used for supporting public imports. + exported map[Object][]symbol + + importPath GoImportPath // Import path of this file's package. + packageName GoPackageName // Name of this file's Go package. + + proto3 bool // whether to generate proto3 code for this file +} + +// VarName is the variable name we'll use in the generated code to refer +// to the compressed bytes of this descriptor. It is not exported, so +// it is only valid inside the generated package. +func (d *FileDescriptor) VarName() string { + h := sha256.Sum256([]byte(d.GetName())) + return fmt.Sprintf("fileDescriptor_%s", hex.EncodeToString(h[:8])) +} + +// goPackageOption interprets the file's go_package option. +// If there is no go_package, it returns ("", "", false). +// If there's a simple name, it returns ("", pkg, true). +// If the option implies an import path, it returns (impPath, pkg, true). +func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) { + opt := d.GetOptions().GetGoPackage() + if opt == "" { + return "", "", false + } + // A semicolon-delimited suffix delimits the import path and package name.
+ sc := strings.Index(opt, ";") + if sc >= 0 { + return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true + } + // The presence of a slash implies there's an import path. + slash := strings.LastIndex(opt, "/") + if slash >= 0 { + return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true + } + return "", cleanPackageName(opt), true +} + +// goFileName returns the output name for the generated Go file. +func (d *FileDescriptor) goFileName(pathType pathType) string { + name := *d.Name + if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { + name = name[:len(name)-len(ext)] + } + name += ".pb.go" + + if pathType == pathTypeSourceRelative { + return name + } + + // Does the file have a "go_package" option? + // If it does, it may override the filename. + if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { + // Replace the existing dirname with the declared import path. + _, name = path.Split(name) + name = path.Join(string(impPath), name) + return name + } + + return name +} + +func (d *FileDescriptor) addExport(obj Object, sym symbol) { + d.exported[obj] = append(d.exported[obj], sym) +} + +// symbol is an interface representing an exported Go symbol. +type symbol interface { + // GenerateAlias should generate an appropriate alias + // for the symbol from the named package. + GenerateAlias(g *Generator, filename string, pkg GoPackageName) +} + +type messageSymbol struct { + sym string + hasExtensions, isMessageSet bool + oneofTypes []string +} + +type getterSymbol struct { + name string + typ string + typeName string // canonical name in proto world; empty for proto.Message and similar + genType bool // whether typ contains a generated type (message/group/enum) +} + +func (ms *messageSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + g.P("// ", ms.sym, " from public import ", filename) + g.P("type ", ms.sym, " = ", pkg, ".", ms.sym) + for _, name := range ms.oneofTypes { + g.P("type ", name, " = ", pkg, ".", name) + } +} + +type enumSymbol struct { + name string + proto3 bool // Whether this came from a proto3 file. +} + +func (es enumSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + s := es.name + g.P("// ", s, " from public import ", filename) + g.P("type ", s, " = ", pkg, ".", s) + g.P("var ", s, "_name = ", pkg, ".", s, "_name") + g.P("var ", s, "_value = ", pkg, ".", s, "_value") +} + +type constOrVarSymbol struct { + sym string + typ string // either "const" or "var" + cast string // if non-empty, a type cast is required (used for enums) +} + +func (cs constOrVarSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + v := string(pkg) + "." + cs.sym + if cs.cast != "" { + v = cs.cast + "(" + v + ")" + } + g.P(cs.typ, " ", cs.sym, " = ", v) +} + +// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. +type Object interface { + GoImportPath() GoImportPath + TypeName() []string + File() *FileDescriptor +} + +// Generator is the type whose methods generate the output, stored in the associated response structure. +type Generator struct { + *bytes.Buffer + + Request *plugin.CodeGeneratorRequest // The input. + Response *plugin.CodeGeneratorResponse // The output. + + Param map[string]string // Command-line parameters. + PackageImportPath string // Go import path of the package we're generating code for + ImportPrefix string // String to prefix to imported package file names. 
+ ImportMap map[string]string // Mapping from .proto file name to import path + + Pkg map[string]string // The names under which we import support packages + + outputImportPath GoImportPath // Package we're generating code for. + allFiles []*FileDescriptor // All files in the tree + allFilesByName map[string]*FileDescriptor // All files by filename. + genFiles []*FileDescriptor // Those files we will generate output for. + file *FileDescriptor // The file we are compiling now. + packageNames map[GoImportPath]GoPackageName // Imported package names in the current file. + usedPackages map[GoImportPath]bool // Packages used in current file. + usedPackageNames map[GoPackageName]bool // Package names used in the current file. + addedImports map[GoImportPath]bool // Additional imports to emit. + typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. + init []string // Lines to emit in the init function. + indent string + pathType pathType // How to generate output filenames. + writeOutput bool + annotateCode bool // whether to store annotations + annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store +} + +type pathType int + +const ( + pathTypeImport pathType = iota + pathTypeSourceRelative +) + +// New creates a new generator and allocates the request and response protobufs. +func New() *Generator { + g := new(Generator) + g.Buffer = new(bytes.Buffer) + g.Request = new(plugin.CodeGeneratorRequest) + g.Response = new(plugin.CodeGeneratorResponse) + return g +} + +// Error reports a problem, including an error, and exits the program. +func (g *Generator) Error(err error, msgs ...string) { + s := strings.Join(msgs, " ") + ":" + err.Error() + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// Fail reports a problem and exits the program. +func (g *Generator) Fail(msgs ...string) { + s := strings.Join(msgs, " ") + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// CommandLineParameters breaks the comma-separated list of key=value pairs +// in the parameter (a member of the request protobuf) into a key/value map. +// It then sets file name mappings defined by those entries. +func (g *Generator) CommandLineParameters(parameter string) { + g.Param = make(map[string]string) + for _, p := range strings.Split(parameter, ",") { + if i := strings.Index(p, "="); i < 0 { + g.Param[p] = "" + } else { + g.Param[p[0:i]] = p[i+1:] + } + } + + g.ImportMap = make(map[string]string) + pluginList := "none" // Default list of plugin names to enable (empty means all). + for k, v := range g.Param { + switch k { + case "import_prefix": + g.ImportPrefix = v + case "import_path": + g.PackageImportPath = v + case "paths": + switch v { + case "import": + g.pathType = pathTypeImport + case "source_relative": + g.pathType = pathTypeSourceRelative + default: + g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v)) + } + case "plugins": + pluginList = v + case "annotate_code": + if v == "true" { + g.annotateCode = true + } + default: + if len(k) > 0 && k[0] == 'M' { + g.ImportMap[k[1:]] = v + } + } + } + if pluginList != "" { + // Amend the set of plugins. + enabled := make(map[string]bool) + for _, name := range strings.Split(pluginList, "+") { + enabled[name] = true + } + var nplugins []Plugin + for _, p := range plugins { + if enabled[p.Name()] { + nplugins = append(nplugins, p) + } + } + plugins = nplugins + } +} + +// DefaultPackageName returns the package name printed for the object. 
+// If its file is in a different package, it returns the package name we're using for this file, plus ".". +// Otherwise it returns the empty string. +func (g *Generator) DefaultPackageName(obj Object) string { + importPath := obj.GoImportPath() + if importPath == g.outputImportPath { + return "" + } + return string(g.GoPackageName(importPath)) + "." +} + +// GoPackageName returns the name used for a package. +func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName { + if name, ok := g.packageNames[importPath]; ok { + return name + } + name := cleanPackageName(baseName(string(importPath))) + for i, orig := 1, name; g.usedPackageNames[name] || isGoPredeclaredIdentifier[string(name)]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) + } + g.packageNames[importPath] = name + g.usedPackageNames[name] = true + return name +} + +// AddImport adds a package to the generated file's import section. +// It returns the name used for the package. +func (g *Generator) AddImport(importPath GoImportPath) GoPackageName { + g.addedImports[importPath] = true + return g.GoPackageName(importPath) +} + +var globalPackageNames = map[GoPackageName]bool{ + "fmt": true, + "math": true, + "proto": true, +} + +// Create and remember a guaranteed unique package name. Pkg is the candidate name. +// The FileDescriptor parameter is unused. +func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { + name := cleanPackageName(pkg) + for i, orig := 1, name; globalPackageNames[name]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) + } + globalPackageNames[name] = true + return string(name) +} + +var isGoKeyword = map[string]bool{ + "break": true, + "case": true, + "chan": true, + "const": true, + "continue": true, + "default": true, + "else": true, + "defer": true, + "fallthrough": true, + "for": true, + "func": true, + "go": true, + "goto": true, + "if": true, + "import": true, + "interface": true, + "map": true, + "package": true, + "range": true, + "return": true, + "select": true, + "struct": true, + "switch": true, + "type": true, + "var": true, +} + +var isGoPredeclaredIdentifier = map[string]bool{ + "append": true, + "bool": true, + "byte": true, + "cap": true, + "close": true, + "complex": true, + "complex128": true, + "complex64": true, + "copy": true, + "delete": true, + "error": true, + "false": true, + "float32": true, + "float64": true, + "imag": true, + "int": true, + "int16": true, + "int32": true, + "int64": true, + "int8": true, + "iota": true, + "len": true, + "make": true, + "new": true, + "nil": true, + "panic": true, + "print": true, + "println": true, + "real": true, + "recover": true, + "rune": true, + "string": true, + "true": true, + "uint": true, + "uint16": true, + "uint32": true, + "uint64": true, + "uint8": true, + "uintptr": true, +} + +func cleanPackageName(name string) GoPackageName { + name = strings.Map(badToUnderscore, name) + // Identifier must not be keyword or predeclared identifier: insert _. + if isGoKeyword[name] { + name = "_" + name + } + // Identifier must not begin with digit: insert _. + if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) { + name = "_" + name + } + return GoPackageName(name) +} + +// defaultGoPackage returns the package name to use, +// derived from the import path of the package we're building code for. 
+func (g *Generator) defaultGoPackage() GoPackageName { + p := g.PackageImportPath + if i := strings.LastIndex(p, "/"); i >= 0 { + p = p[i+1:] + } + return cleanPackageName(p) +} + +// SetPackageNames sets the package name for this run. +// The package name must agree across all files being generated. +// It also defines unique package names for all imported files. +func (g *Generator) SetPackageNames() { + g.outputImportPath = g.genFiles[0].importPath + + defaultPackageNames := make(map[GoImportPath]GoPackageName) + for _, f := range g.genFiles { + if _, p, ok := f.goPackageOption(); ok { + defaultPackageNames[f.importPath] = p + } + } + for _, f := range g.genFiles { + if _, p, ok := f.goPackageOption(); ok { + // Source file: option go_package = "quux/bar"; + f.packageName = p + } else if p, ok := defaultPackageNames[f.importPath]; ok { + // A go_package option in another file in the same package. + // + // This is a poor choice in general, since every source file should + // contain a go_package option. Supported mainly for historical + // compatibility. + f.packageName = p + } else if p := g.defaultGoPackage(); p != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets a package name for files which don't + // contain a go_package option. + f.packageName = p + } else if p := f.GetPackage(); p != "" { + // Source file: package quux.bar; + f.packageName = cleanPackageName(p) + } else { + // Source filename. + f.packageName = cleanPackageName(baseName(f.GetName())) + } + } + + // Check that all files have a consistent package name and import path. + for _, f := range g.genFiles[1:] { + if a, b := g.genFiles[0].importPath, f.importPath; a != b { + g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b)) + } + if a, b := g.genFiles[0].packageName, f.packageName; a != b { + g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b)) + } + } + + // Names of support packages. These never vary (if there are conflicts, + // we rename the conflicting package), so this could be removed someday. + g.Pkg = map[string]string{ + "fmt": "fmt", + "math": "math", + "proto": "proto", + } +} + +// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos +// and FileDescriptorProtos into file-referenced objects within the Generator. +// It also creates the list of files to generate and so should be called before GenerateAllFiles. +func (g *Generator) WrapTypes() { + g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) + g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) + genFileNames := make(map[string]bool) + for _, n := range g.Request.FileToGenerate { + genFileNames[n] = true + } + for _, f := range g.Request.ProtoFile { + fd := &FileDescriptor{ + FileDescriptorProto: f, + exported: make(map[Object][]symbol), + proto3: fileIsProto3(f), + } + // The import path may be set in a number of ways. + if substitution, ok := g.ImportMap[f.GetName()]; ok { + // Command-line: M=foo.proto=quux/bar. + // + // Explicit mapping of source file to import path. + fd.importPath = GoImportPath(substitution) + } else if genFileNames[f.GetName()] && g.PackageImportPath != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets the import path for every file that + // we generate code for. + fd.importPath = GoImportPath(g.PackageImportPath) + } else if p, _, _ := fd.goPackageOption(); p != "" { + // Source file: option go_package = "quux/bar"; + // + // The go_package option sets the import path. 
+			fd.importPath = p
+		} else {
+			// Source filename.
+			//
+			// Last resort when nothing else is available.
+			fd.importPath = GoImportPath(path.Dir(f.GetName()))
+		}
+		// We must wrap the descriptors before we wrap the enums
+		fd.desc = wrapDescriptors(fd)
+		g.buildNestedDescriptors(fd.desc)
+		fd.enum = wrapEnumDescriptors(fd, fd.desc)
+		g.buildNestedEnums(fd.desc, fd.enum)
+		fd.ext = wrapExtensions(fd)
+		extractComments(fd)
+		g.allFiles = append(g.allFiles, fd)
+		g.allFilesByName[f.GetName()] = fd
+	}
+	for _, fd := range g.allFiles {
+		fd.imp = wrapImported(fd, g)
+	}
+
+	g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate))
+	for _, fileName := range g.Request.FileToGenerate {
+		fd := g.allFilesByName[fileName]
+		if fd == nil {
+			g.Fail("could not find file named", fileName)
+		}
+		g.genFiles = append(g.genFiles, fd)
+	}
+}
+
+// Scan the descriptors in this file. For each one, build the slice of nested descriptors.
+func (g *Generator) buildNestedDescriptors(descs []*Descriptor) {
+	for _, desc := range descs {
+		if len(desc.NestedType) != 0 {
+			for _, nest := range descs {
+				if nest.parent == desc {
+					desc.nested = append(desc.nested, nest)
+				}
+			}
+			if len(desc.nested) != len(desc.NestedType) {
+				g.Fail("internal error: nesting failure for", desc.GetName())
+			}
+		}
+	}
+}
+
+func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) {
+	for _, desc := range descs {
+		if len(desc.EnumType) != 0 {
+			for _, enum := range enums {
+				if enum.parent == desc {
+					desc.enums = append(desc.enums, enum)
+				}
+			}
+			if len(desc.enums) != len(desc.EnumType) {
+				g.Fail("internal error: enum nesting failure for", desc.GetName())
+			}
+		}
+	}
+}
+
+// Construct the Descriptor
+func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor {
+	d := &Descriptor{
+		common:          common{file},
+		DescriptorProto: desc,
+		parent:          parent,
+		index:           index,
+	}
+	if parent == nil {
+		d.path = fmt.Sprintf("%d,%d", messagePath, index)
+	} else {
+		d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index)
+	}
+
+	// The only way to distinguish a group from a message is whether
+	// the containing message has a TYPE_GROUP field that matches.
+	if parent != nil {
+		parts := d.TypeName()
+		if file.Package != nil {
+			parts = append([]string{*file.Package}, parts...)
+		}
+		exp := "." + strings.Join(parts, ".")
+		for _, field := range parent.Field {
+			if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp {
+				d.group = true
+				break
+			}
+		}
+	}
+
+	for _, field := range desc.Extension {
+		d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d})
+	}
+
+	return d
+}
+
+// Return a slice of all the Descriptors defined within this file
+func wrapDescriptors(file *FileDescriptor) []*Descriptor {
+	sl := make([]*Descriptor, 0, len(file.MessageType)+10)
+	for i, desc := range file.MessageType {
+		sl = wrapThisDescriptor(sl, desc, nil, file, i)
+	}
+	return sl
+}
+
+// Wrap this Descriptor, recursively
+func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor {
+	sl = append(sl, newDescriptor(desc, parent, file, index))
+	me := sl[len(sl)-1]
+	for i, nested := range desc.NestedType {
+		sl = wrapThisDescriptor(sl, nested, me, file, i)
+	}
+	return sl
+}
+
+// Construct the EnumDescriptor
+func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor {
+	ed := &EnumDescriptor{
+		common:              common{file},
+		EnumDescriptorProto: desc,
+		parent:              parent,
+		index:               index,
+	}
+	if parent == nil {
+		ed.path = fmt.Sprintf("%d,%d", enumPath, index)
+	} else {
+		ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index)
+	}
+	return ed
+}
+
+// Return a slice of all the EnumDescriptors defined within this file
+func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor {
+	sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10)
+	// Top-level enums.
+	for i, enum := range file.EnumType {
+		sl = append(sl, newEnumDescriptor(enum, nil, file, i))
+	}
+	// Enums within messages. Enums within embedded messages appear in the outer-most message.
+	for _, nested := range descs {
+		for i, enum := range nested.EnumType {
+			sl = append(sl, newEnumDescriptor(enum, nested, file, i))
+		}
+	}
+	return sl
+}
+
+// Return a slice of all the top-level ExtensionDescriptors defined within this file.
+func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor {
+	var sl []*ExtensionDescriptor
+	for _, field := range file.Extension {
+		sl = append(sl, &ExtensionDescriptor{common{file}, field, nil})
+	}
+	return sl
+}
+
+// Return a slice of all the types that are publicly imported into this file.
+func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) {
+	for _, index := range file.PublicDependency {
+		df := g.fileByName(file.Dependency[index])
+		for _, d := range df.desc {
+			if d.GetOptions().GetMapEntry() {
+				continue
+			}
+			sl = append(sl, &ImportedDescriptor{common{file}, d})
+		}
+		for _, e := range df.enum {
+			sl = append(sl, &ImportedDescriptor{common{file}, e})
+		}
+		for _, ext := range df.ext {
+			sl = append(sl, &ImportedDescriptor{common{file}, ext})
+		}
+	}
+	return
+}
+
+func extractComments(file *FileDescriptor) {
+	file.comments = make(map[string]*descriptor.SourceCodeInfo_Location)
+	for _, loc := range file.GetSourceCodeInfo().GetLocation() {
+		if loc.LeadingComments == nil {
+			continue
+		}
+		var p []string
+		for _, n := range loc.Path {
+			p = append(p, strconv.Itoa(int(n)))
+		}
+		file.comments[strings.Join(p, ",")] = loc
+	}
+}
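+
+// As a hypothetical illustration of the path keys built above: a leading
+// comment on the first field of the third top-level message is keyed as
+// "4,2,2,0" (FileDescriptorProto field 4 is message_type, message index 2,
+// DescriptorProto field 2 is field, field index 0).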
+
+// BuildTypeNameMap builds the map from fully qualified type names to objects.
+// The key names for the map come from the input data, which puts a period at the beginning.
+// It should be called after SetPackageNames and before GenerateAllFiles.
+func (g *Generator) BuildTypeNameMap() {
+	g.typeNameToObject = make(map[string]Object)
+	for _, f := range g.allFiles {
+		// The names in this loop are defined by the proto world, not us, so the
+		// package name may be empty. If so, the dotted package name of X will
+		// be ".X"; otherwise it will be ".pkg.X".
+		dottedPkg := "." + f.GetPackage()
+		if dottedPkg != "." {
+			dottedPkg += "."
+		}
+		for _, enum := range f.enum {
+			name := dottedPkg + dottedSlice(enum.TypeName())
+			g.typeNameToObject[name] = enum
+		}
+		for _, desc := range f.desc {
+			name := dottedPkg + dottedSlice(desc.TypeName())
+			g.typeNameToObject[name] = desc
+		}
+	}
+}
+
+// ObjectNamed, given a fully-qualified input type name as it appears in the input data,
+// returns the descriptor for the message or enum with that name.
+func (g *Generator) ObjectNamed(typeName string) Object {
+	o, ok := g.typeNameToObject[typeName]
+	if !ok {
+		g.Fail("can't find object with type", typeName)
+	}
+	return o
+}
+
+// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated.
+type AnnotatedAtoms struct {
+	source string
+	path   string
+	atoms  []interface{}
+}
+
+// Annotate records the file name and proto AST path of a list of atoms
+// so that a later call to P can emit a link from each atom to its origin.
+func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms {
+	return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms}
+}
+
+// printAtom prints the (atomic, non-annotation) argument to the generated output.
+func (g *Generator) printAtom(v interface{}) {
+	switch v := v.(type) {
+	case string:
+		g.WriteString(v)
+	case *string:
+		g.WriteString(*v)
+	case bool:
+		fmt.Fprint(g, v)
+	case *bool:
+		fmt.Fprint(g, *v)
+	case int:
+		fmt.Fprint(g, v)
+	case *int32:
+		fmt.Fprint(g, *v)
+	case *int64:
+		fmt.Fprint(g, *v)
+	case float64:
+		fmt.Fprint(g, v)
+	case *float64:
+		fmt.Fprint(g, *v)
+	case GoPackageName:
+		g.WriteString(string(v))
+	case GoImportPath:
+		g.WriteString(strconv.Quote(string(v)))
+	default:
+		g.Fail(fmt.Sprintf("unknown type in printer: %T", v))
+	}
+}
+
+// P prints the arguments to the generated output. It handles strings and int32s, plus
+// handling indirections because they may be *string, etc. Any inputs of type AnnotatedAtoms may emit
+// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode
+// is true).
+func (g *Generator) P(str ...interface{}) {
+	if !g.writeOutput {
+		return
+	}
+	g.WriteString(g.indent)
+	for _, v := range str {
+		switch v := v.(type) {
+		case *AnnotatedAtoms:
+			begin := int32(g.Len())
+			for _, v := range v.atoms {
+				g.printAtom(v)
+			}
+			if g.annotateCode {
+				end := int32(g.Len())
+				var path []int32
+				for _, token := range strings.Split(v.path, ",") {
+					val, err := strconv.ParseInt(token, 10, 32)
+					if err != nil {
+						g.Fail("could not parse proto AST path: ", err.Error())
+					}
+					path = append(path, int32(val))
+				}
+				g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{
+					Path:       path,
+					SourceFile: &v.source,
+					Begin:      &begin,
+					End:        &end,
+				})
+			}
+		default:
+			g.printAtom(v)
+		}
+	}
+	g.WriteByte('\n')
+}
+
+// addInitf stores the given statement to be printed inside the file's init function.
+// The statement is given as a format specifier and arguments.
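+//
+// For example, a hypothetical registration stored for the init function:
+//
+//	g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], "Foo", "pkg.Foo")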
+func (g *Generator) addInitf(stmt string, a ...interface{}) {
+	g.init = append(g.init, fmt.Sprintf(stmt, a...))
+}
+
+// In indents the output one tab stop.
+func (g *Generator) In() { g.indent += "\t" }
+
+// Out unindents the output one tab stop.
+func (g *Generator) Out() {
+	if len(g.indent) > 0 {
+		g.indent = g.indent[1:]
+	}
+}
+
+// GenerateAllFiles generates the output for all the files we're outputting.
+func (g *Generator) GenerateAllFiles() {
+	// Initialize the plugins
+	for _, p := range plugins {
+		p.Init(g)
+	}
+	// Generate the output. The generator runs for every file, even the files
+	// that we don't generate output for, so that we can collate the full list
+	// of exported symbols to support public imports.
+	genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
+	for _, file := range g.genFiles {
+		genFileMap[file] = true
+	}
+	for _, file := range g.allFiles {
+		g.Reset()
+		g.annotations = nil
+		g.writeOutput = genFileMap[file]
+		g.generate(file)
+		if !g.writeOutput {
+			continue
+		}
+		fname := file.goFileName(g.pathType)
+		g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+			Name:    proto.String(fname),
+			Content: proto.String(g.String()),
+		})
+		if g.annotateCode {
+			// Store the generated code annotations in text, as the protoc plugin protocol requires that
+			// strings contain valid UTF-8.
+			g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+				Name:    proto.String(file.goFileName(g.pathType) + ".meta"),
+				Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})),
+			})
+		}
+	}
+}
+
+// Run all the plugins associated with the file.
+func (g *Generator) runPlugins(file *FileDescriptor) {
+	for _, p := range plugins {
+		p.Generate(file)
+	}
+}
+
+// generate fills the response protocol buffer with the generated output for the given file.
+func (g *Generator) generate(file *FileDescriptor) {
+	g.file = file
+	g.usedPackages = make(map[GoImportPath]bool)
+	g.packageNames = make(map[GoImportPath]GoPackageName)
+	g.usedPackageNames = make(map[GoPackageName]bool)
+	g.addedImports = make(map[GoImportPath]bool)
+	for name := range globalPackageNames {
+		g.usedPackageNames[name] = true
+	}
+
+	g.P("// This is a compile-time assertion to ensure that this generated file")
+	g.P("// is compatible with the proto package it is being compiled against.")
+	g.P("// A compilation error at this line likely means your copy of the")
+	g.P("// proto package needs to be updated.")
+	g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
+	g.P()
+
+	for _, td := range g.file.imp {
+		g.generateImported(td)
+	}
+	for _, enum := range g.file.enum {
+		g.generateEnum(enum)
+	}
+	for _, desc := range g.file.desc {
+		// Don't generate virtual messages for maps.
+		if desc.GetOptions().GetMapEntry() {
+			continue
+		}
+		g.generateMessage(desc)
+	}
+	for _, ext := range g.file.ext {
+		g.generateExtension(ext)
+	}
+	g.generateInitFunction()
+	g.generateFileDescriptor(file)
+
+	// Run the plugins before the imports so we know which imports are necessary.
+	g.runPlugins(file)
+
+	// Generate header and imports last, though they appear first in the output.
+	rem := g.Buffer
+	remAnno := g.annotations
+	g.Buffer = new(bytes.Buffer)
+	g.annotations = nil
+	g.generateHeader()
+	g.generateImports()
+	if !g.writeOutput {
+		return
+	}
+	// Adjust the offsets for annotations displaced by the header and imports.
+	for _, anno := range remAnno {
+		*anno.Begin += int32(g.Len())
+		*anno.End += int32(g.Len())
+		g.annotations = append(g.annotations, anno)
+	}
+	g.Write(rem.Bytes())
+
+	// Reformat generated code and patch annotation locations.
+	fset := token.NewFileSet()
+	original := g.Bytes()
+	if g.annotateCode {
+		// make a copy independent of g; we'll need it after Reset.
+		original = append([]byte(nil), original...)
+	}
+	fileAST, err := parser.ParseFile(fset, "", original, parser.ParseComments)
+	if err != nil {
+		// Print out the bad code with line numbers.
+		// This should never happen in practice, but it can while changing generated code,
+		// so consider this a debugging aid.
+		var src bytes.Buffer
+		s := bufio.NewScanner(bytes.NewReader(original))
+		for line := 1; s.Scan(); line++ {
+			fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes())
+		}
+		g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String())
+	}
+	ast.SortImports(fset, fileAST)
+	g.Reset()
+	err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, fileAST)
+	if err != nil {
+		g.Fail("generated Go source code could not be reformatted:", err.Error())
+	}
+	if g.annotateCode {
+		m, err := remap.Compute(original, g.Bytes())
+		if err != nil {
+			g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error())
+		}
+		for _, anno := range g.annotations {
+			new, ok := m.Find(int(*anno.Begin), int(*anno.End))
+			if !ok {
+				g.Fail("span in formatted generated Go source code could not be mapped back to the original code")
+			}
+			*anno.Begin = int32(new.Pos)
+			*anno.End = int32(new.End)
+		}
+	}
+}
+
+// Generate the header, including package definition
+func (g *Generator) generateHeader() {
+	g.P("// Code generated by protoc-gen-go. DO NOT EDIT.")
+	if g.file.GetOptions().GetDeprecated() {
+		g.P("// ", g.file.Name, " is a deprecated file.")
+	} else {
+		g.P("// source: ", g.file.Name)
+	}
+	g.P()
+	g.PrintComments(strconv.Itoa(packagePath))
+	g.P()
+	g.P("package ", g.file.packageName)
+	g.P()
+}
+
+// deprecationComment is the standard comment added to deprecated
+// messages, fields, enums, and enum values.
+var deprecationComment = "// Deprecated: Do not use."
+
+// PrintComments prints any comments from the source .proto file.
+// The path is a comma-separated list of integers.
+// It returns an indication of whether any comments were printed.
+// See descriptor.proto for its format.
+func (g *Generator) PrintComments(path string) bool {
+	if !g.writeOutput {
+		return false
+	}
+	if c, ok := g.makeComments(path); ok {
+		g.P(c)
+		return true
+	}
+	return false
+}
+
+// makeComments generates the comment string for the field, with no "\n" at the end.
+func (g *Generator) makeComments(path string) (string, bool) {
+	loc, ok := g.file.comments[path]
+	if !ok {
+		return "", false
+	}
+	w := new(bytes.Buffer)
+	nl := ""
+	for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") {
+		fmt.Fprintf(w, "%s//%s", nl, line)
+		nl = "\n"
+	}
+	return w.String(), true
+}
+
+func (g *Generator) fileByName(filename string) *FileDescriptor {
+	return g.allFilesByName[filename]
+}
+
+// weak returns whether the ith import of the current file is a weak import.
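+// Weak imports (declared with "import weak" in the .proto source) are
+// intentionally skipped when generating the Go imports below.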
+func (g *Generator) weak(i int32) bool {
+	for _, j := range g.file.WeakDependency {
+		if j == i {
+			return true
+		}
+	}
+	return false
+}
+
+// Generate the imports
+func (g *Generator) generateImports() {
+	imports := make(map[GoImportPath]GoPackageName)
+	for i, s := range g.file.Dependency {
+		fd := g.fileByName(s)
+		importPath := fd.importPath
+		// Do not import our own package.
+		if importPath == g.file.importPath {
+			continue
+		}
+		// Do not import weak imports.
+		if g.weak(int32(i)) {
+			continue
+		}
+		// Do not import a package twice.
+		if _, ok := imports[importPath]; ok {
+			continue
+		}
+		// We need to import all the dependencies, even if we don't reference them,
+		// because other code and tools depend on having the full transitive closure
+		// of protocol buffer types in the binary.
+		packageName := g.GoPackageName(importPath)
+		if _, ok := g.usedPackages[importPath]; !ok {
+			packageName = "_"
+		}
+		imports[importPath] = packageName
+	}
+	for importPath := range g.addedImports {
+		imports[importPath] = g.GoPackageName(importPath)
+	}
+	// We almost always need a proto import. Rather than computing when we
+	// do, which is tricky when there's a plugin, just import it and
+	// reference it later. The same argument applies to the fmt and math packages.
+	g.P("import (")
+	g.P(g.Pkg["fmt"] + ` "fmt"`)
+	g.P(g.Pkg["math"] + ` "math"`)
+	g.P(g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto")
+	for importPath, packageName := range imports {
+		g.P(packageName, " ", GoImportPath(g.ImportPrefix)+importPath)
+	}
+	g.P(")")
+	g.P()
+	// TODO: may need to worry about uniqueness across plugins
+	for _, p := range plugins {
+		p.GenerateImports(g.file)
+		g.P()
+	}
+	g.P("// Reference imports to suppress errors if they are not otherwise used.")
+	g.P("var _ = ", g.Pkg["proto"], ".Marshal")
+	g.P("var _ = ", g.Pkg["fmt"], ".Errorf")
+	g.P("var _ = ", g.Pkg["math"], ".Inf")
+	g.P()
+}
+
+func (g *Generator) generateImported(id *ImportedDescriptor) {
+	df := id.o.File()
+	filename := *df.Name
+	if df.importPath == g.file.importPath {
+		// Don't generate type aliases for files in the same Go package as this one.
+		return
+	}
+	if !supportTypeAliases {
+		g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename))
+	}
+	g.usedPackages[df.importPath] = true
+
+	for _, sym := range df.exported[id.o] {
+		sym.GenerateAlias(g, filename, g.GoPackageName(df.importPath))
+	}
+
+	g.P()
+}
+
+// Generate the enum definitions for this EnumDescriptor.
+func (g *Generator) generateEnum(enum *EnumDescriptor) {
+	// The full type name
+	typeName := enum.TypeName()
+	// The full type name, CamelCased.
+	ccTypeName := CamelCaseSlice(typeName)
+	ccPrefix := enum.prefix()
+
+	deprecatedEnum := ""
+	if enum.GetOptions().GetDeprecated() {
+		deprecatedEnum = deprecationComment
+	}
+	g.PrintComments(enum.path)
+	g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum)
+	g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()})
+	g.P("const (")
+	for i, e := range enum.Value {
+		etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)
+		g.PrintComments(etorPath)
+
+		deprecatedValue := ""
+		if e.GetOptions().GetDeprecated() {
+			deprecatedValue = deprecationComment
+		}
+
+		name := ccPrefix + *e.Name
+		g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue)
+		g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName})
+	}
+	g.P(")")
+	g.P()
+	g.P("var ", ccTypeName, "_name = map[int32]string{")
+	generated := make(map[int32]bool) // avoid duplicate values
+	for _, e := range enum.Value {
+		duplicate := ""
+		if _, present := generated[*e.Number]; present {
+			duplicate = "// Duplicate value: "
+		}
+		g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",")
+		generated[*e.Number] = true
+	}
+	g.P("}")
+	g.P()
+	g.P("var ", ccTypeName, "_value = map[string]int32{")
+	for _, e := range enum.Value {
+		g.P(strconv.Quote(*e.Name), ": ", e.Number, ",")
+	}
+	g.P("}")
+	g.P()
+
+	if !enum.proto3() {
+		g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {")
+		g.P("p := new(", ccTypeName, ")")
+		g.P("*p = x")
+		g.P("return p")
+		g.P("}")
+		g.P()
+	}
+
+	g.P("func (x ", ccTypeName, ") String() string {")
+	g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))")
+	g.P("}")
+	g.P()
+
+	if !enum.proto3() {
+		g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {")
+		g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`)
+		g.P("if err != nil {")
+		g.P("return err")
+		g.P("}")
+		g.P("*x = ", ccTypeName, "(value)")
+		g.P("return nil")
+		g.P("}")
+		g.P()
+	}
+
+	var indexes []string
+	for m := enum.parent; m != nil; m = m.parent {
+		// XXX: skip groups?
+		indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
+	}
+	indexes = append(indexes, strconv.Itoa(enum.index))
+	g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {")
+	g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
+	g.P("}")
+	g.P()
+	if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" {
+		g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`)
+		g.P()
+	}
+
+	g.generateEnumRegistration(enum)
+}
+
+// The tag is a string like "varint,2,opt,name=fieldname,def=7" that
+// identifies details of the field for the protocol buffer marshaling and unmarshaling
+// code. The fields are:
+//	wire encoding
+//	protocol tag number
+//	opt,req,rep for optional, required, or repeated
+//	packed whether the encoding is "packed" (optional; repeated primitives only)
+//	name= the original declared name
+//	enum= the name of the enum type if it is an enum-typed field
+//	proto3 if this field is in a proto3 message
+//	def= string representation of the default value, if any
+// The default value must be in a representation that can be used at run-time
+// to generate the default value. Thus bools become 0 and 1, for instance.
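+//
+// For example, a hypothetical optional int32 field "region_id" with tag
+// number 8 and default 7 in a proto2 message would produce the tag string:
+//
+//	varint,8,opt,name=region_id,json=regionId,def=7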
+func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string {
+	optrepreq := ""
+	switch {
+	case isOptional(field):
+		optrepreq = "opt"
+	case isRequired(field):
+		optrepreq = "req"
+	case isRepeated(field):
+		optrepreq = "rep"
+	}
+	var defaultValue string
+	if dv := field.DefaultValue; dv != nil { // set means an explicit default
+		defaultValue = *dv
+		// Some types need tweaking.
+		switch *field.Type {
+		case descriptor.FieldDescriptorProto_TYPE_BOOL:
+			if defaultValue == "true" {
+				defaultValue = "1"
+			} else {
+				defaultValue = "0"
+			}
+		case descriptor.FieldDescriptorProto_TYPE_STRING,
+			descriptor.FieldDescriptorProto_TYPE_BYTES:
+			// Nothing to do. Quoting is done for the whole tag.
+		case descriptor.FieldDescriptorProto_TYPE_ENUM:
+			// For enums we need to provide the integer constant.
+			obj := g.ObjectNamed(field.GetTypeName())
+			if id, ok := obj.(*ImportedDescriptor); ok {
+				// It is an enum that was publicly imported.
+				// We need the underlying type.
+				obj = id.o
+			}
+			enum, ok := obj.(*EnumDescriptor)
+			if !ok {
+				log.Printf("obj is a %T", obj)
+				if id, ok := obj.(*ImportedDescriptor); ok {
+					log.Printf("id.o is a %T", id.o)
+				}
+				g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName()))
+			}
+			defaultValue = enum.integerValueAsString(defaultValue)
+		case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+			if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
+				if f, err := strconv.ParseFloat(defaultValue, 32); err == nil {
+					defaultValue = fmt.Sprint(float32(f))
+				}
+			}
+		case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+			if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
+				if f, err := strconv.ParseFloat(defaultValue, 64); err == nil {
+					defaultValue = fmt.Sprint(f)
+				}
+			}
+		}
+		defaultValue = ",def=" + defaultValue
+	}
+	enum := ""
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM {
+		// We avoid using obj.GoPackageName(), because we want to use the
+		// original (proto-world) package name.
+		obj := g.ObjectNamed(field.GetTypeName())
+		if id, ok := obj.(*ImportedDescriptor); ok {
+			obj = id.o
+		}
+		enum = ",enum="
+		if pkg := obj.File().GetPackage(); pkg != "" {
+			enum += pkg + "."
+		}
+		enum += CamelCaseSlice(obj.TypeName())
+	}
+	packed := ""
+	if (field.Options != nil && field.Options.GetPacked()) ||
+		// Per https://developers.google.com/protocol-buffers/docs/proto3#simple:
+		// "In proto3, repeated fields of scalar numeric types use packed encoding by default."
+		(message.proto3() && (field.Options == nil || field.Options.Packed == nil) &&
+			isRepeated(field) && isScalar(field)) {
+		packed = ",packed"
+	}
+	fieldName := field.GetName()
+	name := fieldName
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP {
+		// We must use the type name for groups instead of
+		// the field name to preserve capitalization.
+		// type_name in FieldDescriptorProto is fully-qualified,
+		// but we only want the local part.
+		name = *field.TypeName
+		if i := strings.LastIndex(name, "."); i >= 0 {
+			name = name[i+1:]
+		}
+	}
+	if json := field.GetJsonName(); field.Extendee == nil && json != "" && json != name {
+		// TODO: escaping might be needed, in which case
+		// perhaps this should be in its own "json" tag.
+ name += ",json=" + json + } + name = ",name=" + name + if message.proto3() { + name += ",proto3" + } + oneof := "" + if field.OneofIndex != nil { + oneof = ",oneof" + } + return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s", + wiretype, + field.GetNumber(), + optrepreq, + packed, + name, + enum, + oneof, + defaultValue)) +} + +func needsStar(typ descriptor.FieldDescriptorProto_Type) bool { + switch typ { + case descriptor.FieldDescriptorProto_TYPE_GROUP: + return false + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + return false + case descriptor.FieldDescriptorProto_TYPE_BYTES: + return false + } + return true +} + +// TypeName is the printed name appropriate for an item. If the object is in the current file, +// TypeName drops the package name and underscores the rest. +// Otherwise the object is from another package; and the result is the underscored +// package name followed by the item name. +// The result always has an initial capital. +func (g *Generator) TypeName(obj Object) string { + return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) +} + +// GoType returns a string representing the type name, and the wire type +func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { + // TODO: Options. + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + typ, wire = "float64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + typ, wire = "float32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_INT64: + typ, wire = "int64", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT64: + typ, wire = "uint64", "varint" + case descriptor.FieldDescriptorProto_TYPE_INT32: + typ, wire = "int32", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT32: + typ, wire = "uint32", "varint" + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + typ, wire = "uint64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + typ, wire = "uint32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + typ, wire = "bool", "varint" + case descriptor.FieldDescriptorProto_TYPE_STRING: + typ, wire = "string", "bytes" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "group" + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "bytes" + case descriptor.FieldDescriptorProto_TYPE_BYTES: + typ, wire = "[]byte", "bytes" + case descriptor.FieldDescriptorProto_TYPE_ENUM: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = g.TypeName(desc), "varint" + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + typ, wire = "int32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + typ, wire = "int64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + typ, wire = "int32", "zigzag32" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + typ, wire = "int64", "zigzag64" + default: + g.Fail("unknown type for", field.GetName()) + } + if isRepeated(field) { + typ = "[]" + typ + } else if message != nil && message.proto3() { + return + } else if field.OneofIndex != nil && message != nil { + return + } else if needsStar(*field.Type) { + typ = "*" + typ + } + return +} + +func (g *Generator) RecordTypeUse(t string) { + if _, ok := g.typeNameToObject[t]; !ok { + return + } + importPath := g.ObjectNamed(t).GoImportPath() + if importPath == g.outputImportPath { + // Don't record use of objects in our 
+		return
+	}
+	g.AddImport(importPath)
+	g.usedPackages[importPath] = true
+}
+
+// Method names that may be generated. Fields with these names get an
+// underscore appended. Any change to this set is a potential incompatible
+// API change because it changes generated field names.
+var methodNames = [...]string{
+	"Reset",
+	"String",
+	"ProtoMessage",
+	"Marshal",
+	"Unmarshal",
+	"ExtensionRangeArray",
+	"ExtensionMap",
+	"Descriptor",
+}
+
+// Names of messages in the `google.protobuf` package for which
+// we will generate XXX_WellKnownType methods.
+var wellKnownTypes = map[string]bool{
+	"Any":       true,
+	"Duration":  true,
+	"Empty":     true,
+	"Struct":    true,
+	"Timestamp": true,
+
+	"Value":       true,
+	"ListValue":   true,
+	"DoubleValue": true,
+	"FloatValue":  true,
+	"Int64Value":  true,
+	"UInt64Value": true,
+	"Int32Value":  true,
+	"UInt32Value": true,
+	"BoolValue":   true,
+	"StringValue": true,
+	"BytesValue":  true,
+}
+
+// getterDefault finds the default value for the field to return from a getter,
+// regardless of whether it's a built-in default or explicit from the source.
+// Returns e.g. "nil", `""`, or "Default_MessageType_FieldName".
+func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string {
+	if isRepeated(field) {
+		return "nil"
+	}
+	if def := field.GetDefaultValue(); def != "" {
+		defaultConstant := g.defaultConstantName(goMessageType, field.GetName())
+		if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES {
+			return defaultConstant
+		}
+		return "append([]byte(nil), " + defaultConstant + "...)"
+	}
+	switch *field.Type {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		return "false"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		return `""`
+	case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return "nil"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		obj := g.ObjectNamed(field.GetTypeName())
+		var enum *EnumDescriptor
+		if id, ok := obj.(*ImportedDescriptor); ok {
+			// The enum type has been publicly imported.
+			enum, _ = id.o.(*EnumDescriptor)
+		} else {
+			enum, _ = obj.(*EnumDescriptor)
+		}
+		if enum == nil {
+			log.Printf("don't know how to generate getter for %s", field.GetName())
+			return "nil"
+		}
+		if len(enum.Value) == 0 {
+			return "0 // empty enum"
+		}
+		first := enum.Value[0].GetName()
+		return g.DefaultPackageName(obj) + enum.prefix() + first
+	default:
+		return "0"
+	}
+}
+
+// defaultConstantName builds the name of the default constant from the message
+// type name and the untouched field name, e.g. "Default_MessageType_FieldName".
+func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string {
+	return "Default_" + goMessageType + "_" + CamelCase(protoFieldName)
+}
+
+// The different types of fields in a message and how to actually print them.
+// Most of the logic for generateMessage is in the methods of these types.
+//
+// Note that the content of the field is irrelevant, a simpleField can contain
+// anything from a scalar to a group (which is just a message).
+//
+// Extension fields (and message sets) are, however, handled separately.
+//
+// simpleField - a field that is neither weak nor oneof, possibly repeated
+// oneofField - field containing a list of subfields:
+//   - oneofSubField - a field within the oneof
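+//
+// For example, a hypothetical oneof
+//
+//	oneof avatar {
+//		string image_url = 1;
+//		bytes image_data = 2;
+//	}
+//
+// in message Profile is generated roughly as a discriminator interface plus
+// one wrapper struct per alternative:
+//
+//	type Profile struct {
+//		Avatar isProfile_Avatar `protobuf_oneof:"avatar"`
+//	}
+//	type isProfile_Avatar interface{ isProfile_Avatar() }
+//	type Profile_ImageUrl struct {
+//		ImageUrl string // protobuf tags elided
+//	}
+//	type Profile_ImageData struct {
+//		ImageData []byte // protobuf tags elided
+//	}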
+
+// msgCtx contains the context for the generator functions.
+type msgCtx struct {
+	goName  string      // Go struct name of the message, e.g. MessageName
+	message *Descriptor // The descriptor for the message
+}
+
+// fieldCommon contains data common to all types of fields.
+type fieldCommon struct {
+	goName     string // Go name of field, e.g. "FieldName" or "Descriptor_"
+	protoName  string // Name of field in proto language, e.g. "field_name" or "descriptor"
+	getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_"
+	goType     string // The Go type as a string, e.g. "*int32" or "*OtherMessage"
+	tags       string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"`
+	fullPath   string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0"
+}
+
+// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor".
+func (f *fieldCommon) getProtoName() string {
+	return f.protoName
+}
+
+// getGoType returns the go type of the field as a string, e.g. "*int32".
+func (f *fieldCommon) getGoType() string {
+	return f.goType
+}
+
+// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated.
+type simpleField struct {
+	fieldCommon
+	protoTypeName string                               // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+	protoType     descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+	deprecated    string                               // Deprecation comment, if any, e.g. "// Deprecated: Do not use."
+	getterDef     string                               // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+	protoDef      string                               // Default value as defined in the proto file, e.g. "yoshi" or "5"
+	comment       string                               // The full comment for the field, e.g. "// Useful information"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *simpleField) decl(g *Generator, mc *msgCtx) {
+	g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated)
+}
+
+// getter prints the getter for the field.
+func (f *simpleField) getter(g *Generator, mc *msgCtx) {
+	star := ""
+	tname := f.goType
+	if needsStar(f.protoType) && tname[0] == '*' {
+		tname = tname[1:]
+		star = "*"
+	}
+	if f.deprecated != "" {
+		g.P(f.deprecated)
+	}
+	g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {")
+	if f.getterDef == "nil" { // Simpler getter
+		g.P("if m != nil {")
+		g.P("return m." + f.goName)
+		g.P("}")
+		g.P("return nil")
+		g.P("}")
+		g.P()
+		return
+	}
+	if mc.message.proto3() {
+		g.P("if m != nil {")
+	} else {
+		g.P("if m != nil && m." + f.goName + " != nil {")
+	}
+	g.P("return " + star + "m." + f.goName)
+	g.P("}")
+	g.P("return ", f.getterDef)
+	g.P("}")
+	g.P()
+}
+
+// setter prints the setter method of the field.
+func (f *simpleField) setter(g *Generator, mc *msgCtx) {
+	// No setter for regular fields yet
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g. "yoshi" or "5".
+func (f *simpleField) getProtoDef() string {
+	return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *simpleField) getProtoTypeName() string {
+	return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type {
+	return f.protoType
+}
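+
+// For example, for a hypothetical proto2 field
+//
+//	optional string name = 1 [default = "unknown"];
+//
+// in message Profile, getter above emits approximately:
+//
+//	func (m *Profile) GetName() string {
+//		if m != nil && m.Name != nil {
+//			return *m.Name
+//		}
+//		return Default_Profile_Name
+//	}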
+
+// oneofSubFields are kept in a slice held by each oneofField. They do not
+// appear in the top-level slice of fields for the message.
+type oneofSubField struct {
+	fieldCommon
+	protoTypeName string                               // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+	protoType     descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+	oneofTypeName string                               // Type name of the enclosing struct, e.g. "MessageName_FieldName"
+	fieldNumber   int                                  // Actual field number, as defined in proto, e.g. 12
+	getterDef     string                               // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+	protoDef      string                               // Default value as defined in the proto file, e.g. "yoshi" or "5"
+	deprecated    string                               // Deprecation comment, if any.
+}
+
+// typedNil prints a nil cast to the pointer to this field.
+// - for XXX_OneofWrappers
+func (f *oneofSubField) typedNil(g *Generator) {
+	g.P("(*", f.oneofTypeName, ")(nil),")
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g. "yoshi" or "5".
+func (f *oneofSubField) getProtoDef() string {
+	return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *oneofSubField) getProtoTypeName() string {
+	return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type {
+	return f.protoType
+}
+
+// oneofField represents the oneof on the top level.
+// The alternative fields within the oneof are represented by oneofSubField.
+type oneofField struct {
+	fieldCommon
+	subFields []*oneofSubField // All the possible oneof fields
+	comment   string           // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *oneofField) decl(g *Generator, mc *msgCtx) {
+	comment := f.comment
+	for _, sf := range f.subFields {
+		comment += "//\t*" + sf.oneofTypeName + "\n"
+	}
+	g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`")
+}
+
+// getter for a oneof field prints additional discriminators and interfaces for the oneof,
+// and it also prints all the getters for the subfields.
+func (f *oneofField) getter(g *Generator, mc *msgCtx) {
+	// The discriminator type
+	g.P("type ", f.goType, " interface {")
+	g.P(f.goType, "()")
+	g.P("}")
+	g.P()
+	// The subField types, fulfilling the discriminator type contract
+	for _, sf := range f.subFields {
+		g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {")
+		g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`")
+		g.P("}")
+		g.P()
+	}
+	for _, sf := range f.subFields {
+		g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}")
+		g.P()
+	}
+	// Getter for the oneof field
+	g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {")
+	g.P("if m != nil { return m.", f.goName, " }")
+	g.P("return nil")
+	g.P("}")
+	g.P()
+	// Getters for each oneof
+	for _, sf := range f.subFields {
+		if sf.deprecated != "" {
+			g.P(sf.deprecated)
+		}
+		g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {")
+		g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {")
+		g.P("return x.", sf.goName)
+		g.P("}")
+		g.P("return ", sf.getterDef)
+		g.P("}")
+		g.P()
+	}
+}
+
+// setter prints the setter method of the field.
+func (f *oneofField) setter(g *Generator, mc *msgCtx) {
+	// No setters for oneof yet
+}
+
+// topLevelField interface implemented by all types of fields on the top level (not oneofSubField).
+type topLevelField interface {
+	decl(g *Generator, mc *msgCtx)   // print declaration within the struct
+	getter(g *Generator, mc *msgCtx) // print getter
+	setter(g *Generator, mc *msgCtx) // print setter if applicable
+}
+
+// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField).
+type defField interface {
+	getProtoDef() string                                // default value explicitly stated in the proto file, e.g. "yoshi" or "5"
+	getProtoName() string                               // proto name of a field, e.g. "field_name" or "descriptor"
+	getGoType() string                                  // go type of the field as a string, e.g. "*int32"
+	getProtoTypeName() string                           // protobuf type name for the field, e.g. ".google.protobuf.Duration"
+	getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+}
+
+// generateDefaultConstants adds constants for default values if needed, which is only if the default value is
+// explicit in the proto.
+func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) {
+	// Collect fields that can have defaults
+	dFields := []defField{}
+	for _, pf := range topLevelFields {
+		if f, ok := pf.(*oneofField); ok {
+			for _, osf := range f.subFields {
+				dFields = append(dFields, osf)
+			}
+			continue
+		}
+		dFields = append(dFields, pf.(defField))
+	}
+	for _, df := range dFields {
+		def := df.getProtoDef()
+		if def == "" {
+			continue
+		}
+		fieldname := g.defaultConstantName(mc.goName, df.getProtoName())
+		typename := df.getGoType()
+		if typename[0] == '*' {
+			typename = typename[1:]
+		}
+		kind := "const "
+		switch {
+		case typename == "bool":
+		case typename == "string":
+			def = strconv.Quote(def)
+		case typename == "[]byte":
+			def = "[]byte(" + strconv.Quote(unescape(def)) + ")"
+			kind = "var "
+		case def == "inf", def == "-inf", def == "nan":
+			// These names are known to, and defined by, the protocol language.
+			switch def {
+			case "inf":
+				def = "math.Inf(1)"
+			case "-inf":
+				def = "math.Inf(-1)"
+			case "nan":
+				def = "math.NaN()"
+			}
+			if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT {
+				def = "float32(" + def + ")"
+			}
+			kind = "var "
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT:
+			if f, err := strconv.ParseFloat(def, 32); err == nil {
+				def = fmt.Sprint(float32(f))
+			}
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+			if f, err := strconv.ParseFloat(def, 64); err == nil {
+				def = fmt.Sprint(f)
+			}
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM:
+			// Must be an enum. Need to construct the prefixed name.
+			obj := g.ObjectNamed(df.getProtoTypeName())
+			var enum *EnumDescriptor
+			if id, ok := obj.(*ImportedDescriptor); ok {
+				// The enum type has been publicly imported.
+				enum, _ = id.o.(*EnumDescriptor)
+			} else {
+				enum, _ = obj.(*EnumDescriptor)
+			}
+			if enum == nil {
+				log.Printf("don't know how to generate constant for %s", fieldname)
+				continue
+			}
+			def = g.DefaultPackageName(obj) + enum.prefix() + def
+		}
+		g.P(kind, fieldname, " ", typename, " = ", def)
+		g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""})
+	}
+	g.P()
+}
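+
+// For example, the hypothetical field
+//
+//	optional string name = 1 [default = "yoshi"];
+//
+// in message Profile yields the exported constant:
+//
+//	const Default_Profile_Name string = "yoshi"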
+
+// generateInternalStructFields just adds the XXX_ fields to the message struct.
+func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) {
+	g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals
+	if len(mc.message.ExtensionRange) > 0 {
+		messageset := ""
+		if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() {
+			messageset = "protobuf_messageset:\"1\" "
+		}
+		g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`")
+	}
+	g.P("XXX_unrecognized\t[]byte `json:\"-\"`")
+	g.P("XXX_sizecache\tint32 `json:\"-\"`")
+}
+
+// generateOneofFuncs adds all the utility functions for oneof, including marshaling, unmarshaling and sizer.
+func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) {
+	ofields := []*oneofField{}
+	for _, f := range topLevelFields {
+		if o, ok := f.(*oneofField); ok {
+			ofields = append(ofields, o)
+		}
+	}
+	if len(ofields) == 0 {
+		return
+	}
+
+	// OneofFuncs
+	g.P("// XXX_OneofWrappers is for the internal use of the proto package.")
+	g.P("func (*", mc.goName, ") XXX_OneofWrappers() []interface{} {")
+	g.P("return []interface{}{")
+	for _, of := range ofields {
+		for _, sf := range of.subFields {
+			sf.typedNil(g)
+		}
+	}
+	g.P("}")
+	g.P("}")
+	g.P()
+}
+
+// generateMessageStruct adds the actual struct with its members (but not methods) to the output.
+func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) {
+	comments := g.PrintComments(mc.message.path)
+
+	// Guarantee deprecation comments appear after user-provided comments.
+	if mc.message.GetOptions().GetDeprecated() {
+		if comments {
+			// Convention: Separate deprecation comments from original
+			// comments with an empty line.
+			g.P("//")
+		}
+		g.P(deprecationComment)
+	}
+
+	g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {")
+	for _, pf := range topLevelFields {
+		pf.decl(g, mc)
+	}
+	g.generateInternalStructFields(mc, topLevelFields)
+	g.P("}")
+}
+
+// generateGetters adds getters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) {
+	for _, pf := range topLevelFields {
+		pf.getter(g, mc)
+	}
+}
+
+// generateSetters adds setters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) {
+	for _, pf := range topLevelFields {
+		pf.setter(g, mc)
+	}
+}
+
+// generateCommonMethods adds methods to the message that are not on a per-field basis.
+func (g *Generator) generateCommonMethods(mc *msgCtx) {
+	// Reset, String and ProtoMessage methods.
+	g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }")
+	g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }")
+	g.P("func (*", mc.goName, ") ProtoMessage() {}")
+	var indexes []string
+	for m := mc.message; m != nil; m = m.parent {
+		indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
+	}
+	g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {")
+	g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
+	g.P("}")
+	g.P()
+	// TODO: Revisit the decision to use a XXX_WellKnownType method
+	// if we change proto.MessageName to work with multiple equivalents.
+	if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] {
+		g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`)
+		g.P()
+	}
+
+	// Extension support methods
+	if len(mc.message.ExtensionRange) > 0 {
+		g.P()
+		g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{")
+		for _, r := range mc.message.ExtensionRange {
+			end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends
+			g.P("{Start: ", r.Start, ", End: ", end, "},")
+		}
+		g.P("}")
+		g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {")
+		g.P("return extRange_", mc.goName)
+		g.P("}")
+		g.P()
+	}
+
+	// TODO: It does not scale to keep adding another method for every
+	// operation on protos that we want to switch over to using the
+	// table-driven approach. Instead, we should only add a single method
+	// that allows getting access to the *InternalMessageInfo struct and then
+	// calling Unmarshal, Marshal, Merge, Size, and Discard directly on that.
+
+	// Wrapper for table-driven marshaling and unmarshaling.
+	g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {")
+	g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {")
+	g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {")
+	g.P("xxx_messageInfo_", mc.goName, ".Merge(m, src)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message
+	g.P("return xxx_messageInfo_", mc.goName, ".Size(m)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {")
+	g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)")
+	g.P("}")
+
+	g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo")
+	g.P()
+}
+
+// Generate the type, methods and default constant definitions for this Descriptor.
+func (g *Generator) generateMessage(message *Descriptor) {
+	topLevelFields := []topLevelField{}
+	oFields := make(map[int32]*oneofField)
+	// The full type name
+	typeName := message.TypeName()
+	// The full type name, CamelCased.
+	goTypeName := CamelCaseSlice(typeName)
+
+	usedNames := make(map[string]bool)
+	for _, n := range methodNames {
+		usedNames[n] = true
+	}
+
+	// allocNames finds a conflict-free variation of the given strings,
+	// consistently mutating their suffixes.
+	// It returns the same number of strings.
+	allocNames := func(ns ...string) []string {
+	Loop:
+		for {
+			for _, n := range ns {
+				if usedNames[n] {
+					for i := range ns {
+						ns[i] += "_"
+					}
+					continue Loop
+				}
+			}
+			for _, n := range ns {
+				usedNames[n] = true
+			}
+			return ns
+		}
+	}
+
+	mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later
+
+	// Build a structure more suitable for generating the text in one pass
+	for i, field := range message.Field {
+		// Allocate the getter and the field at the same time so name
+		// collisions create field/method consistent names.
+		// TODO: This allocation occurs based on the order of the fields
+		// in the proto file, meaning that a change in the field
+		// ordering can change generated Method/Field names.
+		base := CamelCase(*field.Name)
+		ns := allocNames(base, "Get"+base)
+		fieldName, fieldGetterName := ns[0], ns[1]
+		typename, wiretype := g.GoType(message, field)
+		jsonName := *field.Name
+		tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty")
+
+		oneof := field.OneofIndex != nil
+		if oneof && oFields[*field.OneofIndex] == nil {
+			odp := message.OneofDecl[int(*field.OneofIndex)]
+			base := CamelCase(odp.GetName())
+			fname := allocNames(base)[0]
+
+			// This is the first field of a oneof we haven't seen before.
+			// Generate the union field.
+			oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)
+			c, ok := g.makeComments(oneofFullPath)
+			if ok {
+				c += "\n//\n"
+			}
+			c += "// Types that are valid to be assigned to " + fname + ":\n"
+			// Generate the rest of this comment later,
+			// when we've computed any disambiguation.
+
+			dname := "is" + goTypeName + "_" + fname
+			tag := `protobuf_oneof:"` + odp.GetName() + `"`
+			of := oneofField{
+				fieldCommon: fieldCommon{
+					goName:     fname,
+					getterName: "Get" + fname,
+					goType:     dname,
+					tags:       tag,
+					protoName:  odp.GetName(),
+					fullPath:   oneofFullPath,
+				},
+				comment: c,
+			}
+			topLevelFields = append(topLevelFields, &of)
+			oFields[*field.OneofIndex] = &of
+		}
+
+		if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
+			desc := g.ObjectNamed(field.GetTypeName())
+			if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() {
+				// Figure out the Go types and tags for the key and value types.
+				keyField, valField := d.Field[0], d.Field[1]
+				keyType, keyWire := g.GoType(d, keyField)
+				valType, valWire := g.GoType(d, valField)
+				keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire)
+
+				// We don't use stars, except for message-typed values.
+				// Message and enum types are the only two possibly foreign types used in maps,
+				// so record their use. They are not permitted as map keys.
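+				// For example, a hypothetical field
+				//	map<string, OtherMessage> items = 3;
+				// becomes the Go type "map[string]*OtherMessage", with the
+				// key and value tags appended as protobuf_key/protobuf_val.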
+				keyType = strings.TrimPrefix(keyType, "*")
+				switch *valField.Type {
+				case descriptor.FieldDescriptorProto_TYPE_ENUM:
+					valType = strings.TrimPrefix(valType, "*")
+					g.RecordTypeUse(valField.GetTypeName())
+				case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+					g.RecordTypeUse(valField.GetTypeName())
+				default:
+					valType = strings.TrimPrefix(valType, "*")
+				}
+
+				typename = fmt.Sprintf("map[%s]%s", keyType, valType)
+				mapFieldTypes[field] = typename // record for the getter generation
+
+				tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag)
+			}
+		}
+
+		fieldDeprecated := ""
+		if field.GetOptions().GetDeprecated() {
+			fieldDeprecated = deprecationComment
+		}
+
+		dvalue := g.getterDefault(field, goTypeName)
+		if oneof {
+			tname := goTypeName + "_" + fieldName
+			// It is possible for this to collide with a message or enum
+			// nested in this message. Check for collisions.
+			for {
+				ok := true
+				for _, desc := range message.nested {
+					if CamelCaseSlice(desc.TypeName()) == tname {
+						ok = false
+						break
+					}
+				}
+				for _, enum := range message.enums {
+					if CamelCaseSlice(enum.TypeName()) == tname {
+						ok = false
+						break
+					}
+				}
+				if !ok {
+					tname += "_"
+					continue
+				}
+				break
+			}
+
+			oneofField := oFields[*field.OneofIndex]
+			tag := "protobuf:" + g.goTag(message, field, wiretype)
+			sf := oneofSubField{
+				fieldCommon: fieldCommon{
+					goName:     fieldName,
+					getterName: fieldGetterName,
+					goType:     typename,
+					tags:       tag,
+					protoName:  field.GetName(),
+					fullPath:   fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i),
+				},
+				protoTypeName: field.GetTypeName(),
+				fieldNumber:   int(*field.Number),
+				protoType:     *field.Type,
+				getterDef:     dvalue,
+				protoDef:      field.GetDefaultValue(),
+				oneofTypeName: tname,
+				deprecated:    fieldDeprecated,
+			}
+			oneofField.subFields = append(oneofField.subFields, &sf)
+			g.RecordTypeUse(field.GetTypeName())
+			continue
+		}
+
+		fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)
+		c, ok := g.makeComments(fieldFullPath)
+		if ok {
+			c += "\n"
+		}
+		rf := simpleField{
+			fieldCommon: fieldCommon{
+				goName:     fieldName,
+				getterName: fieldGetterName,
+				goType:     typename,
+				tags:       tag,
+				protoName:  field.GetName(),
+				fullPath:   fieldFullPath,
+			},
+			protoTypeName: field.GetTypeName(),
+			protoType:     *field.Type,
+			deprecated:    fieldDeprecated,
+			getterDef:     dvalue,
+			protoDef:      field.GetDefaultValue(),
+			comment:       c,
+		}
+		var pf topLevelField = &rf
+
+		topLevelFields = append(topLevelFields, pf)
+		g.RecordTypeUse(field.GetTypeName())
+	}
+
+	mc := &msgCtx{
+		goName:  goTypeName,
+		message: message,
+	}
+
+	g.generateMessageStruct(mc, topLevelFields)
+	g.P()
+	g.generateCommonMethods(mc)
+	g.P()
+	g.generateDefaultConstants(mc, topLevelFields)
+	g.P()
+	g.generateGetters(mc, topLevelFields)
+	g.P()
+	g.generateSetters(mc, topLevelFields)
+	g.P()
+	g.generateOneofFuncs(mc, topLevelFields)
+	g.P()
+
+	var oneofTypes []string
+	for _, f := range topLevelFields {
+		if of, ok := f.(*oneofField); ok {
+			for _, osf := range of.subFields {
+				oneofTypes = append(oneofTypes, osf.oneofTypeName)
+			}
+		}
+	}
+
+	opts := message.Options
+	ms := &messageSymbol{
+		sym:           goTypeName,
+		hasExtensions: len(message.ExtensionRange) > 0,
+		isMessageSet:  opts != nil && opts.GetMessageSetWireFormat(),
+		oneofTypes:    oneofTypes,
+	}
+	g.file.addExport(message, ms)
+
+	for _, ext := range message.ext {
+		g.generateExtension(ext)
+	}
+
+	fullName := strings.Join(message.TypeName(), ".")
+	if g.file.Package != nil {
+		fullName = *g.file.Package + "." + fullName
+	}
+
+	g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName)
+	// Register types for native map types.
+	for _, k := range mapFieldKeys(mapFieldTypes) {
+		fullName := strings.TrimPrefix(*k.TypeName, ".")
+		g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName)
+	}
+}
+
+type byTypeName []*descriptor.FieldDescriptorProto
+
+func (a byTypeName) Len() int           { return len(a) }
+func (a byTypeName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName }
+
+// mapFieldKeys returns the keys of m in a consistent order.
+func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto {
+	keys := make([]*descriptor.FieldDescriptorProto, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Sort(byTypeName(keys))
+	return keys
+}
+
+var escapeChars = [256]byte{
+	'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?',
+}
+
+// unescape reverses the "C" escaping that protoc does for default values of bytes fields.
+// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape
+// sequences are conveyed, unmodified, into the decoded result.
+func unescape(s string) string {
+	// NB: Sadly, we can't use strconv.Unquote because protoc will escape both
+	// single and double quotes, but strconv.Unquote only allows one or the
+	// other (based on actual surrounding quotes of its input argument).
+
+	var out []byte
+	for len(s) > 0 {
+		// regular character, or too short to be valid escape
+		if s[0] != '\\' || len(s) < 2 {
+			out = append(out, s[0])
+			s = s[1:]
+		} else if c := escapeChars[s[1]]; c != 0 {
+			// escape sequence
+			out = append(out, c)
+			s = s[2:]
+		} else if s[1] == 'x' || s[1] == 'X' {
+			// hex escape, e.g. "\x80"
+			if len(s) < 4 {
+				// too short to be valid
+				out = append(out, s[:2]...)
+				s = s[2:]
+				continue
+			}
+			v, err := strconv.ParseUint(s[2:4], 16, 8)
+			if err != nil {
+				out = append(out, s[:4]...)
+			} else {
+				out = append(out, byte(v))
+			}
+			s = s[4:]
+		} else if '0' <= s[1] && s[1] <= '7' {
+			// octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164"
+			// so consume up to 2 more bytes or up to end-of-string
+			n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567"))
+			if n > 3 {
+				n = 3
+			}
+			v, err := strconv.ParseUint(s[1:1+n], 8, 8)
+			if err != nil {
+				out = append(out, s[:1+n]...)
+			} else {
+				out = append(out, byte(v))
+			}
+			s = s[1+n:]
+		} else {
+			// bad escape, just propagate the slash as-is
+			out = append(out, s[0])
+			s = s[1:]
+		}
+	}
+
+	return string(out)
+}
+
+func (g *Generator) generateExtension(ext *ExtensionDescriptor) {
+	ccTypeName := ext.DescName()
+
+	extObj := g.ObjectNamed(*ext.Extendee)
+	var extDesc *Descriptor
+	if id, ok := extObj.(*ImportedDescriptor); ok {
+		// This is extending a publicly imported message.
+		// We need the underlying type for goTag.
+		extDesc = id.o.(*Descriptor)
+	} else {
+		extDesc = extObj.(*Descriptor)
+	}
+	extendedType := "*" + g.TypeName(extObj) // always use the original
+	field := ext.FieldDescriptorProto
+	fieldType, wireType := g.GoType(ext.parent, field)
+	tag := g.goTag(extDesc, field, wireType)
+	g.RecordTypeUse(*ext.Extendee)
+	if n := ext.FieldDescriptorProto.TypeName; n != nil {
+		// foreign extension type
+		g.RecordTypeUse(*n)
+	}
+
+	typeName := ext.TypeName()
+
+	// Special case for proto2 message sets: If this extension is extending
+	// proto2.bridge.MessageSet, and its final name component is "message_set_extension",
+	// then drop that last component.
+	//
+	// TODO: This should be implemented in the text formatter rather than the generator.
+	// In addition, the situation for when to apply this special case is implemented
+	// differently in other languages:
+	// https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560
+	if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" {
+		typeName = typeName[:len(typeName)-1]
+	}
+
+	// For text formatting, the package must be exactly what the .proto file declares,
+	// ignoring overrides such as the go_package option, and with no dot/underscore mapping.
+	extName := strings.Join(typeName, ".")
+	if g.file.Package != nil {
+		extName = *g.file.Package + "." + extName
+	}
+
+	g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{")
+	g.P("ExtendedType: (", extendedType, ")(nil),")
+	g.P("ExtensionType: (", fieldType, ")(nil),")
+	g.P("Field: ", field.Number, ",")
+	g.P(`Name: "`, extName, `",`)
+	g.P("Tag: ", tag, ",")
+	g.P(`Filename: "`, g.file.GetName(), `",`)
+
+	g.P("}")
+	g.P()
+
+	g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName())
+
+	g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""})
+}
+
+func (g *Generator) generateInitFunction() {
+	if len(g.init) == 0 {
+		return
+	}
+	g.P("func init() {")
+	for _, l := range g.init {
+		g.P(l)
+	}
+	g.P("}")
+	g.init = nil
+}
+
+func (g *Generator) generateFileDescriptor(file *FileDescriptor) {
+	// Make a copy and trim source_code_info data.
+	// TODO: Trim this more when we know exactly what we need.
+	pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto)
+	pb.SourceCodeInfo = nil
+
+	b, err := proto.Marshal(pb)
+	if err != nil {
+		g.Fail(err.Error())
+	}
+
+	var buf bytes.Buffer
+	w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
+	w.Write(b)
+	w.Close()
+	b = buf.Bytes()
+
+	v := file.VarName()
+	g.P()
+	g.P("func init() {")
+	g.P(g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ")")
+	g.P("}")
+	g.P("var ", v, " = []byte{")
+	g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto")
+	for len(b) > 0 {
+		n := 16
+		if n > len(b) {
+			n = len(b)
+		}
+
+		s := ""
+		for _, c := range b[:n] {
+			s += fmt.Sprintf("0x%02x,", c)
+		}
+		g.P(s)
+
+		b = b[n:]
+	}
+	g.P("}")
+}
+
+func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) {
+	// We always print the full (proto-world) package name here.
+	pkg := enum.File().GetPackage()
+	if pkg != "" {
+		pkg += "."
+	}
+	// The full type name
+	typeName := enum.TypeName()
+	// The full type name, CamelCased.
+	ccTypeName := CamelCaseSlice(typeName)
+	g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName)
+}
+
+// And now lots of helper functions.
+
+// Is c an ASCII lower-case letter?
+func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} + +// Is c an ASCII digit? +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +// CamelCase returns the CamelCased name. +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. +// There is a remote possibility of this rewrite causing a name collision, +// but it's so remote we're prepared to pretend it's nonexistent - since the +// C++ generator lowercases names, it's extremely unlikely to have two fields +// with different capitalizations. +// In short, _my_field_name_2 becomes XMyFieldName_2. +func CamelCase(s string) string { + if s == "" { + return "" + } + t := make([]byte, 0, 32) + i := 0 + if s[0] == '_' { + // Need a capital letter; drop the '_'. + t = append(t, 'X') + i++ + } + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + for ; i < len(s); i++ { + c := s[i] + if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { + continue // Skip the underscore in s. + } + if isASCIIDigit(c) { + t = append(t, c) + continue + } + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c ^= ' ' // Make it a capital letter. + } + t = append(t, c) // Guaranteed not lower case. + // Accept lower case sequence that follows. + for i+1 < len(s) && isASCIILower(s[i+1]) { + i++ + t = append(t, s[i]) + } + } + return string(t) +} + +// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to +// be joined with "_". +func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } + +// dottedSlice turns a sliced name into a dotted name. +func dottedSlice(elem []string) string { return strings.Join(elem, ".") } + +// Is this field optional? +func isOptional(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL +} + +// Is this field required? +func isRequired(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED +} + +// Is this field repeated? +func isRepeated(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED +} + +// Is this field a scalar numeric type? 
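+// Scalar here covers the numeric, bool, and enum types enumerated below;
+// strings, bytes, messages, and groups are not scalar in this sense.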
+func isScalar(field *descriptor.FieldDescriptorProto) bool { + if field.Type == nil { + return false + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_BOOL, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_SFIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED64, + descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} + +// badToUnderscore is the mapping function used to generate Go names from package names, +// which can be dotted in the input .proto file. It replaces non-identifier characters such as +// dot or dash with underscore. +func badToUnderscore(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { + return r + } + return '_' +} + +// baseName returns the last path element of the name, with the last dotted suffix removed. +func baseName(name string) string { + // First, find the last element + if i := strings.LastIndex(name, "/"); i >= 0 { + name = name[i+1:] + } + // Now drop the suffix + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[0:i] + } + return name +} + +// The SourceCodeInfo message describes the location of elements of a parsed +// .proto file by way of a "path", which is a sequence of integers that +// describe the route from a FileDescriptorProto to the relevant submessage. +// The path alternates between a field number of a repeated field, and an index +// into that repeated field. The constants below define the field numbers that +// are used. +// +// See descriptor.proto for more information about this. +const ( + // tag numbers in FileDescriptorProto + packagePath = 2 // package + messagePath = 4 // message_type + enumPath = 5 // enum_type + // tag numbers in DescriptorProto + messageFieldPath = 2 // field + messageMessagePath = 3 // nested_type + messageEnumPath = 4 // enum_type + messageOneofPath = 8 // oneof_decl + // tag numbers in EnumDescriptorProto + enumValuePath = 2 // value +) + +var supportTypeAliases bool + +func init() { + for _, tag := range build.Default.ReleaseTags { + if tag == "go1.9" { + supportTypeAliases = true + return + } + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go new file mode 100644 index 0000000000..39968eb9f9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go @@ -0,0 +1,117 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package remap handles tracking the locations of Go tokens in a source text
+across a rewrite by the Go formatter.
+*/
+package remap
+
+import (
+	"fmt"
+	"go/scanner"
+	"go/token"
+)
+
+// A Location represents a span of byte offsets in the source text.
+type Location struct {
+	Pos, End int // End is exclusive
+}
+
+// A Map represents a mapping between token locations in an input source text
+// and locations in the corresponding output text.
+type Map map[Location]Location
+
+// Find reports whether the specified span is recorded by m, and if so returns
+// the new location it was mapped to. If the input span was not found, the
+// returned location is the same as the input.
+func (m Map) Find(pos, end int) (Location, bool) {
+	key := Location{
+		Pos: pos,
+		End: end,
+	}
+	if loc, ok := m[key]; ok {
+		return loc, true
+	}
+	return key, false
+}
+
+func (m Map) add(opos, oend, npos, nend int) {
+	m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
+}
+
+// Compute constructs a location mapping from input to output. An error is
+// reported if any of the tokens of output cannot be mapped.
+func Compute(input, output []byte) (Map, error) {
+	itok := tokenize(input)
+	otok := tokenize(output)
+	if len(itok) != len(otok) {
+		return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
+	}
+	m := make(Map)
+	for i, ti := range itok {
+		to := otok[i]
+		if ti.Token != to.Token {
+			return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
+		}
+		m.add(ti.pos, ti.end, to.pos, to.end)
+	}
+	return m, nil
+}
+
+// tokinfo records the span and type of a source token.
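+// pos and end are 0-based byte offsets into the source; the scanner's
+// 1-based token.Pos values are adjusted by tokenize below.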
+type tokinfo struct {
+	pos, end int
+	token.Token
+}
+
+func tokenize(src []byte) []tokinfo {
+	fs := token.NewFileSet()
+	var s scanner.Scanner
+	s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments)
+	var info []tokinfo
+	for {
+		pos, next, lit := s.Scan()
+		switch next {
+		case token.SEMICOLON:
+			continue
+		}
+		info = append(info, tokinfo{
+			pos:   int(pos - 1),
+			end:   int(pos + token.Pos(len(lit)) - 1),
+			Token: next,
+		})
+		if next == token.EOF {
+			break
+		}
+	}
+	return info
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
new file mode 100644
index 0000000000..957c3f2a63
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
@@ -0,0 +1,545 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package grpc outputs gRPC service descriptions in Go code.
+// It runs as a plugin for the Go protocol buffer compiler and is linked
+// into protoc-gen-go.
+package grpc
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"github.com/golang/protobuf/protoc-gen-go/generator"
+)
+
+// generatedCodeVersion indicates a version of the generated code.
+// It is incremented whenever an incompatibility between the generated code and
+// the grpc package is introduced; the generated code references
+// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion).
+const generatedCodeVersion = 6
+
+// Paths for packages used by code generated in this file,
+// relative to the import_prefix of the generator.Generator.
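+// (import_prefix is an optional protoc-gen-go parameter and is empty by
+// default, in which case these paths are used as-is.)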
+const ( + contextPkgPath = "context" + grpcPkgPath = "google.golang.org/grpc" + codePkgPath = "google.golang.org/grpc/codes" + statusPkgPath = "google.golang.org/grpc/status" +) + +func init() { + generator.RegisterPlugin(new(grpc)) +} + +// grpc is an implementation of the Go protocol buffer compiler's +// plugin architecture. It generates bindings for gRPC support. +type grpc struct { + gen *generator.Generator +} + +// Name returns the name of this plugin, "grpc". +func (g *grpc) Name() string { + return "grpc" +} + +// The names for packages imported in the generated code. +// They may vary from the final path component of the import path +// if the name is used by other packages. +var ( + contextPkg string + grpcPkg string +) + +// Init initializes the plugin. +func (g *grpc) Init(gen *generator.Generator) { + g.gen = gen +} + +// Given a type name defined in a .proto, return its object. +// Also record that we're using it, to guarantee the associated import. +func (g *grpc) objectNamed(name string) generator.Object { + g.gen.RecordTypeUse(name) + return g.gen.ObjectNamed(name) +} + +// Given a type name defined in a .proto, return its name as we will print it. +func (g *grpc) typeName(str string) string { + return g.gen.TypeName(g.objectNamed(str)) +} + +// P forwards to g.gen.P. +func (g *grpc) P(args ...interface{}) { g.gen.P(args...) } + +// Generate generates code for the services in the given file. +func (g *grpc) Generate(file *generator.FileDescriptor) { + if len(file.FileDescriptorProto.Service) == 0 { + return + } + + contextPkg = string(g.gen.AddImport(contextPkgPath)) + grpcPkg = string(g.gen.AddImport(grpcPkgPath)) + + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ ", contextPkg, ".Context") + g.P("var _ ", grpcPkg, ".ClientConnInterface") + g.P() + + // Assert version compatibility. + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the grpc package it is being compiled against.") + g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion) + g.P() + + for i, service := range file.FileDescriptorProto.Service { + g.generateService(file, service, i) + } +} + +// GenerateImports generates the import declaration for this file. +func (g *grpc) GenerateImports(file *generator.FileDescriptor) { +} + +// reservedClientName records whether a client name is reserved on the client side. +var reservedClientName = map[string]bool{ + // TODO: do we need any in gRPC? +} + +func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } + +// deprecationComment is the standard comment added to deprecated +// messages, fields, enums, and enum values. +var deprecationComment = "// Deprecated: Do not use." + +// generateService generates all the code for the named service. +func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) { + path := fmt.Sprintf("6,%d", index) // 6 means service. + + origServName := service.GetName() + fullServName := origServName + if pkg := file.GetPackage(); pkg != "" { + fullServName = pkg + "." + fullServName + } + servName := generator.CamelCase(origServName) + deprecated := service.GetOptions().GetDeprecated() + + g.P() + g.P(fmt.Sprintf(`// %sClient is the client API for %s service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.`, servName, servName)) + + // Client interface. + if deprecated { + g.P("//") + g.P(deprecationComment) + } + g.P("type ", servName, "Client interface {") + for i, method := range service.Method { + g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. + if method.GetOptions().GetDeprecated() { + g.P("//") + g.P(deprecationComment) + } + g.P(g.generateClientSignature(servName, method)) + } + g.P("}") + g.P() + + // Client structure. + g.P("type ", unexport(servName), "Client struct {") + g.P("cc ", grpcPkg, ".ClientConnInterface") + g.P("}") + g.P() + + // NewClient factory. + if deprecated { + g.P(deprecationComment) + } + g.P("func New", servName, "Client (cc ", grpcPkg, ".ClientConnInterface) ", servName, "Client {") + g.P("return &", unexport(servName), "Client{cc}") + g.P("}") + g.P() + + var methodIndex, streamIndex int + serviceDescVar := "_" + servName + "_serviceDesc" + // Client method implementations. + for _, method := range service.Method { + var descExpr string + if !method.GetServerStreaming() && !method.GetClientStreaming() { + // Unary RPC method + descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex) + methodIndex++ + } else { + // Streaming RPC method + descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex) + streamIndex++ + } + g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr) + } + + // Server interface. + serverType := servName + "Server" + g.P("// ", serverType, " is the server API for ", servName, " service.") + if deprecated { + g.P("//") + g.P(deprecationComment) + } + g.P("type ", serverType, " interface {") + for i, method := range service.Method { + g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. + if method.GetOptions().GetDeprecated() { + g.P("//") + g.P(deprecationComment) + } + g.P(g.generateServerSignature(servName, method)) + } + g.P("}") + g.P() + + // Server Unimplemented struct for forward compatibility. + if deprecated { + g.P(deprecationComment) + } + g.generateUnimplementedServer(servName, service) + + // Server registration. + if deprecated { + g.P(deprecationComment) + } + g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {") + g.P("s.RegisterService(&", serviceDescVar, `, srv)`) + g.P("}") + g.P() + + // Server handler implementations. + var handlerNames []string + for _, method := range service.Method { + hname := g.generateServerMethod(servName, fullServName, method) + handlerNames = append(handlerNames, hname) + } + + // Service descriptor. 
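+	// The grpc.ServiceDesc built below wires each generated handler function
+	// to its method or stream name; Register<Service>Server hands this value
+	// to grpc.Server.RegisterService above.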
+ g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {") + g.P("ServiceName: ", strconv.Quote(fullServName), ",") + g.P("HandlerType: (*", serverType, ")(nil),") + g.P("Methods: []", grpcPkg, ".MethodDesc{") + for i, method := range service.Method { + if method.GetServerStreaming() || method.GetClientStreaming() { + continue + } + g.P("{") + g.P("MethodName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + g.P("},") + } + g.P("},") + g.P("Streams: []", grpcPkg, ".StreamDesc{") + for i, method := range service.Method { + if !method.GetServerStreaming() && !method.GetClientStreaming() { + continue + } + g.P("{") + g.P("StreamName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + if method.GetServerStreaming() { + g.P("ServerStreams: true,") + } + if method.GetClientStreaming() { + g.P("ClientStreams: true,") + } + g.P("},") + } + g.P("},") + g.P("Metadata: \"", file.GetName(), "\",") + g.P("}") + g.P() +} + +// generateUnimplementedServer creates the unimplemented server struct +func (g *grpc) generateUnimplementedServer(servName string, service *pb.ServiceDescriptorProto) { + serverType := servName + "Server" + g.P("// Unimplemented", serverType, " can be embedded to have forward compatible implementations.") + g.P("type Unimplemented", serverType, " struct {") + g.P("}") + g.P() + // UnimplementedServer's concrete methods + for _, method := range service.Method { + g.generateServerMethodConcrete(servName, method) + } + g.P() +} + +// generateServerMethodConcrete returns unimplemented methods which ensure forward compatibility +func (g *grpc) generateServerMethodConcrete(servName string, method *pb.MethodDescriptorProto) { + header := g.generateServerSignatureWithParamNames(servName, method) + g.P("func (*Unimplemented", servName, "Server) ", header, " {") + var nilArg string + if !method.GetServerStreaming() && !method.GetClientStreaming() { + nilArg = "nil, " + } + methName := generator.CamelCase(method.GetName()) + statusPkg := string(g.gen.AddImport(statusPkgPath)) + codePkg := string(g.gen.AddImport(codePkgPath)) + g.P("return ", nilArg, statusPkg, `.Errorf(`, codePkg, `.Unimplemented, "method `, methName, ` not implemented")`) + g.P("}") +} + +// generateClientSignature returns the client-side signature for a method. 
+func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + reqArg := ", in *" + g.typeName(method.GetInputType()) + if method.GetClientStreaming() { + reqArg = "" + } + respName := "*" + g.typeName(method.GetOutputType()) + if method.GetServerStreaming() || method.GetClientStreaming() { + respName = servName + "_" + generator.CamelCase(origMethName) + "Client" + } + return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName) +} + +func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) { + sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName()) + methName := generator.CamelCase(method.GetName()) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + if method.GetOptions().GetDeprecated() { + g.P(deprecationComment) + } + g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{") + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("out := new(", outType, ")") + // TODO: Pass descExpr to Invoke. + g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`) + g.P("if err != nil { return nil, err }") + g.P("return out, nil") + g.P("}") + g.P() + return + } + streamType := unexport(servName) + methName + "Client" + g.P("stream, err := c.cc.NewStream(ctx, ", descExpr, `, "`, sname, `", opts...)`) + g.P("if err != nil { return nil, err }") + g.P("x := &", streamType, "{stream}") + if !method.GetClientStreaming() { + g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + } + g.P("return x, nil") + g.P("}") + g.P() + + genSend := method.GetClientStreaming() + genRecv := method.GetServerStreaming() + genCloseAndRecv := !method.GetServerStreaming() + + // Stream auxiliary types and methods. + g.P("type ", servName, "_", methName, "Client interface {") + if genSend { + g.P("Send(*", inType, ") error") + } + if genRecv { + g.P("Recv() (*", outType, ", error)") + } + if genCloseAndRecv { + g.P("CloseAndRecv() (*", outType, ", error)") + } + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", inType, ") error {") + g.P("return x.ClientStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + if genCloseAndRecv { + g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } +} + +// generateServerSignatureWithParamNames returns the server-side signature for a method with parameter names. 
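+// For the same hypothetical unary Foo this yields
+//	Foo(ctx context.Context, req *FooRequest) (*FooResponse, error)
+// while streaming methods take a srv <Service>_FooServer stream parameter
+// (plus req for server-streaming RPCs) and return a bare error.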
+func (g *grpc) generateServerSignatureWithParamNames(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + + var reqArgs []string + ret := "error" + if !method.GetServerStreaming() && !method.GetClientStreaming() { + reqArgs = append(reqArgs, "ctx "+contextPkg+".Context") + ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" + } + if !method.GetClientStreaming() { + reqArgs = append(reqArgs, "req *"+g.typeName(method.GetInputType())) + } + if method.GetServerStreaming() || method.GetClientStreaming() { + reqArgs = append(reqArgs, "srv "+servName+"_"+generator.CamelCase(origMethName)+"Server") + } + + return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + +// generateServerSignature returns the server-side signature for a method. +func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + + var reqArgs []string + ret := "error" + if !method.GetServerStreaming() && !method.GetClientStreaming() { + reqArgs = append(reqArgs, contextPkg+".Context") + ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" + } + if !method.GetClientStreaming() { + reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType())) + } + if method.GetServerStreaming() || method.GetClientStreaming() { + reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server") + } + + return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + +func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string { + methName := generator.CamelCase(method.GetName()) + hname := fmt.Sprintf("_%s_%s_Handler", servName, methName) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {") + g.P("in := new(", inType, ")") + g.P("if err := dec(in); err != nil { return nil, err }") + g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }") + g.P("info := &", grpcPkg, ".UnaryServerInfo{") + g.P("Server: srv,") + g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",") + g.P("}") + g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) (interface{}, error) {") + g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))") + g.P("}") + g.P("return interceptor(ctx, in, info, handler)") + g.P("}") + g.P() + return hname + } + streamType := unexport(servName) + methName + "Server" + g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {") + if !method.GetClientStreaming() { + g.P("m := new(", inType, ")") + g.P("if err := stream.RecvMsg(m); err != nil { return err }") + g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})") + } else { + g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})") + } + g.P("}") + g.P() + + genSend := method.GetServerStreaming() + genSendAndClose := !method.GetServerStreaming() + genRecv := method.GetClientStreaming() + + // Stream 
auxiliary types and methods. + g.P("type ", servName, "_", methName, "Server interface {") + if genSend { + g.P("Send(*", outType, ") error") + } + if genSendAndClose { + g.P("SendAndClose(*", outType, ") error") + } + if genRecv { + g.P("Recv() (*", inType, ", error)") + } + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genSendAndClose { + g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {") + g.P("m := new(", inType, ")") + g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + + return hname +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go new file mode 100644 index 0000000000..532a550050 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go @@ -0,0 +1,34 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import _ "github.com/golang/protobuf/protoc-gen-go/grpc" diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go new file mode 100644 index 0000000000..8e2486de0b --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go @@ -0,0 +1,98 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate +// Go code. Run it by building this program and putting it in your path with +// the name +// protoc-gen-go +// That word 'go' at the end becomes part of the option string set for the +// protocol compiler, so once the protocol compiler (protoc) is installed +// you can run +// protoc --go_out=output_directory input_directory/file.proto +// to generate Go bindings for the protocol defined by file.proto. +// With that input, the output will be written to +// output_directory/file.pb.go +// +// The generated code is documented in the package comment for +// the library. +// +// See the README and documentation for protocol buffers to learn more: +// https://developers.google.com/protocol-buffers/ +package main + +import ( + "io/ioutil" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/protoc-gen-go/generator" +) + +func main() { + // Begin by allocating a generator. The request and response structures are stored there + // so we can do error handling easily - the response structure contains the field to + // report failure. + g := generator.New() + + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + g.Error(err, "reading input") + } + + if err := proto.Unmarshal(data, g.Request); err != nil { + g.Error(err, "parsing input proto") + } + + if len(g.Request.FileToGenerate) == 0 { + g.Fail("no files to generate") + } + + g.CommandLineParameters(g.Request.GetParameter()) + + // Create a wrapped version of the Descriptors and EnumDescriptors that + // point to the file that defines them. + g.WrapTypes() + + g.SetPackageNames() + g.BuildTypeNameMap() + + g.GenerateAllFiles() + + // Send back the results. 
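+	// Per the protoc plugin protocol, the marshaled CodeGeneratorResponse
+	// is written to stdout, mirroring the CodeGeneratorRequest read from
+	// stdin above.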
+ data, err = proto.Marshal(g.Response) + if err != nil { + g.Error(err, "failed to marshal output proto") + } + _, err = os.Stdout.Write(data) + if err != nil { + g.Error(err, "failed to write output proto") + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go new file mode 100644 index 0000000000..61bfc10e02 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go @@ -0,0 +1,369 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/compiler/plugin.proto + +/* +Package plugin_go is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/compiler/plugin.proto + +It has these top-level messages: + Version + CodeGeneratorRequest + CodeGeneratorResponse +*/ +package plugin_go + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The version number of protocol compiler. +type Version struct { + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Version) Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (dst *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(dst, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *Version) GetMajor() int32 { + if m != nil && m.Major != nil { + return *m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil && m.Minor != nil { + return *m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil && m.Patch != nil { + return *m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil && m.Suffix != nil { + return *m.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + // The .proto files that were explicitly listed on the command-line. 
The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } +func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorRequest) ProtoMessage() {} +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *CodeGeneratorRequest) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b) +} +func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src) +} +func (m *CodeGeneratorRequest) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorRequest.Size(m) +} +func (m *CodeGeneratorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo + +func (m *CodeGeneratorRequest) GetFileToGenerate() []string { + if m != nil { + return m.FileToGenerate + } + return nil +} + +func (m *CodeGeneratorRequest) GetParameter() string { + if m != nil && m.Parameter != nil { + return *m.Parameter + } + return "" +} + +func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { + if m != nil { + return m.ProtoFile + } + return nil +} + +func (m *CodeGeneratorRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. 
+ // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } +func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse) ProtoMessage() {} +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *CodeGeneratorResponse) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src) +} +func (m *CodeGeneratorResponse) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse.Size(m) +} +func (m *CodeGeneratorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo + +func (m *CodeGeneratorResponse) GetError() string { + if m != nil && m.Error != nil { + return *m.Error + } + return "" +} + +func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if m != nil { + return m.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. 
Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } +func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} +func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } +func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src) +} +func (m *CodeGeneratorResponse_File) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse_File.Size(m) +} +func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo + +func (m *CodeGeneratorResponse_File) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { + if m != nil && m.InsertionPoint != nil { + return *m.InsertionPoint + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetContent() string { + if m != nil && m.Content != nil { + return *m.Content + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version") + proto.RegisterType((*CodeGeneratorRequest)(nil), 
"google.protobuf.compiler.CodeGeneratorRequest") + proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") + proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") +} + +func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41, + 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2, + 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30, + 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa, + 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91, + 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63, + 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb, + 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55, + 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8, + 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1, + 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f, + 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d, + 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2, + 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a, + 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2, + 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d, + 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda, + 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed, + 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34, + 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79, + 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45, + 0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4, + 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e, + 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92, + 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d, + 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto new file mode 100644 index 0000000000..5b5574529e --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto @@ -0,0 +1,167 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go"; + +import "google/protobuf/descriptor.proto"; + +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. 
+ // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; + +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + optional string name = 1; + + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. 
Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + optional string insertion_point = 2; + + // The file contents. + optional string content = 15; + } + repeated File file = 15; +} diff --git a/vendor/github.com/googleapis/gax-go/README.md b/vendor/github.com/googleapis/gax-go/README.md index 3cedd5be96..c3bb2e1877 100644 --- a/vendor/github.com/googleapis/gax-go/README.md +++ b/vendor/github.com/googleapis/gax-go/README.md @@ -1,19 +1,22 @@ Google API Extensions for Go ============================ -[![Build Status](https://travis-ci.org/googleapis/gax-go.svg?branch=master)](https://travis-ci.org/googleapis/gax-go) -[![Code Coverage](https://img.shields.io/codecov/c/github/googleapis/gax-go.svg)](https://codecov.io/github/googleapis/gax-go) +[![GoDoc](https://godoc.org/github.com/googleapis/gax-go?status.svg)](https://godoc.org/github.com/googleapis/gax-go) Google API Extensions for Go (gax-go) is a set of modules which aids the development of APIs for clients and servers based on `gRPC` and Google API conventions. -Application code will rarely need to use this library directly, +To install the API extensions, use: + +``` +go get -u github.com/googleapis/gax-go +``` + +**Note:** Application code will rarely need to use this library directly, but the code generated automatically from API definition files can use it to simplify code generation and to provide more convenient and idiomatic API surface. -**This project is currently experimental and not supported.** - Go Versions =========== This library requires Go 1.6 or above. diff --git a/vendor/github.com/googleapis/gax-go/go.mod b/vendor/github.com/googleapis/gax-go/go.mod new file mode 100644 index 0000000000..acfd6c9c61 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/go.mod @@ -0,0 +1,11 @@ +module github.com/googleapis/gax-go + +require ( + github.com/golang/protobuf v1.3.1 + github.com/googleapis/gax-go/v2 v2.0.2 + golang.org/x/exp v0.0.0-20190221220918-438050ddec5e + golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3 + golang.org/x/tools v0.0.0-20190114222345-bf090417da8b + google.golang.org/grpc v1.19.0 + honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099 +) diff --git a/vendor/github.com/googleapis/gax-go/header.go b/vendor/github.com/googleapis/gax-go/header.go deleted file mode 100644 index d81455eccd..0000000000 --- a/vendor/github.com/googleapis/gax-go/header.go +++ /dev/null @@ -1,24 +0,0 @@ -package gax - -import "bytes" - -// XGoogHeader is for use by the Google Cloud Libraries only. -// -// XGoogHeader formats key-value pairs. -// The resulting string is suitable for x-goog-api-client header. 
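For reference, the helper deleted here (and reintroduced under `v2/` later in this diff) joins each key with its value by `/` and separates pairs with spaces. A small usage sketch; the version strings are illustrative:

```go
package main

import (
	"fmt"

	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	// Keys and values pair up positionally.
	h := gax.XGoogHeader("gl-go", "1.12.5", "gax", "2.0.4", "grpc", "1.19.0")
	fmt.Println(h) // gl-go/1.12.5 gax/2.0.4 grpc/1.19.0
}
```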
-func XGoogHeader(keyval ...string) string { - if len(keyval) == 0 { - return "" - } - if len(keyval)%2 != 0 { - panic("gax.Header: odd argument count") - } - var buf bytes.Buffer - for i := 0; i < len(keyval); i += 2 { - buf.WriteByte(' ') - buf.WriteString(keyval[i]) - buf.WriteByte('/') - buf.WriteString(keyval[i+1]) - } - return buf.String()[1:] -} diff --git a/vendor/github.com/googleapis/gax-go/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go similarity index 90% rename from vendor/github.com/googleapis/gax-go/call_option.go rename to vendor/github.com/googleapis/gax-go/v2/call_option.go index 7b621643e9..b1d53dd19c 100644 --- a/vendor/github.com/googleapis/gax-go/call_option.go +++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -113,6 +113,7 @@ type Backoff struct { cur time.Duration } +// Pause returns the next time.Duration that the caller should use to back off. func (bo *Backoff) Pause() time.Duration { if bo.Initial == 0 { bo.Initial = time.Second @@ -126,10 +127,11 @@ func (bo *Backoff) Pause() time.Duration { if bo.Multiplier < 1 { bo.Multiplier = 2 } - // Select a duration between zero and the current max. It might seem counterintuitive to - // have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html - // argues that that is the best strategy. - d := time.Duration(rand.Int63n(int64(bo.cur))) + // Select a duration between 1ns and the current max. It might seem + // counterintuitive to have so much jitter, but + // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that + // that is the best strategy. + d := time.Duration(1 + rand.Int63n(int64(bo.cur))) bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) if bo.cur > bo.Max { bo.cur = bo.Max @@ -143,10 +145,12 @@ func (o grpcOpt) Resolve(s *CallSettings) { s.GRPC = o } +// WithGRPCOptions allows passing gRPC call options during client creation. func WithGRPCOptions(opt ...grpc.CallOption) CallOption { return grpcOpt(append([]grpc.CallOption(nil), opt...)) } +// CallSettings allow fine-grained control over how calls are made. type CallSettings struct { // Retry returns a Retryer to be used to control retry logic of a method call. // If Retry is nil or the returned Retryer is nil, the call will not be retried. diff --git a/vendor/github.com/googleapis/gax-go/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go similarity index 95% rename from vendor/github.com/googleapis/gax-go/gax.go rename to vendor/github.com/googleapis/gax-go/v2/gax.go index 5ebedff0d0..3fd1b0b84b 100644 --- a/vendor/github.com/googleapis/gax-go/gax.go +++ b/vendor/github.com/googleapis/gax-go/v2/gax.go @@ -33,8 +33,7 @@ // Application code will rarely need to use this library directly. // However, code generated automatically from API definition files can use it // to simplify code generation and to provide more convenient and idiomatic API surfaces. -// -// This project is currently experimental and not supported. package gax -const Version = "0.1.0" +// Version specifies the gax-go version being used.
+const Version = "2.0.4" diff --git a/vendor/github.com/googleapis/gax-go/v2/go.mod b/vendor/github.com/googleapis/gax-go/v2/go.mod new file mode 100644 index 0000000000..9cdfaf4475 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/go.mod @@ -0,0 +1,3 @@ +module github.com/googleapis/gax-go/v2 + +require google.golang.org/grpc v1.19.0 diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go new file mode 100644 index 0000000000..139371a0bf --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -0,0 +1,53 @@ +// Copyright 2018, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import "bytes" + +// XGoogHeader is for use by the Google Cloud Libraries only. +// +// XGoogHeader formats key-value pairs. +// The resulting string is suitable for x-goog-api-client header. +func XGoogHeader(keyval ...string) string { + if len(keyval) == 0 { + return "" + } + if len(keyval)%2 != 0 { + panic("gax.Header: odd argument count") + } + var buf bytes.Buffer + for i := 0; i < len(keyval); i += 2 { + buf.WriteByte(' ') + buf.WriteString(keyval[i]) + buf.WriteByte('/') + buf.WriteString(keyval[i+1]) + } + return buf.String()[1:] +} diff --git a/vendor/github.com/googleapis/gax-go/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go similarity index 83% rename from vendor/github.com/googleapis/gax-go/invoke.go rename to vendor/github.com/googleapis/gax-go/v2/invoke.go index 86049d826f..fe31dd004e 100644 --- a/vendor/github.com/googleapis/gax-go/invoke.go +++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go @@ -30,12 +30,12 @@ package gax import ( + "context" + "strings" "time" - - "golang.org/x/net/context" ) -// A user defined call stub. +// APICall is a user defined call stub. 
type APICall func(context.Context, CallSettings) error + // Invoke calls the given APICall, @@ -74,6 +74,15 @@ func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper if settings.Retry == nil { return err } + // Never retry permanent certificate errors. (e.g. if ca-certificates + // are not installed). We should only make very few, targeted + // exceptions: many (other) status=Unavailable should be retried, such + // as if there's a network hiccup, or the internet goes out for a + // minute. This is also why we are doing string parsing here instead of + // simply making Unavailable a non-retried code elsewhere. + if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") { + return err + } if retryer == nil { if r := settings.Retry(); r != nil { retryer = r diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md index 6bafca5070..1d7e837116 100644 --- a/vendor/go.opencensus.io/README.md +++ b/vendor/go.opencensus.io/README.md @@ -7,7 +7,9 @@ OpenCensus Go is a Go implementation of OpenCensus, a toolkit for collecting application performance and behavior monitoring data. -Currently it consists of three major components: tags, stats, and tracing. +Currently it consists of three major components: tags, stats and tracing. + +#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289). ## Installation @@ -22,17 +24,42 @@ The use of vendoring or a dependency management tool is recommended. OpenCensus Go libraries require Go 1.8 or later. +## Getting Started + +The easiest way to get started using OpenCensus in your application is to use an existing +integration with your RPC framework: + +* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) +* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) +* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql) +* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) +* [Groupcache](https://godoc.org/github.com/orijtech/groupcache) +* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) +* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver) +* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo) +* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) +* [Memcache](https://godoc.org/github.com/orijtech/gomemcache) + +If you're using a framework not listed here, you could either implement your own middleware for your +framework or use [custom stats](#stats) and [spans](#spans) directly in your application. + ## Exporters -OpenCensus can export instrumentation data to various backends. -Currently, OpenCensus supports: +OpenCensus can export instrumentation data to various backends.
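Stepping back to the gax-go changes for a moment: the retry loop in invoke.go above is driven through `gax.Invoke` with `CallOption`s, backed by the jittered `Backoff.Pause` patched earlier in this diff. A hedged sketch of typical usage; the codes and durations are illustrative, and `callRPC` stands in for a generated client method:

```go
package main

import (
	"context"
	"log"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
)

func main() {
	// Retry only on Unavailable, with jittered exponential backoff.
	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        30 * time.Second,
			Multiplier: 1.3,
		})
	})

	// callRPC would issue the real RPC and return its error; a permanent
	// x509 certificate error returned here is never retried, per the
	// exception added above.
	callRPC := func(ctx context.Context, _ gax.CallSettings) error {
		return nil
	}

	if err := gax.Invoke(context.Background(), callRPC, retry); err != nil {
		log.Fatal(err)
	}
}
```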
+OpenCensus has exporter implementations for the following; users +can also implement their own exporters by implementing the exporter interfaces +([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter), +[trace](https://godoc.org/go.opencensus.io/trace#Exporter)): * [Prometheus][exporter-prom] for stats * [OpenZipkin][exporter-zipkin] for traces -Stackdriver [Monitoring][exporter-stackdriver] and [Trace][exporter-stackdriver] +[Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces * [Jaeger][exporter-jaeger] for traces * [AWS X-Ray][exporter-xray] for traces - +* [Datadog][exporter-datadog] for stats and traces +* [Graphite][exporter-graphite] for stats +* [Honeycomb][exporter-honeycomb] for traces +* [New Relic][exporter-newrelic] for stats and traces ## Overview @@ -43,13 +70,6 @@ multiple services until there is a response. OpenCensus allows you to instrument your services and collect diagnostics data all through your services end-to-end. -Start with instrumenting HTTP and gRPC clients and servers, -then add additional custom instrumentation if needed. - -* [HTTP guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/http) -* [gRPC guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/grpc) - - ## Tags Tags represent propagated key-value pairs. They are propagated using `context.Context` @@ -57,11 +77,11 @@ in the same process or can be encoded to be transmitted on the wire. Usually, th be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler` for gRPC. -Package tag allows adding or modifying tags in the current context. +Package `tag` allows adding or modifying tags in the current context. [embedmd]:# (internal/readme/tags.go new) ```go -ctx, err = tag.New(ctx, +ctx, err := tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), tag.Upsert(userIDKey, "cde36753ed"), ) @@ -106,7 +126,7 @@ Currently three types of aggregations are supported: [embedmd]:# (internal/readme/stats.go aggs) ```go -distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32) +distAgg := view.Distribution(1<<32, 2<<32, 3<<32) countAgg := view.Count() sumAgg := view.Sum() ``` @@ -116,26 +136,79 @@ Here we create a view with the DistributionAggregation over our measure. [embedmd]:# (internal/readme/stats.go view) ```go if err := view.Register(&view.View{ - Name: "my.org/video_size_distribution", + Name: "example.com/video_size_distribution", Description: "distribution of processed video size over time", Measure: videoSize, - Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32), + Aggregation: view.Distribution(1<<32, 2<<32, 3<<32), }); err != nil { - log.Fatalf("Failed to subscribe to view: %v", err) + log.Fatalf("Failed to register view: %v", err) } ``` -Subscribe begins collecting data for the view. Subscribed views' data will be +Register begins collecting data for the view. Registered views' data will be exported via the registered exporters. ## Traces +A distributed trace tracks the progression of a single user request as +it is handled by the services and processes that make up an application. +Each step is called a span in the trace. Spans include metadata about the step, +including especially the time spent in the step, called the span's latency. + +Below you see a trace and several spans underneath it. + +![Traces and spans](https://i.imgur.com/7hZwRVj.png) + +### Spans + +Span is the unit step in a trace. Each span has a name, latency, status and +additional metadata.
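One step the README leaves implicit between registering the view above and the span example below is recording a measurement. A minimal sketch, assuming the `ctx` and the `videoSize` measure from the earlier snippets:

```go
// Record against the tags carried by ctx; every registered view that
// references videoSize aggregates this measurement.
stats.Record(ctx, videoSize.M(25*1024*1024))
```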
+ +Below we are starting a span for a cache read and ending it +when we are done: + [embedmd]:# (internal/readme/trace.go startend) ```go -ctx, span := trace.StartSpan(ctx, "your choice of name") +ctx, span := trace.StartSpan(ctx, "cache.Get") defer span.End() + +// Do work to get from cache. ``` +### Propagation + +Spans can have parents or can be root spans if they don't have any parents. +The current span is propagated in-process and across the network to allow associating +new child spans with the parent. + +In the same process, `context.Context` is used to propagate spans. +`trace.StartSpan` creates a new span as a root if the current context +doesn't contain a span. Or, it creates a child of the span that is +already in current context. The returned context can be used to keep +propagating the newly created span in the current context. + +[embedmd]:# (internal/readme/trace.go startend) +```go +ctx, span := trace.StartSpan(ctx, "cache.Get") +defer span.End() + +// Do work to get from cache. +``` + +Across the network, OpenCensus provides different propagation +methods for different protocols. + +* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). +* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation) + by default but can be configured to use a custom propagation method by setting another + [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat). + +## Execution Tracer + +With Go 1.11, OpenCensus Go will support integration with the Go execution tracer. +See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68) +for an example of their mutual use. + ## Profiles OpenCensus tags can be applied as profiler labels @@ -167,7 +240,7 @@ Before version 1.0.0, the following deprecation policy will be observed: No backwards-incompatible changes will be made except for the removal of symbols that have been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release -removing the *Deprecated* functionality will be made no sooner than 28 days after the first +removing the *Deprecated* functionality will be made no sooner than 28 days after the first release in which the functionality was marked *Deprecated*. [travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master @@ -183,8 +256,12 @@ release in which the functionality was marked *Deprecated*. 
[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap [new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace -[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus +[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus [exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver -[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin -[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger -[exporter-xray]: https://github.com/census-instrumentation/opencensus-go-exporter-aws +[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin +[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger +[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws +[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog +[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite +[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter +[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod new file mode 100644 index 0000000000..c867df5f5c --- /dev/null +++ b/vendor/go.opencensus.io/go.mod @@ -0,0 +1,15 @@ +module go.opencensus.io + +require ( + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 + github.com/golang/protobuf v1.3.1 + github.com/google/go-cmp v0.3.0 + github.com/stretchr/testify v1.4.0 + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 + golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect + golang.org/x/text v0.3.2 // indirect + google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect + google.golang.org/grpc v1.20.1 +) + +go 1.13 diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go index 97abf756c2..81dc7183ec 100644 --- a/vendor/go.opencensus.io/internal/internal.go +++ b/vendor/go.opencensus.io/internal/internal.go @@ -14,11 +14,16 @@ package internal // import "go.opencensus.io/internal" -import "time" +import ( + "fmt" + "time" + + opencensus "go.opencensus.io" +) // UserAgent is the user agent to be added to the outgoing // requests from the exporters. -const UserAgent = "opencensus-go [0.11.0]" +var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) // MonotonicEndTime returns the end time at present // but offset from start, monotonically. @@ -28,5 +33,5 @@ const UserAgent = "opencensus-go [0.11.0]" // end as a monotonic time. // See https://golang.org/pkg/time/#hdr-Monotonic_Clocks func MonotonicEndTime(start time.Time) time.Time { - return start.Add(time.Now().Sub(start)) + return start.Add(time.Since(start)) } diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go index 3b1af8b4b8..41b2c3fc03 100644 --- a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go +++ b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go @@ -17,6 +17,7 @@ // used interally by the stats collector. package tagencoding // import "go.opencensus.io/internal/tagencoding" +// Values represent the encoded buffer for the values. type Values struct { Buffer []byte WriteIndex int @@ -31,6 +32,7 @@ func (vb *Values) growIfRequired(expected int) { } } +// WriteValue is the helper method to encode Values from map[Key][]byte. 
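The `tagencoding.Values` type documented above implements a simple length-prefixed layout: each value is written as a one-byte length followed by the payload. The sketch below is illustrative only, since the package is internal to go.opencensus.io and not importable by outside code:

```go
package main

import (
	"fmt"

	"go.opencensus.io/internal/tagencoding" // internal: illustration only
)

func main() {
	var vb tagencoding.Values
	vb.WriteValue([]byte("web"))       // writes 0x03 'w' 'e' 'b'
	vb.WriteValue([]byte("us-east-1")) // writes 0x09, then the payload

	// Decoding walks the same buffer: one length byte, then that many bytes.
	rd := tagencoding.Values{Buffer: vb.Bytes()}
	fmt.Printf("%s %s\n", rd.ReadValue(), rd.ReadValue()) // web us-east-1
}
```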
func (vb *Values) WriteValue(v []byte) { length := len(v) & 0xff vb.growIfRequired(1 + length) @@ -49,7 +51,7 @@ func (vb *Values) WriteValue(v []byte) { vb.WriteIndex += length } -// ReadValue is the helper method to read the values when decoding valuesBytes to a map[Key][]byte. +// ReadValue is the helper method to decode Values to a map[Key][]byte. func (vb *Values) ReadValue() []byte { // read length of v length := int(vb.Buffer[vb.ReadIndex]) @@ -67,6 +69,7 @@ func (vb *Values) ReadValue() []byte { return v } +// Bytes returns a reference to already written bytes in the Buffer. func (vb *Values) Bytes() []byte { return vb.Buffer[:vb.WriteIndex] } diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go index 553ca68dc4..073af7b473 100644 --- a/vendor/go.opencensus.io/internal/traceinternals.go +++ b/vendor/go.opencensus.io/internal/traceinternals.go @@ -22,6 +22,7 @@ import ( // TODO(#412): remove this var Trace interface{} +// LocalSpanStoreEnabled is true if the local span store is enabled. var LocalSpanStoreEnabled bool // BucketConfiguration stores the number of samples to store for span buckets diff --git a/vendor/go.opencensus.io/stats/internal/validation.go b/vendor/go.opencensus.io/metric/metricdata/doc.go similarity index 72% rename from vendor/go.opencensus.io/stats/internal/validation.go rename to vendor/go.opencensus.io/metric/metricdata/doc.go index b946667f96..52a7b3bf85 100644 --- a/vendor/go.opencensus.io/stats/internal/validation.go +++ b/vendor/go.opencensus.io/metric/metricdata/doc.go @@ -12,17 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -package internal // import "go.opencensus.io/stats/internal" - -const ( - MaxNameLength = 255 -) - -func IsPrintable(str string) bool { - for _, r := range str { - if !(r >= ' ' && r <= '~') { - return false - } - } - return true -} +// Package metricdata contains the metrics data model. +// +// This is an EXPERIMENTAL package, and may change in arbitrary ways without +// notice. +package metricdata // import "go.opencensus.io/metric/metricdata" diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go new file mode 100644 index 0000000000..12695ce2dc --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/exemplar.go @@ -0,0 +1,38 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Exemplar keys. +const ( + AttachmentKeySpanContext = "SpanContext" +) + +// Exemplar is an example data point associated with each bucket of a +// distribution type aggregation. +// +// Its purpose is to provide an example of the kind of thing +// (request, RPC, trace span, etc.) that resulted in that measurement.
+type Exemplar struct { + Value float64 // the value that was recorded + Timestamp time.Time // the time the value was recorded + Attachments Attachments // attachments (if any) +} + +// Attachments is a map of extra values associated with a recorded data point. +type Attachments map[string]interface{} diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go new file mode 100644 index 0000000000..aadae41e6a --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/label.go @@ -0,0 +1,35 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// LabelKey represents the key of a label. It has an optional +// description attribute. +type LabelKey struct { + Key string + Description string +} + +// LabelValue represents the value of a label. +// The zero value represents a missing label value, which may be treated +// differently to an empty string value by some back ends. +type LabelValue struct { + Value string // string value of the label + Present bool // flag that indicates whether a value is present or not +} + +// NewLabelValue creates a new non-nil LabelValue that represents the given string. +func NewLabelValue(val string) LabelValue { + return LabelValue{Value: val, Present: true} +} diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go new file mode 100644 index 0000000000..8293712c77 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/metric.go @@ -0,0 +1,46 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" + + "go.opencensus.io/resource" +) + +// Descriptor holds metadata about a metric. +type Descriptor struct { + Name string // full name of the metric + Description string // human-readable description + Unit Unit // units for the measure + Type Type // type of measure + LabelKeys []LabelKey // label keys +} + +// Metric represents a quantity measured against a resource with different +// label value combinations. +type Metric struct { + Descriptor Descriptor // metric descriptor + Resource *resource.Resource // resource against which this was measured + TimeSeries []*TimeSeries // one time series for each combination of label values +} + +// TimeSeries is a sequence of points associated with a combination of label +// values.
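The `Present` flag defined in label.go above is what distinguishes a missing label value from an explicitly empty one; a two-line sketch:

```go
present := metricdata.NewLabelValue("") // empty string, explicitly present
missing := metricdata.LabelValue{}      // zero value: no value at all
fmt.Println(present.Present, missing.Present) // true false
```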
+type TimeSeries struct { + LabelValues []LabelValue // label values, same order as keys in the metric descriptor + Points []Point // points sequence + StartTime time.Time // time we started recording this time series +} diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go new file mode 100644 index 0000000000..7fe057b19c --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/point.go @@ -0,0 +1,193 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Point is a single data point of a time series. +type Point struct { + // Time is the point in time that this point represents in a time series. + Time time.Time + // Value is the value of this point. Prefer using ReadValue to switching on + // the value type, since new value types might be added. + Value interface{} +} + +//go:generate stringer -type ValueType + +// NewFloat64Point creates a new Point holding a float64 value. +func NewFloat64Point(t time.Time, val float64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewInt64Point creates a new Point holding an int64 value. +func NewInt64Point(t time.Time, val int64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewDistributionPoint creates a new Point holding a Distribution value. +func NewDistributionPoint(t time.Time, val *Distribution) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewSummaryPoint creates a new Point holding a Summary value. +func NewSummaryPoint(t time.Time, val *Summary) Point { + return Point{ + Value: val, + Time: t, + } +} + +// ValueVisitor allows reading the value of a point. +type ValueVisitor interface { + VisitFloat64Value(float64) + VisitInt64Value(int64) + VisitDistributionValue(*Distribution) + VisitSummaryValue(*Summary) +} + +// ReadValue accepts a ValueVisitor and calls the appropriate method with the +// value of this point. +// Consumers of Point should use this in preference to switching on the type +// of the value directly, since new value types may be added. +func (p Point) ReadValue(vv ValueVisitor) { + switch v := p.Value.(type) { + case int64: + vv.VisitInt64Value(v) + case float64: + vv.VisitFloat64Value(v) + case *Distribution: + vv.VisitDistributionValue(v) + case *Summary: + vv.VisitSummaryValue(v) + default: + panic("unexpected value type") + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type Distribution struct { + // Count is the number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 + // Sum is the sum of the values in the population. If count is zero then this field + // must be zero. 
+ Sum float64 + // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. + SumOfSquaredDeviation float64 + // BucketOptions describes the bounds of the histogram buckets in this + // distribution. + // + // A Distribution may optionally contain a histogram of the values in the + // population. + // + // If nil, there is no associated histogram. + BucketOptions *BucketOptions + // Buckets is the histogram bucket counts; if the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. + Buckets []Bucket +} + +// BucketOptions describes the bounds of the histogram buckets in this +// distribution. +type BucketOptions struct { + // Bounds specifies a set of bucket upper bounds. + // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket + // index i are: + // + // [0, Bounds[i]) for i == 0 + // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 + // [Bounds[i-1], +infinity) for i == N-1 + Bounds []float64 +} + +// Bucket represents a single bucket (value range) in a distribution. +type Bucket struct { + // Count is the number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 + // Exemplar associated with this bucket (if any). + Exemplar *Exemplar +} + +// Summary is a representation of percentiles. +type Summary struct { + // Count is the cumulative count (if available). + Count int64 + // Sum is the cumulative sum of values (if available). + Sum float64 + // HasCountAndSum is true if Count and Sum are available. + HasCountAndSum bool + // Snapshot represents percentiles calculated over an arbitrary time window. + // The values in this struct can be reset at arbitrary unknown times, with + // the requirement that all of them are reset at the same time. + Snapshot Snapshot +} + +// Snapshot represents percentiles calculated over an arbitrary time window. +// The values in this struct can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. +type Snapshot struct { + // Count is the number of values in the snapshot. Optional since some systems don't + // expose this. Set to 0 if not available. + Count int64 + // Sum is the sum of values in the snapshot. Optional since some systems don't + // expose this. If count is 0 then this field must be zero. + Sum float64 + // Percentiles is a map from percentile (range (0-100.0]) to the value of + // the percentile. + Percentiles map[float64]float64 +} + +//go:generate stringer -type Type + +// Type is the overall type of metric, including its value type and whether it +// represents a cumulative total (since the start time) or if it represents a +// gauge value. +type Type int + +// Metric types.
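`Point.ReadValue` above dispatches on the dynamic type of `Value` through a `ValueVisitor`. A minimal sketch; the `printer` type is hypothetical:

```go
package main

import (
	"fmt"
	"time"

	"go.opencensus.io/metric/metricdata"
)

// printer implements metricdata.ValueVisitor and prints whatever it visits.
type printer struct{}

func (printer) VisitInt64Value(v int64)     { fmt.Println("int64:", v) }
func (printer) VisitFloat64Value(v float64) { fmt.Println("float64:", v) }
func (printer) VisitDistributionValue(d *metricdata.Distribution) {
	fmt.Println("distribution, count:", d.Count)
}
func (printer) VisitSummaryValue(s *metricdata.Summary) {
	fmt.Println("summary, count:", s.Count)
}

func main() {
	p := metricdata.NewInt64Point(time.Now(), 42)
	p.ReadValue(printer{}) // int64: 42
}
```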
+const ( + TypeGaugeInt64 Type = iota + TypeGaugeFloat64 + TypeGaugeDistribution + TypeCumulativeInt64 + TypeCumulativeFloat64 + TypeCumulativeDistribution + TypeSummary +) diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go new file mode 100644 index 0000000000..c3f8ec27b5 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/type_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type Type"; DO NOT EDIT. + +package metricdata + +import "strconv" + +const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" + +var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go new file mode 100644 index 0000000000..b483a1371b --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/unit.go @@ -0,0 +1,27 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// Unit is a string encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +type Unit string + +// Predefined units. To record against a unit not represented here, create your +// own Unit type constant from a string. +const ( + UnitDimensionless Unit = "1" + UnitBytes Unit = "By" + UnitMilliseconds Unit = "ms" +) diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go new file mode 100644 index 0000000000..ca1f390493 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/manager.go @@ -0,0 +1,78 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricproducer + +import ( + "sync" +) + +// Manager maintains a list of active producers. Producers can register +// with the manager to allow readers to read all metrics provided by them. +// Readers can retrieve all producers registered with the manager, +// read metrics from the producers and export them. 
+type Manager struct { + mu sync.RWMutex + producers map[Producer]struct{} +} + +var prodMgr *Manager +var once sync.Once + +// GlobalManager is a single instance of producer manager +// that is used by all producers and all readers. +func GlobalManager() *Manager { + once.Do(func() { + prodMgr = &Manager{} + prodMgr.producers = make(map[Producer]struct{}) + }) + return prodMgr +} + +// AddProducer adds the producer to the Manager if it is not already present. +func (pm *Manager) AddProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + pm.producers[producer] = struct{}{} +} + +// DeleteProducer deletes the producer from the Manager if it is present. +func (pm *Manager) DeleteProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + delete(pm.producers, producer) +} + +// GetAll returns a slice of all producers currently registered with +// the Manager. For each call it generates a new slice. The slice +// should not be cached as registration may change at any time. It is +// typically called periodically by exporters to read metrics from +// the producers. +func (pm *Manager) GetAll() []Producer { + pm.mu.Lock() + defer pm.mu.Unlock() + producers := make([]Producer, len(pm.producers)) + i := 0 + for producer := range pm.producers { + producers[i] = producer + i++ + } + return producers +} diff --git a/vendor/google.golang.org/api/transport/grpc/not_go18.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go similarity index 53% rename from vendor/google.golang.org/api/transport/grpc/not_go18.go rename to vendor/go.opencensus.io/metric/metricproducer/producer.go index f509d8636c..6cee9ed178 100644 --- a/vendor/google.golang.org/api/transport/grpc/not_go18.go +++ b/vendor/go.opencensus.io/metric/metricproducer/producer.go @@ -1,10 +1,10 @@ -// Copyright 2018 Google Inc. All Rights Reserved. +// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,10 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !go1.8 +package metricproducer -package grpc +import ( + "go.opencensus.io/metric/metricdata" +) -import "google.golang.org/grpc" - -func addOCStatsHandler(opts []grpc.DialOption) []grpc.DialOption { return opts } +// Producer is a source of metrics. +type Producer interface { + // Read should return the current values of all metrics supported by this + // producer. + // The returned metrics should be unique for each combination of name and + // resource. + Read() []*metricdata.Metric +} diff --git a/vendor/google.golang.org/api/transport/http/not_go18.go b/vendor/go.opencensus.io/opencensus.go similarity index 62% rename from vendor/google.golang.org/api/transport/http/not_go18.go rename to vendor/go.opencensus.io/opencensus.go index 628a21a84d..e5e4b4368c 100644 --- a/vendor/google.golang.org/api/transport/http/not_go18.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -1,10 +1,10 @@ -// Copyright 2018 Google Inc. All Rights Reserved.
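Taken together, `Producer` and `Manager` above form a small pull-model registry: producers register once, and readers periodically call `GetAll` and then `Read` on each producer. A hedged sketch of a custom producer; `constProducer` and the metric name are hypothetical:

```go
package main

import (
	"time"

	"go.opencensus.io/metric/metricdata"
	"go.opencensus.io/metric/metricproducer"
)

// constProducer reports a single gauge with a fixed value.
type constProducer struct{}

func (constProducer) Read() []*metricdata.Metric {
	return []*metricdata.Metric{{
		Descriptor: metricdata.Descriptor{
			Name: "example.com/queue_length",
			Unit: metricdata.UnitDimensionless,
			Type: metricdata.TypeGaugeInt64,
		},
		TimeSeries: []*metricdata.TimeSeries{{
			Points: []metricdata.Point{metricdata.NewInt64Point(time.Now(), 7)},
		}},
	}}
}

func main() {
	p := constProducer{}
	metricproducer.GlobalManager().AddProducer(p)
	defer metricproducer.GlobalManager().DeleteProducer(p)
	// A reader or exporter would now pick the metric up via GetAll().
}
```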
+// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !go1.8 +// Package opencensus contains Go support for OpenCensus. +package opencensus // import "go.opencensus.io" -package http - -import "net/http" - -func addOCTransport(trans http.RoundTripper) http.RoundTripper { return trans } +// Version is the current release version of OpenCensus in use. +func Version() string { + return "0.23.0" +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go index 37d238f149..28fddb8440 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client.go @@ -15,8 +15,8 @@ package ocgrpc import ( + "context" "go.opencensus.io/trace" - "golang.org/x/net/context" "google.golang.org/grpc/stats" ) @@ -31,6 +31,7 @@ type ClientHandler struct { StartOptions trace.StartOptions } +// HandleConn exists to satisfy gRPC stats.Handler. func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { // no-op } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go index b8efacfb3f..abe978b67b 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -31,9 +31,9 @@ var ( ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) ) -// Predefined views may be subscribed to collect data for the above measures. +// Predefined views may be registered to collect data for the above measures. // As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are subscribed by +// package. These are declared as a convenience only; none are registered by // default. var ( ClientSentBytesPerRPCView = &view.View{ @@ -91,15 +91,6 @@ var ( TagKeys: []tag.Key{KeyClientMethod}, Aggregation: DefaultMillisecondsDistribution, } - - // Deprecated: This view is going to be removed, if you need it please define it - // yourself. - ClientRequestCountView = &view.View{ - Name: "Count of request messages per client RPC", - TagKeys: []tag.Key{KeyClientMethod}, - Measure: ClientRoundtripLatency, - Aggregation: view.Count(), - } ) // DefaultClientViews are the default client views provided by this package. 
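Putting `DefaultClientViews` and `ClientHandler` together: a client typically registers the views once and attaches the handler at dial time. A sketch; the target address is hypothetical and `grpc.WithInsecure` is for illustration only:

```go
package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
)

func main() {
	// None of the predefined client views are registered by default.
	if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
		log.Fatal(err)
	}
	conn, err := grpc.Dial("localhost:9000",
		grpc.WithStatsHandler(&ocgrpc.ClientHandler{}),
		grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```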
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go index 303c607f63..b36349820d 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go @@ -16,10 +16,10 @@ package ocgrpc import ( + "context" "time" "go.opencensus.io/tag" - "golang.org/x/net/context" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/stats" ) @@ -30,7 +30,7 @@ func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) startTime := time.Now() if info == nil { if grpclog.V(2) { - grpclog.Infof("clientHandler.TagRPC called with nil info.", info.FullMethodName) + grpclog.Info("clientHandler.TagRPC called with nil info.") } return ctx } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go index b67b3e2be2..15ada839d6 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server.go @@ -15,8 +15,8 @@ package ocgrpc import ( + "context" "go.opencensus.io/trace" - "golang.org/x/net/context" "google.golang.org/grpc/stats" ) diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go index 02323f8712..609d9ed248 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -34,9 +34,9 @@ var ( // mechanism to load these defaults from a common repository/config shared by // all supported languages. Likely a serialized protobuf of these defaults. -// Predefined views may be subscribed to collect data for the above measures. +// Predefined views may be registered to collect data for the above measures. // As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are subscribed by +// package. These are declared as a convenience only; none are registered by // default. var ( ServerReceivedBytesPerRPCView = &view.View{ diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go index 7847c1a912..afcef023af 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go @@ -18,7 +18,7 @@ package ocgrpc import ( "time" - "golang.org/x/net/context" + "context" "go.opencensus.io/tag" "google.golang.org/grpc/grpclog" diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go index acb626e126..89cac9c4ec 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go @@ -22,9 +22,11 @@ import ( "sync/atomic" "time" + "go.opencensus.io/metric/metricdata" ocstats "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" + "go.opencensus.io/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/stats" @@ -51,16 +53,22 @@ type rpcData struct { // The following variables define the default hard-coded auxiliary data used by // both the default GRPC client and GRPC server metrics. 
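The server side mirrors the client sketch above with `DefaultServerViews` and `ServerHandler`; the same imports and caveats apply:

```go
// None of the predefined server views are registered by default.
if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
	log.Fatal(err)
}
srv := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
_ = srv // register services and call srv.Serve(lis) as usual
```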
var ( - DefaultBytesDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultMillisecondsDistribution = view.Distribution(0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) - DefaultMessageCountDistribution = view.Distribution(0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) + DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) ) +// Server tags are applied to the context used to process each RPC, as well as +// the measures at the end of each RPC. var ( - KeyServerMethod, _ = tag.NewKey("grpc_server_method") - KeyClientMethod, _ = tag.NewKey("grpc_client_method") - KeyServerStatus, _ = tag.NewKey("grpc_server_status") - KeyClientStatus, _ = tag.NewKey("grpc_client_status") + KeyServerMethod = tag.MustNewKey("grpc_server_method") + KeyServerStatus = tag.MustNewKey("grpc_server_status") +) + +// Client tags are applied to measures at the end of each RPC. +var ( + KeyClientMethod = tag.MustNewKey("grpc_client_method") + KeyClientStatus = tag.MustNewKey("grpc_client_status") ) var ( @@ -135,24 +143,31 @@ func handleRPCEnd(ctx context.Context, s *stats.End) { } latencyMillis := float64(elapsedTime) / float64(time.Millisecond) + attachments := getSpanCtxAttachment(ctx) if s.Client { - ctx, _ = tag.New(ctx, - tag.Upsert(KeyClientMethod, methodName(d.method)), - tag.Upsert(KeyClientStatus, st)) - ocstats.Record(ctx, - ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ClientRoundtripLatency.M(latencyMillis)) + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( + tag.Upsert(KeyClientMethod, methodName(d.method)), + tag.Upsert(KeyClientStatus, st)), + ocstats.WithAttachments(attachments), + ocstats.WithMeasurements( + ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ClientRoundtripLatency.M(latencyMillis))) } else { - ctx, _ = tag.New(ctx, tag.Upsert(KeyServerStatus, st)) - ocstats.Record(ctx, - ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ServerLatency.M(latencyMillis)) + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( + tag.Upsert(KeyServerStatus, st), + ), + ocstats.WithAttachments(attachments), + ocstats.WithMeasurements( + ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + 
ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ServerLatency.M(latencyMillis))) } } @@ -197,3 +212,16 @@ func statusCodeToString(s *status.Status) string { return "CODE_" + strconv.FormatInt(int64(c), 10) } } + +func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments { + attachments := map[string]interface{}{} + span := trace.FromContext(ctx) + if span == nil { + return attachments + } + spanCtx := span.SpanContext() + if spanCtx.IsSampled() { + attachments[metricdata.AttachmentKeySpanContext] = spanCtx + } + return attachments +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go index 720f381c27..fef5827566 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go @@ -19,9 +19,9 @@ import ( "google.golang.org/grpc/codes" + "context" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" - "golang.org/x/net/context" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go index 37f42b3b17..da815b2a73 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/client.go +++ b/vendor/go.opencensus.io/plugin/ochttp/client.go @@ -16,14 +16,18 @@ package ochttp import ( "net/http" + "net/http/httptrace" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" ) // Transport is an http.RoundTripper that instruments all outgoing requests with -// stats and tracing. The zero value is intended to be a useful default, but for -// now it's recommended that you explicitly set Propagation. +// OpenCensus stats and tracing. +// +// The zero value is intended to be a useful default, but for +// now it's recommended that you explicitly set Propagation, since the default +// for this may change. type Transport struct { // Base may be set to wrap another http.RoundTripper that does the actual // requests. By default http.DefaultTransport is used. @@ -43,24 +47,53 @@ type Transport struct { // for spans started by this transport. StartOptions trace.StartOptions + // GetStartOptions allows setting start options per request. If set, + // StartOptions is ignored. + GetStartOptions func(*http.Request) trace.StartOptions + + // FormatSpanName holds the function to use for generating the span name + // from the information found in the outgoing HTTP Request. By default the + // name equals the URL Path. + FormatSpanName func(*http.Request) string + + // NewClientTrace may be set to a function allowing the current *trace.Span + // to be annotated with HTTP request event information emitted by the + // httptrace package. + NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace + + // TODO: Implement tag propagation for HTTP. +} // RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { rt := t.base() + if isHealthEndpoint(req.URL.Path) { + return rt.RoundTrip(req) + } // TODO: remove excessive nesting of http.RoundTrippers here.
format := t.Propagation if format == nil { format = defaultFormat } + spanNameFormatter := t.FormatSpanName + if spanNameFormatter == nil { + spanNameFormatter = spanNameFromURL + } + + startOpts := t.StartOptions + if t.GetStartOptions != nil { + startOpts = t.GetStartOptions(req) + } + rt = &traceTransport{ base: rt, format: format, startOptions: trace.StartOptions{ - Sampler: t.StartOptions.Sampler, + Sampler: startOpts.Sampler, SpanKind: trace.SpanKindClient, }, + formatSpanName: spanNameFormatter, + newClientTrace: t.NewClientTrace, } rt = statsTransport{base: rt} return rt.RoundTrip(req) diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go index 9b286b929b..17142aabe0 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go +++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go @@ -34,8 +34,11 @@ type statsTransport struct { // RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { ctx, _ := tag.New(req.Context(), - tag.Upsert(Host, req.URL.Host), + tag.Upsert(KeyClientHost, req.Host), + tag.Upsert(Host, req.Host), + tag.Upsert(KeyClientPath, req.URL.Path), tag.Upsert(Path, req.URL.Path), + tag.Upsert(KeyClientMethod, req.Method), tag.Upsert(Method, req.Method)) req = req.WithContext(ctx) track := &tracker{ @@ -58,11 +61,14 @@ func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { track.end() } else { track.statusCode = resp.StatusCode + if req.Method != "HEAD" { + track.respContentLength = resp.ContentLength + } if resp.Body == nil { track.end() } else { track.body = resp.Body - resp.Body = track + resp.Body = wrappedBody(track, resp.Body) } } return resp, err @@ -79,36 +85,48 @@ func (t statsTransport) CancelRequest(req *http.Request) { } type tracker struct { - ctx context.Context - respSize int64 - reqSize int64 - start time.Time - body io.ReadCloser - statusCode int - endOnce sync.Once + ctx context.Context + respSize int64 + respContentLength int64 + reqSize int64 + start time.Time + body io.ReadCloser + statusCode int + endOnce sync.Once } var _ io.ReadCloser = (*tracker)(nil) func (t *tracker) end() { t.endOnce.Do(func() { + latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) + respSize := t.respSize + if t.respSize == 0 && t.respContentLength > 0 { + respSize = t.respContentLength + } m := []stats.Measurement{ - ClientLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), + ClientSentBytes.M(t.reqSize), + ClientReceivedBytes.M(respSize), + ClientRoundtripLatency.M(latencyMs), + ClientLatency.M(latencyMs), ClientResponseBytes.M(t.respSize), } if t.reqSize >= 0 { m = append(m, ClientRequestBytes.M(t.reqSize)) } - ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))) - stats.Record(ctx, m...) + + stats.RecordWithTags(t.ctx, []tag.Mutator{ + tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), + tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), + }, m...) 
}) } func (t *tracker) Read(b []byte) (int, error) { n, err := t.body.Read(b) + t.respSize += int64(n) switch err { case nil: - t.respSize += int64(n) return n, nil case io.EOF: t.end() diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go index f777772ec9..2f1c7f0063 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -38,7 +38,7 @@ const ( // because there are additional fields not represented in the // OpenCensus span context. Spans created from the incoming // header will be the direct children of the client-side span. -// Similarly, reciever of the outgoing spans should use client-side +// Similarly, receiver of the outgoing spans should use client-side // span created by OpenCensus as the parent. type HTTPFormat struct{} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go new file mode 100644 index 0000000000..5e6a343076 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/route.go @@ -0,0 +1,61 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "net/http" + + "go.opencensus.io/tag" +) + +// SetRoute sets the http_server_route tag to the given value. +// It's useful when an HTTP framework does not support the http.Handler interface +// and using WithRouteTag is not an option, but provides a way to hook into the request flow. +func SetRoute(ctx context.Context, route string) { + if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) + } +} + +// WithRouteTag returns an http.Handler that records stats with the +// http_server_route tag set to the given value. +func WithRouteTag(handler http.Handler, route string) http.Handler { + return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { + addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} + ctx, _ := tag.New(r.Context(), addRoute...) + r = r.WithContext(ctx) + handler.ServeHTTP(w, r) + return addRoute + }) +} + +// taggedHandlerFunc is a http.Handler that returns tags describing the +// processing of the request. These tags will be recorded along with the +// measures in this package at the end of the request. +type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator + +func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { + tags := h(w, r) + if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tags...) 
+	}
+}
+
+type addedTagsKey struct{}
+
+type addedTags struct {
+	t []tag.Mutator
+}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go
index 4b3c855e5b..c7ea642357 100644
--- a/vendor/go.opencensus.io/plugin/ochttp/server.go
+++ b/vendor/go.opencensus.io/plugin/ochttp/server.go
@@ -15,10 +15,8 @@
 package ochttp
 
 import (
-	"bufio"
 	"context"
-	"errors"
-	"net"
+	"io"
 	"net/http"
 	"strconv"
 	"sync"
@@ -30,16 +28,19 @@ import (
 	"go.opencensus.io/trace/propagation"
 )
 
-// Handler is a http.Handler that is aware of the incoming request's span.
+// Handler is an http.Handler wrapper to instrument your HTTP server with
+// OpenCensus. It supports both stats and tracing.
 //
+// Tracing
+//
+// This handler is aware of the incoming request's span, reading it from request
+// headers as configured using the Propagation field.
 // The extracted span can be accessed from the incoming request's
 // context.
 //
 //    span := trace.FromContext(r.Context())
 //
 // The server span will be automatically ended at the end of ServeHTTP.
-//
-// Incoming propagation mechanism is determined by the given HTTP propagators.
 type Handler struct {
 	// Propagation defines how traces are propagated. If unspecified,
 	// B3 propagation will be used.
@@ -55,50 +56,86 @@ type Handler struct {
 	// for spans started by this transport.
 	StartOptions trace.StartOptions
 
+	// GetStartOptions allows setting start options per request. If set,
+	// StartOptions is ignored.
+	GetStartOptions func(*http.Request) trace.StartOptions
+
 	// IsPublicEndpoint should be set to true for publicly accessible HTTP(S)
 	// servers. If true, any trace metadata set on the incoming request will
 	// be added as a linked trace instead of being added as a parent of the
 	// current trace.
 	IsPublicEndpoint bool
+
+	// FormatSpanName holds the function to use for generating the span name
+	// from the information found in the incoming HTTP Request. By default the
+	// name equals the URL Path.
+	FormatSpanName func(*http.Request) string
+
+	// IsHealthEndpoint holds the function to use for determining if the
+	// incoming HTTP request should be considered a health check. This is in
+	// addition to the private isHealthEndpoint func which may also indicate
+	// tracing should be skipped.
+ IsHealthEndpoint func(*http.Request) bool } func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var traceEnd, statsEnd func() - r, traceEnd = h.startTrace(w, r) + var tags addedTags + r, traceEnd := h.startTrace(w, r) defer traceEnd() - w, statsEnd = h.startStats(w, r) - defer statsEnd() + w, statsEnd := h.startStats(w, r) + defer statsEnd(&tags) handler := h.Handler if handler == nil { handler = http.DefaultServeMux } + r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) handler.ServeHTTP(w, r) } func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { - name := spanNameFromURL(r.URL) + if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) { + return r, func() {} + } + var name string + if h.FormatSpanName == nil { + name = spanNameFromURL(r) + } else { + name = h.FormatSpanName(r) + } ctx := r.Context() + + startOpts := h.StartOptions + if h.GetStartOptions != nil { + startOpts = h.GetStartOptions(r) + } + var span *trace.Span sc, ok := h.extractSpanContext(r) if ok && !h.IsPublicEndpoint { ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, - trace.WithSampler(h.StartOptions.Sampler), + trace.WithSampler(startOpts.Sampler), trace.WithSpanKind(trace.SpanKindServer)) } else { ctx, span = trace.StartSpan(ctx, name, - trace.WithSampler(h.StartOptions.Sampler), + trace.WithSampler(startOpts.Sampler), trace.WithSpanKind(trace.SpanKindServer), ) if ok { span.AddLink(trace.Link{ TraceID: sc.TraceID, SpanID: sc.SpanID, - Type: trace.LinkTypeChild, + Type: trace.LinkTypeParent, Attributes: nil, }) } } span.AddAttributes(requestAttrs(r)...) + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. + } else if r.ContentLength > 0 { + span.AddMessageReceiveEvent(0, /* TODO: messageID */ + r.ContentLength, -1) + } return r.WithContext(ctx), span.End } @@ -109,9 +146,9 @@ func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) return h.Propagation.SpanContextFromRequest(r) } -func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func()) { +func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { ctx, _ := tag.New(r.Context(), - tag.Upsert(Host, r.URL.Host), + tag.Upsert(Host, r.Host), tag.Upsert(Path, r.URL.Path), tag.Upsert(Method, r.Method)) track := &trackingResponseWriter{ @@ -126,7 +163,7 @@ func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.Respo track.reqSize = r.ContentLength } stats.Record(ctx, ServerRequestCount.M(1)) - return track, track.end + return track.wrappedResponseWriter(), track.end } type trackingResponseWriter struct { @@ -140,40 +177,10 @@ type trackingResponseWriter struct { writer http.ResponseWriter } -// Compile time assertions for widely used net/http interfaces -var _ http.CloseNotifier = (*trackingResponseWriter)(nil) -var _ http.Flusher = (*trackingResponseWriter)(nil) -var _ http.Hijacker = (*trackingResponseWriter)(nil) -var _ http.Pusher = (*trackingResponseWriter)(nil) +// Compile time assertion for ResponseWriter interface var _ http.ResponseWriter = (*trackingResponseWriter)(nil) -var errHijackerUnimplemented = errors.New("ResponseWriter does not implement http.Hijacker") - -func (t *trackingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hj, ok := t.writer.(http.Hijacker) - if !ok { - return nil, nil, errHijackerUnimplemented - } - return hj.Hijack() -} - -func (t 
*trackingResponseWriter) CloseNotify() <-chan bool {
-	cn, ok := t.writer.(http.CloseNotifier)
-	if !ok {
-		return nil
-	}
-	return cn.CloseNotify()
-}
-
-func (t *trackingResponseWriter) Push(target string, opts *http.PushOptions) error {
-	pusher, ok := t.writer.(http.Pusher)
-	if !ok {
-		return http.ErrNotSupported
-	}
-	return pusher.Push(target, opts)
-}
-
-func (t *trackingResponseWriter) end() {
+func (t *trackingResponseWriter) end(tags *addedTags) {
 	t.endOnce.Do(func() {
 		if t.statusCode == 0 {
 			t.statusCode = 200
@@ -181,6 +188,7 @@ func (t *trackingResponseWriter) end() {
 
 		span := trace.FromContext(t.ctx)
 		span.SetStatus(TraceStatus(t.statusCode, t.statusLine))
+		span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode)))
 
 		m := []stats.Measurement{
 			ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
@@ -189,8 +197,10 @@ func (t *trackingResponseWriter) end() {
 		if t.reqSize >= 0 {
 			m = append(m, ServerRequestBytes.M(t.reqSize))
 		}
-		ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)))
-		stats.Record(ctx, m...)
+		allTags := make([]tag.Mutator, len(tags.t)+1)
+		allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))
+		copy(allTags[1:], tags.t)
+		stats.RecordWithTags(t.ctx, allTags, m...)
 	})
 }
 
@@ -201,6 +211,9 @@ func (t *trackingResponseWriter) Header() http.Header {
 func (t *trackingResponseWriter) Write(data []byte) (int, error) {
 	n, err := t.writer.Write(data)
 	t.respSize += int64(n)
+	// Add message event for response bytes sent.
+	span := trace.FromContext(t.ctx)
+	span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1)
 	return n, err
 }
 
@@ -210,8 +223,231 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) {
 	t.statusLine = http.StatusText(t.statusCode)
 }
 
-func (t *trackingResponseWriter) Flush() {
-	if flusher, ok := t.writer.(http.Flusher); ok {
-		flusher.Flush()
+// wrappedResponseWriter returns a wrapped version of the original
+// ResponseWriter and only implements the same combination of additional
+// interfaces as the original.
+// This implementation is based on https://github.com/felixge/httpsnoop.
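The exhaustive switch in the function below exists because Go type assertions cannot see through a plain struct wrapper: wrapping the ResponseWriter naively would hide optional interfaces such as http.Flusher from downstream code even when the underlying writer supports them. A minimal sketch of the pitfall, using only net/http and httptest:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// naiveWrapper embeds only http.ResponseWriter, so its method set carries
// just the three ResponseWriter methods regardless of what the wrapped
// value actually implements.
type naiveWrapper struct {
	http.ResponseWriter
}

func main() {
	rec := httptest.NewRecorder() // *httptest.ResponseRecorder implements http.Flusher
	var w http.ResponseWriter = naiveWrapper{rec}
	_, ok := w.(http.Flusher)
	fmt.Println(ok) // false: the Flusher capability is hidden by the wrapper
}
```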
+func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { + var ( + hj, i0 = t.writer.(http.Hijacker) + cn, i1 = t.writer.(http.CloseNotifier) + pu, i2 = t.writer.(http.Pusher) + fl, i3 = t.writer.(http.Flusher) + rf, i4 = t.writer.(io.ReaderFrom) + ) + + switch { + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + }{t} + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + io.ReaderFrom + }{t, rf} + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Flusher + }{t, fl} + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{t, fl, rf} + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + }{t, pu} + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + io.ReaderFrom + }{t, pu, rf} + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + }{t, pu, fl} + case !i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + io.ReaderFrom + }{t, pu, fl, rf} + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + }{t, cn} + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{t, cn, rf} + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + }{t, cn, fl} + case !i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, cn, fl, rf} + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{t, cn, pu} + case !i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, cn, pu, rf} + case !i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + }{t, cn, pu, fl} + case !i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, cn, pu, fl, rf} + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + }{t, hj} + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{t, hj, rf} + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + }{t, hj, fl} + case i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + io.ReaderFrom + }{t, hj, fl, rf} + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + }{t, hj, pu} + case i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + io.ReaderFrom + }{t, hj, pu, rf} + case i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + }{t, hj, pu, fl} + case i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, pu, fl, rf} + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + }{t, hj, cn} + case i0 && i1 && !i2 && !i3 && i4: + return struct { + 
http.ResponseWriter + http.Hijacker + http.CloseNotifier + io.ReaderFrom + }{t, hj, cn, rf} + case i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + }{t, hj, cn, fl} + case i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, hj, cn, fl, rf} + case i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + }{t, hj, cn, pu} + case i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, hj, cn, pu, rf} + case i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + }{t, hj, cn, pu, fl} + case i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, cn, pu, fl, rf} + default: + return struct { + http.ResponseWriter + }{t} } } diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go new file mode 100644 index 0000000000..05c6c56cc7 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go @@ -0,0 +1,169 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "strings" + + "go.opencensus.io/trace" +) + +type spanAnnotator struct { + sp *trace.Span +} + +// TODO: Remove NewSpanAnnotator at the next release. + +// NewSpanAnnotator returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. +// Deprecated: Use NewSpanAnnotatingClientTrace instead +func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { + return NewSpanAnnotatingClientTrace(r, s) +} + +// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. 
+func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { + sa := spanAnnotator{sp: s} + + return &httptrace.ClientTrace{ + GetConn: sa.getConn, + GotConn: sa.gotConn, + PutIdleConn: sa.putIdleConn, + GotFirstResponseByte: sa.gotFirstResponseByte, + Got100Continue: sa.got100Continue, + DNSStart: sa.dnsStart, + DNSDone: sa.dnsDone, + ConnectStart: sa.connectStart, + ConnectDone: sa.connectDone, + TLSHandshakeStart: sa.tlsHandshakeStart, + TLSHandshakeDone: sa.tlsHandshakeDone, + WroteHeaders: sa.wroteHeaders, + Wait100Continue: sa.wait100Continue, + WroteRequest: sa.wroteRequest, + } +} + +func (s spanAnnotator) getConn(hostPort string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.get_connection.host_port", hostPort), + } + s.sp.Annotate(attrs, "GetConn") +} + +func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { + attrs := []trace.Attribute{ + trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), + trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), + } + if info.WasIdle { + attrs = append(attrs, + trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) + } + s.sp.Annotate(attrs, "GotConn") +} + +// PutIdleConn implements a httptrace.ClientTrace hook +func (s spanAnnotator) putIdleConn(err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) + } + s.sp.Annotate(attrs, "PutIdleConn") +} + +func (s spanAnnotator) gotFirstResponseByte() { + s.sp.Annotate(nil, "GotFirstResponseByte") +} + +func (s spanAnnotator) got100Continue() { + s.sp.Annotate(nil, "Got100Continue") +} + +func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_start.host", info.Host), + } + s.sp.Annotate(attrs, "DNSStart") +} + +func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { + var addrs []string + for _, addr := range info.Addrs { + addrs = append(addrs, addr.String()) + } + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), + } + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "DNSDone") +} + +func (s spanAnnotator) connectStart(network, addr string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_start.network", network), + trace.StringAttribute("httptrace.connect_start.addr", addr), + } + s.sp.Annotate(attrs, "ConnectStart") +} + +func (s spanAnnotator) connectDone(network, addr string, err error) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_done.network", network), + trace.StringAttribute("httptrace.connect_done.addr", addr), + } + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.connect_done.error", err.Error())) + } + s.sp.Annotate(attrs, "ConnectDone") +} + +func (s spanAnnotator) tlsHandshakeStart() { + s.sp.Annotate(nil, "TLSHandshakeStart") +} + +func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) + } + s.sp.Annotate(attrs, "TLSHandshakeDone") +} + +func (s spanAnnotator) wroteHeaders() { + s.sp.Annotate(nil, "WroteHeaders") +} + +func (s spanAnnotator) wait100Continue() { + s.sp.Annotate(nil, 
"Wait100Continue") +} + +func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { + var attrs []trace.Attribute + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "WroteRequest") +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go index 2bd11f6ddc..ee3729040d 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -20,20 +20,67 @@ import ( "go.opencensus.io/tag" ) +// Deprecated: client HTTP measures. +var ( + // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. + ClientRequestCount = stats.Int64( + "opencensus.io/http/client/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + // Deprecated: Use ClientSentBytes. + ClientRequestBytes = stats.Int64( + "opencensus.io/http/client/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientReceivedBytes. + ClientResponseBytes = stats.Int64( + "opencensus.io/http/client/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientRoundtripLatency. + ClientLatency = stats.Float64( + "opencensus.io/http/client/latency", + "End-to-end latency", + stats.UnitMilliseconds) +) + // The following client HTTP measures are supported for use in custom views. var ( - ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless) - ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) - ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) - ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) + ClientSentBytes = stats.Int64( + "opencensus.io/http/client/sent_bytes", + "Total bytes sent in request body (not including headers)", + stats.UnitBytes, + ) + ClientReceivedBytes = stats.Int64( + "opencensus.io/http/client/received_bytes", + "Total bytes received in response bodies (not including headers but including error responses with bodies)", + stats.UnitBytes, + ) + ClientRoundtripLatency = stats.Float64( + "opencensus.io/http/client/roundtrip_latency", + "Time between first byte of request headers sent to last byte of response received, or terminal error", + stats.UnitMilliseconds, + ) ) // The following server HTTP measures are supported for use in custom views: var ( - ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless) - ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) - ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) - ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds) + ServerRequestCount = stats.Int64( + "opencensus.io/http/server/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + ServerRequestBytes = stats.Int64( + 
"opencensus.io/http/server/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + ServerResponseBytes = stats.Int64( + "opencensus.io/http/server/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + ServerLatency = stats.Float64( + "opencensus.io/http/server/latency", + "End-to-end latency", + stats.UnitMilliseconds) ) // The following tags are applied to stats recorded by this package. Host, Path @@ -41,28 +88,89 @@ var ( // ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. var ( // Host is the value of the HTTP Host header. - Host, _ = tag.NewKey("http.host") + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Host = tag.MustNewKey("http.host") // StatusCode is the numeric HTTP response status code, // or "error" if a transport error occurred and no status code was read. - StatusCode, _ = tag.NewKey("http.status") + StatusCode = tag.MustNewKey("http.status") // Path is the URL path (not including query string) in the request. - Path, _ = tag.NewKey("http.path") + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Path = tag.MustNewKey("http.path") // Method is the HTTP method of the request, capitalized (GET, POST, etc.). - Method, _ = tag.NewKey("http.method") + Method = tag.MustNewKey("http.method") + + // KeyServerRoute is a low cardinality string representing the logical + // handler of the request. This is usually the pattern registered on the a + // ServeMux (or similar string). + KeyServerRoute = tag.MustNewKey("http_server_route") +) + +// Client tag keys. +var ( + // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). + KeyClientMethod = tag.MustNewKey("http_client_method") + // KeyClientPath is the URL path (not including query string). + KeyClientPath = tag.MustNewKey("http_client_path") + // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received. + KeyClientStatus = tag.MustNewKey("http_client_status") + // KeyClientHost is the value of the request Host header. + KeyClientHost = tag.MustNewKey("http_client_host") ) // Default distributions used by views in this package. var ( - DefaultSizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultLatencyDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) ) -// Package ochttp provides some convenience views. -// You need to subscribe to the views for data to actually be collected. +// Package ochttp provides some convenience views for client measures. 
+// You still need to register these views for data to actually be collected. var ( + ClientSentBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/sent_bytes", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientReceivedBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/received_bytes", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientRoundtripLatencyDistribution = &view.View{ + Name: "opencensus.io/http/client/roundtrip_latency", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + Description: "End-to-end latency, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientCompletedCount = &view.View{ + Name: "opencensus.io/http/client/completed_count", + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + Description: "Count of completed requests, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } +) + +// Deprecated: Old client Views. +var ( + // Deprecated: No direct replacement, but see ClientCompletedCount. ClientRequestCountView = &view.View{ Name: "opencensus.io/http/client/request_count", Description: "Count of HTTP requests started", @@ -70,43 +178,52 @@ var ( Aggregation: view.Count(), } + // Deprecated: Use ClientSentBytesDistribution. ClientRequestBytesView = &view.View{ Name: "opencensus.io/http/client/request_bytes", Description: "Size distribution of HTTP request body", - Measure: ClientRequestBytes, + Measure: ClientSentBytes, Aggregation: DefaultSizeDistribution, } + // Deprecated: Use ClientReceivedBytesDistribution instead. ClientResponseBytesView = &view.View{ Name: "opencensus.io/http/client/response_bytes", Description: "Size distribution of HTTP response body", - Measure: ClientResponseBytes, + Measure: ClientReceivedBytes, Aggregation: DefaultSizeDistribution, } + // Deprecated: Use ClientRoundtripLatencyDistribution instead. ClientLatencyView = &view.View{ Name: "opencensus.io/http/client/latency", Description: "Latency distribution of HTTP requests", - Measure: ClientLatency, + Measure: ClientRoundtripLatency, Aggregation: DefaultLatencyDistribution, } + // Deprecated: Use ClientCompletedCount instead. ClientRequestCountByMethod = &view.View{ Name: "opencensus.io/http/client/request_count_by_method", Description: "Client request count by HTTP method", TagKeys: []tag.Key{Method}, - Measure: ClientRequestCount, + Measure: ClientSentBytes, Aggregation: view.Count(), } + // Deprecated: Use ClientCompletedCount instead. ClientResponseCountByStatusCode = &view.View{ Name: "opencensus.io/http/client/response_count_by_status_code", Description: "Client response count by status code", TagKeys: []tag.Key{StatusCode}, - Measure: ClientLatency, + Measure: ClientRoundtripLatency, Aggregation: view.Count(), } +) +// Package ochttp provides some convenience views for server measures. +// You still need to register these views for data to actually be collected. 
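A sketch of the registration step the comment above calls out, combined with the new route tagging; the handler path, route pattern, and port are illustrative:

```go
package main

import (
	"io"
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	// Per the deprecation notes on Default*Views, register the individual
	// views you want; defining a view alone collects nothing.
	if err := view.Register(ochttp.ServerRequestCountView); err != nil {
		log.Fatal(err)
	}

	mux := http.NewServeMux()
	// WithRouteTag records the low-cardinality http_server_route tag
	// ("/users/:id") instead of the raw, potentially high-cardinality URL path.
	mux.Handle("/users/", ochttp.WithRouteTag(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "ok")
	}), "/users/:id"))

	log.Fatal(http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux}))
}
```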
+var ( ServerRequestCountView = &view.View{ Name: "opencensus.io/http/server/request_count", Description: "Count of HTTP requests started", @@ -153,6 +270,7 @@ var ( ) // DefaultClientViews are the default client views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. var DefaultClientViews = []*view.View{ ClientRequestCountView, ClientRequestBytesView, @@ -163,6 +281,7 @@ var DefaultClientViews = []*view.View{ } // DefaultServerViews are the default server views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. var DefaultServerViews = []*view.View{ ServerRequestCountView, ServerRequestBytesView, diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go index 80ee86c7a1..ed3a5db561 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -17,7 +17,7 @@ package ochttp import ( "io" "net/http" - "net/url" + "net/http/httptrace" "go.opencensus.io/plugin/ochttp/propagation/b3" "go.opencensus.io/trace" @@ -34,14 +34,17 @@ const ( HostAttribute = "http.host" MethodAttribute = "http.method" PathAttribute = "http.path" + URLAttribute = "http.url" UserAgentAttribute = "http.user_agent" StatusCodeAttribute = "http.status_code" ) type traceTransport struct { - base http.RoundTripper - startOptions trace.StartOptions - format propagation.HTTPFormat + base http.RoundTripper + startOptions trace.StartOptions + format propagation.HTTPFormat + formatSpanName func(*http.Request) string + newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace } // TODO(jbd): Add message events for request and response size. @@ -50,15 +53,30 @@ type traceTransport struct { // The created span can follow a parent span, if a parent is presented in // the request's context. func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { - name := spanNameFromURL(req.URL) + name := t.formatSpanName(req) // TODO(jbd): Discuss whether we want to prefix // outgoing requests with Sent. - _, span := trace.StartSpan(req.Context(), name, + ctx, span := trace.StartSpan(req.Context(), name, trace.WithSampler(t.startOptions.Sampler), trace.WithSpanKind(trace.SpanKindClient)) - req = req.WithContext(trace.WithSpan(req.Context(), span)) + if t.newClientTrace != nil { + req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) + } else { + req = req.WithContext(ctx) + } + if t.format != nil { + // SpanContextToRequest will modify its Request argument, which is + // contrary to the contract for http.RoundTripper, so we need to + // pass it a copy of the Request. + // However, the Request struct itself was already copied by + // the WithContext calls above and so we just need to copy the header. + header := make(http.Header) + for k, v := range req.Header { + header[k] = v + } + req.Header = header t.format.SpanContextToRequest(span.SpanContext(), req) } @@ -76,7 +94,8 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { // span.End() will be invoked after // a read from resp.Body returns io.EOF or when // resp.Body.Close() is invoked. 
- resp.Body = &bodyTracker{rc: resp.Body, span: span} + bt := &bodyTracker{rc: resp.Body, span: span} + resp.Body = wrappedBody(bt, resp.Body) return resp, err } @@ -127,17 +146,26 @@ func (t *traceTransport) CancelRequest(req *http.Request) { } } -func spanNameFromURL(u *url.URL) string { - return u.Path +func spanNameFromURL(req *http.Request) string { + return req.URL.Path } func requestAttrs(r *http.Request) []trace.Attribute { - return []trace.Attribute{ + userAgent := r.UserAgent() + + attrs := make([]trace.Attribute, 0, 5) + attrs = append(attrs, trace.StringAttribute(PathAttribute, r.URL.Path), - trace.StringAttribute(HostAttribute, r.URL.Host), + trace.StringAttribute(URLAttribute, r.URL.String()), + trace.StringAttribute(HostAttribute, r.Host), trace.StringAttribute(MethodAttribute, r.Method), - trace.StringAttribute(UserAgentAttribute, r.UserAgent()), + ) + + if userAgent != "" { + attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) } + + return attrs } func responseAttrs(resp *http.Response) []trace.Attribute { @@ -146,7 +174,7 @@ func responseAttrs(resp *http.Response) []trace.Attribute { } } -// HTTPStatusToTraceStatus converts the HTTP status code to a trace.Status that +// TraceStatus is a utility to convert the HTTP status code to a trace.Status that // represents the outcome as closely as possible. func TraceStatus(httpStatusCode int, statusLine string) trace.Status { var code int32 @@ -158,6 +186,8 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status { code = trace.StatusCodeCancelled case http.StatusBadRequest: code = trace.StatusCodeInvalidArgument + case http.StatusUnprocessableEntity: + code = trace.StatusCodeInvalidArgument case http.StatusGatewayTimeout: code = trace.StatusCodeDeadlineExceeded case http.StatusNotFound: @@ -174,26 +204,41 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status { code = trace.StatusCodeUnavailable case http.StatusOK: code = trace.StatusCodeOK + case http.StatusConflict: + code = trace.StatusCodeAlreadyExists } + return trace.Status{Code: code, Message: codeToStr[code]} } var codeToStr = map[int32]string{ - trace.StatusCodeOK: `"OK"`, - trace.StatusCodeCancelled: `"CANCELLED"`, - trace.StatusCodeUnknown: `"UNKNOWN"`, - trace.StatusCodeInvalidArgument: `"INVALID_ARGUMENT"`, - trace.StatusCodeDeadlineExceeded: `"DEADLINE_EXCEEDED"`, - trace.StatusCodeNotFound: `"NOT_FOUND"`, - trace.StatusCodeAlreadyExists: `"ALREADY_EXISTS"`, - trace.StatusCodePermissionDenied: `"PERMISSION_DENIED"`, - trace.StatusCodeResourceExhausted: `"RESOURCE_EXHAUSTED"`, - trace.StatusCodeFailedPrecondition: `"FAILED_PRECONDITION"`, - trace.StatusCodeAborted: `"ABORTED"`, - trace.StatusCodeOutOfRange: `"OUT_OF_RANGE"`, - trace.StatusCodeUnimplemented: `"UNIMPLEMENTED"`, - trace.StatusCodeInternal: `"INTERNAL"`, - trace.StatusCodeUnavailable: `"UNAVAILABLE"`, - trace.StatusCodeDataLoss: `"DATA_LOSS"`, - trace.StatusCodeUnauthenticated: `"UNAUTHENTICATED"`, + trace.StatusCodeOK: `OK`, + trace.StatusCodeCancelled: `CANCELLED`, + trace.StatusCodeUnknown: `UNKNOWN`, + trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, + trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, + trace.StatusCodeNotFound: `NOT_FOUND`, + trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, + trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, + trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, + trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, + trace.StatusCodeAborted: `ABORTED`, + trace.StatusCodeOutOfRange: 
`OUT_OF_RANGE`, + trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, + trace.StatusCodeInternal: `INTERNAL`, + trace.StatusCodeUnavailable: `UNAVAILABLE`, + trace.StatusCodeDataLoss: `DATA_LOSS`, + trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, +} + +func isHealthEndpoint(path string) bool { + // Health checking is pretty frequent and + // traces collected for health endpoints + // can be extremely noisy and expensive. + // Disable canonical health checking endpoints + // like /healthz and /_ah/health for now. + if path == "/healthz" || path == "/_ah/health" { + return true + } + return false } diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go new file mode 100644 index 0000000000..7d75cae2b1 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go @@ -0,0 +1,44 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "io" +) + +// wrappedBody returns a wrapped version of the original +// Body and only implements the same combination of additional +// interfaces as the original. +func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { + var ( + wr, i0 = body.(io.Writer) + ) + switch { + case !i0: + return struct { + io.ReadCloser + }{wrapper} + + case i0: + return struct { + io.ReadCloser + io.Writer + }{wrapper, wr} + default: + return struct { + io.ReadCloser + }{wrapper} + } +} diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go new file mode 100644 index 0000000000..b1764e1d3b --- /dev/null +++ b/vendor/go.opencensus.io/resource/resource.go @@ -0,0 +1,164 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package resource provides functionality for resource, which capture +// identifying information about the entities for which signals are exported. +package resource + +import ( + "context" + "fmt" + "os" + "regexp" + "sort" + "strconv" + "strings" +) + +// Environment variables used by FromEnv to decode a resource. +const ( + EnvVarType = "OC_RESOURCE_TYPE" + EnvVarLabels = "OC_RESOURCE_LABELS" +) + +// Resource describes an entity about which identifying information and metadata is exposed. +// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. 
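For orientation, a sketch of how the environment variables and the EncodeLabels/FromEnv helpers defined in this file fit together; the resource type and label values are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"go.opencensus.io/resource"
)

func main() {
	// Encode a label map the same way OC_RESOURCE_LABELS expects it:
	// container="app",namespace="default" (keys sorted, values quoted).
	labels := map[string]string{"container": "app", "namespace": "default"}
	os.Setenv("OC_RESOURCE_TYPE", "k8s.io/container")
	os.Setenv("OC_RESOURCE_LABELS", resource.EncodeLabels(labels))

	res, err := resource.FromEnv(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Type, res.Labels["container"]) // k8s.io/container app
}
```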
+type Resource struct {
+	Type   string
+	Labels map[string]string
+}
+
+// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable.
+func EncodeLabels(labels map[string]string) string {
+	sortedKeys := make([]string, 0, len(labels))
+	for k := range labels {
+		sortedKeys = append(sortedKeys, k)
+	}
+	sort.Strings(sortedKeys)
+
+	s := ""
+	for i, k := range sortedKeys {
+		if i > 0 {
+			s += ","
+		}
+		s += k + "=" + strconv.Quote(labels[k])
+	}
+	return s
+}
+
+var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`)
+
+// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable.
+// A list of labels of the form `<key1>="<value1>",<key2>="<value2>",...` is accepted.
+// Domain names and paths are accepted as label keys.
+// Most users will want to use FromEnv instead.
+func DecodeLabels(s string) (map[string]string, error) {
+	m := map[string]string{}
+	// Ensure a trailing comma, which allows us to keep the regex simpler
+	s = strings.TrimRight(strings.TrimSpace(s), ",") + ","
+
+	for len(s) > 0 {
+		match := labelRegex.FindStringSubmatch(s)
+		if len(match) == 0 {
+			return nil, fmt.Errorf("invalid label formatting, remainder: %s", s)
+		}
+		v := match[2]
+		if v == "" {
+			v = match[3]
+		} else {
+			var err error
+			if v, err = strconv.Unquote(v); err != nil {
+				return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err)
+			}
+		}
+		m[match[1]] = v
+
+		s = s[len(match[0]):]
+	}
+	return m, nil
+}
+
+// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE
+// and OC_RESOURCE_LABELS environment variables.
+func FromEnv(context.Context) (*Resource, error) {
+	res := &Resource{
+		Type: strings.TrimSpace(os.Getenv(EnvVarType)),
+	}
+	labels := strings.TrimSpace(os.Getenv(EnvVarLabels))
+	if labels == "" {
+		return res, nil
+	}
+	var err error
+	if res.Labels, err = DecodeLabels(labels); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+var _ Detector = FromEnv
+
+// merge resource information from b into a. In case of a collision, a takes precedence.
+func merge(a, b *Resource) *Resource {
+	if a == nil {
+		return b
+	}
+	if b == nil {
+		return a
+	}
+	res := &Resource{
+		Type:   a.Type,
+		Labels: map[string]string{},
+	}
+	if res.Type == "" {
+		res.Type = b.Type
+	}
+	for k, v := range b.Labels {
+		res.Labels[k] = v
+	}
+	// Labels from resource a overwrite labels from resource b.
+	for k, v := range a.Labels {
+		res.Labels[k] = v
+	}
+	return res
+}
+
+// Detector attempts to detect resource information.
+// If the detector cannot find resource information, the returned resource is nil but no
+// error is returned.
+// An error is only returned on unexpected failures.
+type Detector func(context.Context) (*Resource, error)
+
+// MultiDetector returns a Detector that calls all input detectors in order and
+// merges each result with the previous one. In case a type or label key is already set,
+// the first set value takes precedence.
+// It returns on the first error that a sub-detector encounters.
+func MultiDetector(detectors ...Detector) Detector {
+	return func(ctx context.Context) (*Resource, error) {
+		return detectAll(ctx, detectors...)
+	}
+}
+
+// detectAll calls all input detectors sequentially and merges each result with the previous one.
+// It returns on the first error that a sub-detector encounters.
+func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) { + var res *Resource + for _, d := range detectors { + r, err := d(ctx) + if err != nil { + return nil, err + } + res = merge(res, r) + } + return res, nil +} diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go index 7a8a62c143..00d473ee02 100644 --- a/vendor/go.opencensus.io/stats/doc.go +++ b/vendor/go.opencensus.io/stats/doc.go @@ -21,35 +21,49 @@ aggregate the collected data, and export the aggregated data. Measures -A measure represents a type of metric to be tracked and recorded. +A measure represents a type of data point to be tracked and recorded. For example, latency, request Mb/s, and response Mb/s are measures to collect from a server. -Each measure needs to be registered before being used. Measure -constructors such as Int64 and Float64 automatically +Measure constructors such as Int64 and Float64 automatically register the measure by the given name. Each registered measure needs to be unique by name. Measures also have a description and a unit. -Libraries can define and export measures for their end users to -create views and collect instrumentation data. +Libraries can define and export measures. Application authors can then +create views and collect and break down measures by the tags they are +interested in. Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms -latency event. Users collect data points on the existing measures with +latency event. Measurements are created from measures with the current context. Tags from the current context are recorded with the measurements if they are any. -Recorded measurements are dropped immediately if user is not aggregating -them via views. Users don't necessarily need to conditionally enable/disable +Recorded measurements are dropped immediately if no views are registered for them. +There is usually no need to conditionally enable and disable recording to reduce cost. Recording of measurements is cheap. -Libraries can always record measurements, and end-users can later decide +Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. + +Exemplars + +For a given recorded measurement, the associated exemplar is a diagnostic map +that gives more information about the measurement. + +When aggregated using a Distribution aggregation, an exemplar is kept for each +bucket in the Distribution. This allows you to easily find an example of a +measurement that fell into each bucket. + +For example, if you also use the OpenCensus trace package and you +record a measurement with a context that contains a sampled trace span, +then the trace span will be added to the exemplar associated with the measurement. + +When exported to a supporting back end, you should be able to easily navigate +to example traces that fell into each bucket in the Distribution. + */ package stats // import "go.opencensus.io/stats" - -// TODO(acetechnologist): Add a link to the language independent OpenCensus -// spec when it is available. 
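A compact end-to-end sketch of the workflow the stats package documentation above describes: define a measure, register a view over it, then record tagged measurements. All names here are illustrative:

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

var (
	// Recording against a measure is cheap; it only has a real cost once
	// a view over the measure is registered.
	latencyMs = stats.Float64("example.com/measures/latency", "RPC latency", stats.UnitMilliseconds)
	keyMethod = tag.MustNewKey("method")
)

func main() {
	if err := view.Register(&view.View{
		Name:        "example.com/views/latency",
		Measure:     latencyMs,
		Description: "Latency distribution by method",
		TagKeys:     []tag.Key{keyMethod},
		Aggregation: view.Distribution(1, 10, 100, 1000),
	}); err != nil {
		log.Fatal(err)
	}

	// Tags in the context are recorded together with the measurement.
	ctx, _ := tag.New(context.Background(), tag.Upsert(keyMethod, "GET"))
	stats.Record(ctx, latencyMs.M(42.0))
}
```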
diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go index 6341eb2ad7..36935e629b 100644 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -19,7 +19,7 @@ import ( ) // DefaultRecorder will be called for each Record call. -var DefaultRecorder func(*tag.Map, interface{}) +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go index aa555c209c..1ffd3cefc7 100644 --- a/vendor/go.opencensus.io/stats/measure.go +++ b/vendor/go.opencensus.io/stats/measure.go @@ -20,19 +20,31 @@ import ( "sync/atomic" ) -// Measure represents a type of metric to be tracked and recorded. -// For example, latency, request Mb/s, and response Mb/s are measures +// Measure represents a single numeric value to be tracked and recorded. +// For example, latency, request bytes, and response bytes could be measures // to collect from a server. // -// Each measure needs to be registered before being used. -// Measure constructors such as Int64 and -// Float64 automatically registers the measure -// by the given name. -// Each registered measure needs to be unique by name. -// Measures also have a description and a unit. +// Measures by themselves have no outside effects. In order to be exported, +// the measure needs to be used in a View. If no Views are defined over a +// measure, there is very little cost in recording it. type Measure interface { + // Name returns the name of this measure. + // + // Measure names are globally unique (among all libraries linked into your program). + // We recommend prefixing the measure name with a domain name relevant to your + // project or application. + // + // Measure names are never sent over the wire or exported to backends. + // They are only used to create Views. Name() string + + // Description returns the human-readable description of this measure. Description() string + + // Unit returns the units for the values this measure takes on. + // + // Units are encoded according to the case-sensitive abbreviations from the + // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html Unit() string } @@ -81,8 +93,9 @@ func registerMeasureHandle(name, desc, unit string) *measureDescriptor { // provides methods to create measurements of their kind. For example, Int64Measure // provides M to convert an int64 into a measurement. type Measurement struct { - v float64 - m Measure + v float64 + m Measure + desc *measureDescriptor } // Value returns the value of the Measurement as a float64. diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go index 8de6b52218..f02c1eda84 100644 --- a/vendor/go.opencensus.io/stats/measure_float64.go +++ b/vendor/go.opencensus.io/stats/measure_float64.go @@ -15,38 +15,41 @@ package stats -// Float64Measure is a measure of type float64. +// Float64Measure is a measure for float64 values. type Float64Measure struct { - md *measureDescriptor -} - -// Name returns the name of the measure. -func (m *Float64Measure) Name() string { - return m.md.name -} - -// Description returns the description of the measure. 
-func (m *Float64Measure) Description() string { - return m.md.description -} - -// Unit returns the unit of the measure. -func (m *Float64Measure) Unit() string { - return m.md.unit + desc *measureDescriptor } // M creates a new float64 measurement. // Use Record to record measurements. func (m *Float64Measure) M(v float64) Measurement { - if !m.md.subscribed() { - return Measurement{} + return Measurement{ + m: m, + desc: m.desc, + v: v, } - return Measurement{m: m, v: v} } -// Float64 creates a new measure of type Float64Measure. -// It never returns an error. +// Float64 creates a new measure for float64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. func Float64(name, description, unit string) *Float64Measure { mi := registerMeasureHandle(name, description, unit) return &Float64Measure{mi} } + +// Name returns the name of the measure. +func (m *Float64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Float64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Float64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go index b6fd25f0d3..d101d79735 100644 --- a/vendor/go.opencensus.io/stats/measure_int64.go +++ b/vendor/go.opencensus.io/stats/measure_int64.go @@ -15,38 +15,41 @@ package stats -// Int64Measure is a measure of type int64. +// Int64Measure is a measure for int64 values. type Int64Measure struct { - md *measureDescriptor -} - -// Name returns the name of the measure. -func (m *Int64Measure) Name() string { - return m.md.name -} - -// Description returns the description of the measure. -func (m *Int64Measure) Description() string { - return m.md.description -} - -// Unit returns the unit of the measure. -func (m *Int64Measure) Unit() string { - return m.md.unit + desc *measureDescriptor } // M creates a new int64 measurement. // Use Record to record measurements. func (m *Int64Measure) M(v int64) Measurement { - if !m.md.subscribed() { - return Measurement{} + return Measurement{ + m: m, + desc: m.desc, + v: float64(v), } - return Measurement{m: m, v: float64(v)} } -// Int64 creates a new measure of type Int64Measure. -// It never returns an error. +// Int64 creates a new measure for int64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. func Int64(name, description, unit string) *Int64Measure { mi := registerMeasureHandle(name, description, unit) return &Int64Measure{mi} } + +// Name returns the name of the measure. +func (m *Int64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Int64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Int64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index 98865ff697..ad4691184d 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -18,6 +18,7 @@ package stats import ( "context" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) @@ -30,23 +31,87 @@ func init() { } } -// Record records one or multiple measurements with the same tags at once. 
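For orientation, a sketch of the options-based recording API introduced in the hunk that follows; the measure and tag key names are illustrative:

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

var (
	bytesIn   = stats.Int64("example.com/measures/bytes_in", "Bytes received", stats.UnitBytes)
	keyStatus = tag.MustNewKey("status")
)

func main() {
	// One call applies the tag mutators and records the measurements;
	// the returned error reports invalid tag mutations.
	err := stats.RecordWithOptions(context.Background(),
		stats.WithTags(tag.Upsert(keyStatus, "OK")),
		stats.WithMeasurements(bytesIn.M(512)),
	)
	if err != nil {
		log.Fatal(err)
	}
}
```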
+type recordOptions struct { + attachments metricdata.Attachments + mutators []tag.Mutator + measurements []Measurement +} + +// WithAttachments applies provided exemplar attachments. +func WithAttachments(attachments metricdata.Attachments) Options { + return func(ro *recordOptions) { + ro.attachments = attachments + } +} + +// WithTags applies provided tag mutators. +func WithTags(mutators ...tag.Mutator) Options { + return func(ro *recordOptions) { + ro.mutators = mutators + } +} + +// WithMeasurements applies provided measurements. +func WithMeasurements(measurements ...Measurement) Options { + return func(ro *recordOptions) { + ro.measurements = measurements + } +} + +// Options apply changes to recordOptions. +type Options func(*recordOptions) + +func createRecordOption(ros ...Options) *recordOptions { + o := &recordOptions{} + for _, ro := range ros { + ro(o) + } + return o +} + +// Record records one or multiple measurements with the same context at once. // If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { - if len(ms) == 0 { - return + RecordWithOptions(ctx, WithMeasurements(ms...)) +} + +// RecordWithTags records one or multiple measurements at once. +// +// Measurements will be tagged with the tags in the context mutated by the mutators. +// RecordWithTags is useful if you want to record with tag mutations but don't want +// to propagate the mutations in the context. +func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { + return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) +} + +// RecordWithOptions records measurements from the given options (if any) against context +// and tags and attachments in the options (if any). +// If there are any tags in the context, measurements will be tagged with them. +func RecordWithOptions(ctx context.Context, ros ...Options) error { + o := createRecordOption(ros...) + if len(o.measurements) == 0 { + return nil } - var record bool - for _, m := range ms { - if (m != Measurement{}) { + recorder := internal.DefaultRecorder + if recorder == nil { + return nil + } + record := false + for _, m := range o.measurements { + if m.desc.subscribed() { record = true break } } if !record { - return + return nil } - if internal.DefaultRecorder != nil { - internal.DefaultRecorder(tag.FromContext(ctx), ms) + if len(o.mutators) > 0 { + var err error + if ctx, err = tag.New(ctx, o.mutators...); err != nil { + return err + } } + recorder(tag.FromContext(ctx), o.measurements, o.attachments) + return nil } diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go index 6931a5f296..736399652c 100644 --- a/vendor/go.opencensus.io/stats/units.go +++ b/vendor/go.opencensus.io/stats/units.go @@ -22,4 +22,5 @@ const ( UnitDimensionless = "1" UnitBytes = "By" UnitMilliseconds = "ms" + UnitSeconds = "s" ) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go index b7f169b4a5..9d7093728e 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -82,7 +82,7 @@ func Sum() *Aggregation { // Distribution indicates that the desired aggregation is // a histogram distribution. // -// An distribution aggregation may contain a histogram of the values in the +// A distribution aggregation may contain a histogram of the values in the // population. 
The bucket boundaries for that histogram are described // by the bounds. This defines len(bounds)+1 buckets. // @@ -99,13 +99,14 @@ func Sum() *Aggregation { // If len(bounds) is 1 then there is no finite buckets, and that single // element is the common boundary of the overflow and underflow buckets. func Distribution(bounds ...float64) *Aggregation { - return &Aggregation{ + agg := &Aggregation{ Type: AggTypeDistribution, Buckets: bounds, - newData: func() AggregationData { - return newDistributionData(bounds) - }, } + agg.newData = func() AggregationData { + return newDistributionData(agg) + } + return agg } // LastValue only reports the last value recorded using this diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go index 88c500bff9..f331d456e9 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -17,6 +17,9 @@ package view import ( "math" + "time" + + "go.opencensus.io/metric/metricdata" ) // AggregationData represents an aggregated value from a collection. @@ -24,9 +27,10 @@ import ( // Mosts users won't directly access aggregration data. type AggregationData interface { isAggregationData() bool - addSample(v float64) + addSample(v float64, attachments map[string]interface{}, t time.Time) clone() AggregationData equal(other AggregationData) bool + toPoint(t metricdata.Type, time time.Time) metricdata.Point } const epsilon = 1e-9 @@ -41,7 +45,7 @@ type CountData struct { func (a *CountData) isAggregationData() bool { return true } -func (a *CountData) addSample(v float64) { +func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { a.Value = a.Value + 1 } @@ -58,6 +62,15 @@ func (a *CountData) equal(other AggregationData) bool { return a.Value == a2.Value } +func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + // SumData is the aggregated data for the Sum aggregation. // A sum aggregation processes data and sums up the recordings. // @@ -68,8 +81,8 @@ type SumData struct { func (a *SumData) isAggregationData() bool { return true } -func (a *SumData) addSample(f float64) { - a.Value += f +func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { + a.Value += v } func (a *SumData) clone() AggregationData { @@ -84,26 +97,45 @@ func (a *SumData) equal(other AggregationData) bool { return math.Pow(a.Value-a2.Value, 2) < epsilon } +func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, int64(a.Value)) + case metricdata.TypeCumulativeFloat64: + return metricdata.NewFloat64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + // DistributionData is the aggregated data for the // Distribution aggregation. // // Most users won't directly access distribution data. +// +// For a distribution with N bounds, the associated DistributionData will have +// N+1 buckets. 
type DistributionData struct { - Count int64 // number of data points aggregated - Min float64 // minimum value in the distribution - Max float64 // max value in the distribution - Mean float64 // mean of the distribution - SumOfSquaredDev float64 // sum of the squared deviation from the mean - CountPerBucket []int64 // number of occurrences per bucket - bounds []float64 // histogram distribution of the values + Count int64 // number of data points aggregated + Min float64 // minimum value in the distribution + Max float64 // max value in the distribution + Mean float64 // mean of the distribution + SumOfSquaredDev float64 // sum of the squared deviation from the mean + CountPerBucket []int64 // number of occurrences per bucket + // ExemplarsPerBucket is slice the same length as CountPerBucket containing + // an exemplar for the associated bucket, or nil. + ExemplarsPerBucket []*metricdata.Exemplar + bounds []float64 // histogram distribution of the values } -func newDistributionData(bounds []float64) *DistributionData { +func newDistributionData(agg *Aggregation) *DistributionData { + bucketCount := len(agg.Buckets) + 1 return &DistributionData{ - CountPerBucket: make([]int64, len(bounds)+1), - bounds: bounds, - Min: math.MaxFloat64, - Max: math.SmallestNonzeroFloat64, + CountPerBucket: make([]int64, bucketCount), + ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), + bounds: agg.Buckets, + Min: math.MaxFloat64, + Max: math.SmallestNonzeroFloat64, } } @@ -119,46 +151,62 @@ func (a *DistributionData) variance() float64 { func (a *DistributionData) isAggregationData() bool { return true } -func (a *DistributionData) addSample(f float64) { - if f < a.Min { - a.Min = f +// TODO(songy23): support exemplar attachments. +func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { + if v < a.Min { + a.Min = v } - if f > a.Max { - a.Max = f + if v > a.Max { + a.Max = v } a.Count++ - a.incrementBucketCount(f) + a.addToBucket(v, attachments, t) if a.Count == 1 { - a.Mean = f + a.Mean = v return } oldMean := a.Mean - a.Mean = a.Mean + (f-a.Mean)/float64(a.Count) - a.SumOfSquaredDev = a.SumOfSquaredDev + (f-oldMean)*(f-a.Mean) + a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) + a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) } -func (a *DistributionData) incrementBucketCount(f float64) { - if len(a.bounds) == 0 { - a.CountPerBucket[0]++ - return - } - - for i, b := range a.bounds { - if f < b { - a.CountPerBucket[i]++ - return +func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { + var count *int64 + var i int + var b float64 + for i, b = range a.bounds { + if v < b { + count = &a.CountPerBucket[i] + break } } - a.CountPerBucket[len(a.bounds)]++ + if count == nil { // Last bucket. + i = len(a.bounds) + count = &a.CountPerBucket[i] + } + *count++ + if exemplar := getExemplar(v, attachments, t); exemplar != nil { + a.ExemplarsPerBucket[i] = exemplar + } +} + +func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { + if len(attachments) == 0 { + return nil + } + return &metricdata.Exemplar{ + Value: v, + Timestamp: t, + Attachments: attachments, + } } func (a *DistributionData) clone() AggregationData { - counts := make([]int64, len(a.CountPerBucket)) - copy(counts, a.CountPerBucket) c := *a - c.CountPerBucket = counts + c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) 
+ c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) return &c } @@ -181,6 +229,33 @@ func (a *DistributionData) equal(other AggregationData) bool { return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon } +func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeDistribution: + buckets := []metricdata.Bucket{} + for i := 0; i < len(a.CountPerBucket); i++ { + buckets = append(buckets, metricdata.Bucket{ + Count: a.CountPerBucket[i], + Exemplar: a.ExemplarsPerBucket[i], + }) + } + bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} + + val := &metricdata.Distribution{ + Count: a.Count, + Sum: a.Sum(), + SumOfSquaredDeviation: a.SumOfSquaredDev, + BucketOptions: bucketOptions, + Buckets: buckets, + } + return metricdata.NewDistributionPoint(t, val) + + default: + // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. + panic("unsupported metricdata.Type") + } +} + // LastValueData returns the last value recorded for LastValue aggregation. type LastValueData struct { Value float64 @@ -190,7 +265,7 @@ func (l *LastValueData) isAggregationData() bool { return true } -func (l *LastValueData) addSample(v float64) { +func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { l.Value = v } @@ -205,3 +280,14 @@ func (l *LastValueData) equal(other AggregationData) bool { } return l.Value == a2.Value } + +func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeGaugeInt64: + return metricdata.NewInt64Point(t, int64(l.Value)) + case metricdata.TypeGaugeFloat64: + return metricdata.NewFloat64Point(t, l.Value) + default: + panic("unsupported metricdata.Type") + } +} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index 863a5b62a1..8a6a2c0fdc 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -17,6 +17,7 @@ package view import ( "sort" + "time" "go.opencensus.io/internal/tagencoding" "go.opencensus.io/tag" @@ -31,20 +32,21 @@ type collector struct { a *Aggregation } -func (c *collector) addSample(s string, v float64) { +func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { aggregator, ok := c.signatures[s] if !ok { aggregator = c.a.newData() c.signatures[s] = aggregator } - aggregator.addSample(v) + aggregator.addSample(v, attachments, t) } +// collectRows returns a snapshot of the collected Row values. func (c *collector) collectedRows(keys []tag.Key) []*Row { - var rows []*Row + rows := make([]*Row, 0, len(c.signatures)) for sig, aggregator := range c.signatures { tags := decodeTags([]byte(sig), keys) - row := &Row{tags, aggregator} + row := &Row{Tags: tags, Data: aggregator.clone()} rows = append(rows, row) } return rows diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go index 856fb4e153..7bbedfe1ff 100644 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -13,33 +13,34 @@ // limitations under the License. // -/* -Package view contains support for collecting and exposing aggregates over stats. - -In order to collect measurements, views need to be defined and registered. 
-A view allows recorded measurements to be filtered and aggregated over a time window. - -All recorded measurements can be filtered by a list of tags. - -OpenCensus provides several aggregation methods: count, distribution and sum. -Count aggregation only counts the number of measurement points. Distribution -aggregation provides statistical summary of the aggregated data. Sum distribution -sums up the measurement points. Aggregations are cumulative. - -Users can dynamically create and delete views. - -Libraries can export their own views and claim the view names -by registering them themselves. - -Exporting - -Collected and aggregated data can be exported to a metric collection -backend by registering its exporter. - -Multiple exporters can be registered to upload the data to various -different backends. Users need to unregister the exporters once they -no longer are needed. -*/ +// Package view contains support for collecting and exposing aggregates over stats. +// +// In order to collect measurements, views need to be defined and registered. +// A view allows recorded measurements to be filtered and aggregated. +// +// All recorded measurements can be grouped by a list of tags. +// +// OpenCensus provides several aggregation methods: Count, Distribution and Sum. +// +// Count only counts the number of measurement points recorded. +// Distribution provides statistical summary of the aggregated data by counting +// how many recorded measurements fall into each bucket. +// Sum adds up the measurement values. +// LastValue just keeps track of the most recently recorded measurement value. +// All aggregations are cumulative. +// +// Views can be registered and unregistered at any time during program execution. +// +// Libraries can define views but it is recommended that in most cases registering +// views be left up to applications. +// +// Exporting +// +// Collected and aggregated data can be exported to a metric collection +// backend by registering its exporter. +// +// Multiple exporters can be registered to upload the data to various +// different back ends. package view // import "go.opencensus.io/stats/view" // TODO(acetechnologist): Add a link to the language independent OpenCensus diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go index ffd0d1ac70..7cb59718f5 100644 --- a/vendor/go.opencensus.io/stats/view/export.go +++ b/vendor/go.opencensus.io/stats/view/export.go @@ -27,6 +27,9 @@ var ( // Exporter takes a significant amount of time to // process a Data, that work should be done on another goroutine. // +// It is safe to assume that ExportView will not be called concurrently from +// multiple goroutines. +// // The Data should not be modified. type Exporter interface { ExportView(viewData *Data) diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go index 87bf5d4669..293b54ecbe 100644 --- a/vendor/go.opencensus.io/stats/view/view.go +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -17,19 +17,20 @@ package view import ( "bytes" + "errors" "fmt" "reflect" "sort" "sync/atomic" "time" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" - "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) // View allows users to aggregate the recorded stats.Measurements. -// Views need to be passed to the Subscribe function to be before data will be +// Views need to be passed to the Register function before data will be // collected and sent to Exporters. 
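// ---- editor's example (not part of the vendored patch) ----
// A sketch of defining and registering a View over a measure, per the doc.go
// guidance above. The view name, tag key, and bucket bounds are illustrative
// assumptions.
package example

import (
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

func registerLatencyView(latencyMs *stats.Float64Measure, keyMethod tag.Key) {
	v := &view.View{
		Name:        "example.com/views/latency",
		Description: "Latency distribution grouped by method",
		Measure:     latencyMs,
		TagKeys:     []tag.Key{keyMethod},
		Aggregation: view.Distribution(5, 10, 25, 50, 100, 250),
	}
	if err := view.Register(v); err != nil {
		log.Fatalf("failed to register view: %v", err)
	}
}
// ---- end editor's example ----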
type View struct { Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. @@ -42,7 +43,7 @@ type View struct { // Measure is a stats.Measure to aggregate in this view. Measure stats.Measure - // Aggregation is the aggregation function tp apply to the set of Measurements. + // Aggregation is the aggregation function to apply to the set of Measurements. Aggregation *Aggregation } @@ -67,14 +68,19 @@ func (v *View) same(other *View) bool { v.Measure.Name() == other.Measure.Name() } +// ErrNegativeBucketBounds error returned if histogram contains negative bounds. +// +// Deprecated: this should not be public. +var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") + // canonicalize canonicalizes v by setting explicit // defaults for Name and Description and sorting the TagKeys func (v *View) canonicalize() error { if v.Measure == nil { - return fmt.Errorf("cannot subscribe view %q: measure not set", v.Name) + return fmt.Errorf("cannot register view %q: measure not set", v.Name) } if v.Aggregation == nil { - return fmt.Errorf("cannot subscribe view %q: aggregation not set", v.Name) + return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) } if v.Name == "" { v.Name = v.Measure.Name() @@ -88,20 +94,40 @@ func (v *View) canonicalize() error { sort.Slice(v.TagKeys, func(i, j int) bool { return v.TagKeys[i].Name() < v.TagKeys[j].Name() }) + sort.Float64s(v.Aggregation.Buckets) + for _, b := range v.Aggregation.Buckets { + if b < 0 { + return ErrNegativeBucketBounds + } + } + // drop 0 bucket silently. + v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) + return nil } +func dropZeroBounds(bounds ...float64) []float64 { + for i, bound := range bounds { + if bound > 0 { + return bounds[i:] + } + } + return []float64{} +} + // viewInternal is the internal representation of a View. type viewInternal struct { - view *View // view is the canonicalized View definition associated with this view. - subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access - collector *collector + view *View // view is the canonicalized View definition associated with this view. + subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access + collector *collector + metricDescriptor *metricdata.Descriptor } func newViewInternal(v *View) (*viewInternal, error) { return &viewInternal{ - view: v, - collector: &collector{make(map[string]AggregationData), v.Aggregation}, + view: v, + collector: &collector{make(map[string]AggregationData), v.Aggregation}, + metricDescriptor: viewToMetricDescriptor(v), }, nil } @@ -127,12 +153,12 @@ func (v *viewInternal) collectedRows() []*Row { return v.collector.collectedRows(v.view.TagKeys) } -func (v *viewInternal) addSample(m *tag.Map, val float64) { +func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { if !v.isSubscribed() { return } sig := string(encodeWithKeys(m, v.view.TagKeys)) - v.collector.addSample(sig, val) + v.collector.addSample(sig, val, attachments, t) } // A Data is a set of rows about usage of the single measure associated @@ -163,7 +189,7 @@ func (r *Row) String() string { } // Equal returns true if both rows are equal. Tags are expected to be ordered -// by the key name. Even both rows have the same tags but the tags appear in +// by the key name. Even if both rows have the same tags but the tags appear in // different orders it will return false. 
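// ---- editor's example (not part of the vendored patch) ----
// A sketch of the bucket canonicalization added above: on Register, bounds
// are sorted, a zero bound is dropped silently, and a negative bound causes
// registration to fail with ErrNegativeBucketBounds.
package example

import "go.opencensus.io/stats/view"

func bucketCanonicalization() {
	// After a successful Register, these buckets become [5, 10].
	okAgg := view.Distribution(10, 0, 5)

	// A view using this aggregation fails to register.
	badAgg := view.Distribution(-1, 5)

	_, _ = okAgg, badAgg
}
// ---- end editor's example ----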
func (r *Row) Equal(other *Row) bool { if r == other { @@ -172,11 +198,23 @@ func (r *Row) Equal(other *Row) bool { return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) } -func checkViewName(name string) error { - if len(name) > internal.MaxNameLength { - return fmt.Errorf("view name cannot be larger than %v", internal.MaxNameLength) +const maxNameLength = 255 + +// Returns true if the given string contains only printable characters. +func isPrintable(str string) bool { + for _, r := range str { + if !(r >= ' ' && r <= '~') { + return false + } } - if !internal.IsPrintable(name) { + return true +} + +func checkViewName(name string) error { + if len(name) > maxNameLength { + return fmt.Errorf("view name cannot be larger than %v", maxNameLength) + } + if !isPrintable(name) { return fmt.Errorf("view name needs to be an ASCII string") } return nil diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go new file mode 100644 index 0000000000..293c1646df --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -0,0 +1,149 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" +) + +func getUnit(unit string) metricdata.Unit { + switch unit { + case "1": + return metricdata.UnitDimensionless + case "ms": + return metricdata.UnitMilliseconds + case "By": + return metricdata.UnitBytes + } + return metricdata.UnitDimensionless +} + +func getType(v *View) metricdata.Type { + m := v.Measure + agg := v.Aggregation + + switch agg.Type { + case AggTypeSum: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeDistribution: + return metricdata.TypeCumulativeDistribution + case AggTypeLastValue: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeGaugeInt64 + case *stats.Float64Measure: + return metricdata.TypeGaugeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeCount: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeInt64 + default: + panic("unexpected measure type") + } + default: + panic("unexpected aggregation type") + } +} + +func getLabelKeys(v *View) []metricdata.LabelKey { + labelKeys := []metricdata.LabelKey{} + for _, k := range v.TagKeys { + labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) + } + return labelKeys +} + +func viewToMetricDescriptor(v *View) *metricdata.Descriptor { + return &metricdata.Descriptor{ + Name: v.Name, + Description: v.Description, + Unit: convertUnit(v), + Type: getType(v), + LabelKeys: getLabelKeys(v), + } +} + +func convertUnit(v *View) metricdata.Unit { + switch v.Aggregation.Type { + case 
AggTypeCount: + return metricdata.UnitDimensionless + default: + return getUnit(v.Measure.Unit()) + } +} + +func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { + labelValues := []metricdata.LabelValue{} + tagMap := make(map[string]string) + for _, tag := range row.Tags { + tagMap[tag.Key.Name()] = tag.Value + } + + for _, key := range expectedKeys { + if val, ok := tagMap[key.Key]; ok { + labelValues = append(labelValues, metricdata.NewLabelValue(val)) + } else { + labelValues = append(labelValues, metricdata.LabelValue{}) + } + } + return labelValues +} + +func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries { + return &metricdata.TimeSeries{ + Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, + LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), + StartTime: startTime, + } +} + +func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric { + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } + + rows := v.collectedRows() + if len(rows) == 0 { + return nil + } + + ts := []*metricdata.TimeSeries{} + for _, row := range rows { + ts = append(ts, rowToTimeseries(v, row, now, startTime)) + } + + m := &metricdata.Metric{ + Descriptor: *v.metricDescriptor, + TimeSeries: ts, + } + return m +} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 2d1e8059c2..2f3c018af0 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -17,8 +17,11 @@ package view import ( "fmt" + "sync" "time" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" "go.opencensus.io/stats" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" @@ -43,14 +46,15 @@ type worker struct { timer *time.Ticker c chan command quit, done chan bool + mu sync.RWMutex } var defaultWorker *worker var defaultReportingDuration = 10 * time.Second -// Find returns a subscribed view associated with this name. -// If no subscribed view is found, nil is returned. +// Find returns a registered view associated with this name. +// If no registered view is found, nil is returned. func Find(name string) (v *View) { req := &getViewByNameReq{ name: name, @@ -62,13 +66,8 @@ func Find(name string) (v *View) { } // Register begins collecting data for the given views. -// Once a view is subscribed, it reports data to the registered exporters. +// Once a view is registered, it reports data to the registered exporters. func Register(views ...*View) error { - for _, v := range views { - if err := v.canonicalize(); err != nil { - return err - } - } req := ®isterViewReq{ views: views, err: make(chan error), @@ -94,6 +93,8 @@ func Unregister(views ...*View) { <-req.done } +// RetrieveData gets a snapshot of the data collected for the the view registered +// with the given name. It is intended for testing only. 
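// ---- editor's example (not part of the vendored patch) ----
// A sketch of using RetrieveData in a test, per the comment above. The view
// name is a hypothetical one registered elsewhere.
package example

import (
	"testing"

	"go.opencensus.io/stats/view"
)

func checkCollectedRows(t *testing.T) {
	rows, err := view.RetrieveData("example.com/views/latency")
	if err != nil {
		t.Fatalf("RetrieveData: %v", err)
	}
	for _, row := range rows {
		t.Logf("tags=%v data=%v", row.Tags, row.Data)
	}
}
// ---- end editor's example ----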
func RetrieveData(viewName string) ([]*Row, error) { req := &retrieveDataReq{ now: time.Now(), @@ -105,17 +106,23 @@ func RetrieveData(viewName string) ([]*Row, error) { return resp.rows, resp.err } -func record(tags *tag.Map, ms interface{}) { +func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { req := &recordReq{ - tm: tags, - ms: ms.([]stats.Measurement), + tm: tags, + ms: ms.([]stats.Measurement), + attachments: attachments, + t: time.Now(), } defaultWorker.c <- req } // SetReportingPeriod sets the interval between reporting aggregated views in -// the program. If duration is less than or -// equal to zero, it enables the default behavior. +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. func SetReportingPeriod(d time.Duration) { // TODO(acetechnologist): ensure that the duration d is more than a certain // value. e.g. 1s @@ -140,6 +147,9 @@ func newWorker() *worker { } func (w *worker) start() { + prodMgr := metricproducer.GlobalManager() + prodMgr.AddProducer(w) + for { select { case cmd := <-w.c: @@ -156,6 +166,9 @@ func (w *worker) start() { } func (w *worker) stop() { + prodMgr := metricproducer.GlobalManager() + prodMgr.DeleteProducer(w) + w.quit <- true <-w.done } @@ -173,13 +186,15 @@ func (w *worker) getMeasureRef(name string) *measureRef { } func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { + w.mu.Lock() + defer w.mu.Unlock() vi, err := newViewInternal(v) if err != nil { return nil, err } if x, ok := w.views[vi.view.Name]; ok { if !x.view.same(vi.view) { - return nil, fmt.Errorf("cannot subscribe view %q; a different view with the same name is already subscribed", v.Name) + return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) } // the view is already registered so there is nothing to do and the @@ -192,40 +207,75 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { return vi, nil } +func (w *worker) unregisterView(viewName string) { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.views, viewName) +} + +func (w *worker) reportView(v *viewInternal, now time.Time) { + if !v.isSubscribed() { + return + } + rows := v.collectedRows() + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + viewData := &Data{ + View: v.view, + Start: w.startTimes[v], + End: time.Now(), + Rows: rows, + } + exportersMu.Lock() + for e := range exporters { + e.ExportView(viewData) + } + exportersMu.Unlock() +} + func (w *worker) reportUsage(now time.Time) { + w.mu.Lock() + defer w.mu.Unlock() for _, v := range w.views { - if !v.isSubscribed() { - continue - } - rows := v.collectedRows() - _, ok := w.startTimes[v] - if !ok { - w.startTimes[v] = now - } - // Make sure collector is never going - // to mutate the exported data. 
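// ---- editor's example (not part of the vendored patch) ----
// A sketch of a custom view exporter together with the reporting-period knob
// documented above. Per the Exporter contract earlier in this patch,
// ExportView is not called concurrently, so no locking is needed here.
package example

import (
	"fmt"
	"time"

	"go.opencensus.io/stats/view"
)

type printExporter struct{}

func (printExporter) ExportView(d *view.Data) {
	fmt.Printf("%s: %d rows (%v .. %v)\n", d.View.Name, len(d.Rows), d.Start, d.End)
}

func setUpExporting() {
	view.RegisterExporter(printExporter{})
	// Exporters support different minimum periods; 10s is the library default.
	view.SetReportingPeriod(10 * time.Second)
}
// ---- end editor's example ----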
- rows = deepCopyRowData(rows) - viewData := &Data{ - View: v.view, - Start: w.startTimes[v], - End: time.Now(), - Rows: rows, - } - exportersMu.Lock() - for e := range exporters { - e.ExportView(viewData) - } - exportersMu.Unlock() + w.reportView(v, now) } } -func deepCopyRowData(rows []*Row) []*Row { - newRows := make([]*Row, 0, len(rows)) - for _, r := range rows { - newRows = append(newRows, &Row{ - Data: r.Data.clone(), - Tags: r.Tags, - }) +func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { + if !v.isSubscribed() { + return nil } - return newRows + + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + + var startTime time.Time + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } else { + startTime = w.startTimes[v] + } + + return viewToMetric(v, now, startTime) +} + +// Read reads all view data and returns them as metrics. +// It is typically invoked by metric reader to export stats in metric format. +func (w *worker) Read() []*metricdata.Metric { + w.mu.Lock() + defer w.mu.Unlock() + now := time.Now() + metrics := make([]*metricdata.Metric, 0, len(w.views)) + for _, v := range w.views { + metric := w.toMetric(v, now) + if metric != nil { + metrics = append(metrics, metric) + } + } + return metrics } diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go index ef79ec383c..0267e179ae 100644 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -56,6 +56,12 @@ type registerViewReq struct { } func (cmd *registerViewReq) handleCommand(w *worker) { + for _, v := range cmd.views { + if err := v.canonicalize(); err != nil { + cmd.err <- err + return + } + } var errstr []string for _, view := range cmd.views { vi, err := w.tryRegisterView(view) @@ -73,7 +79,7 @@ func (cmd *registerViewReq) handleCommand(w *worker) { } } -// unregisterFromViewReq is the command to unsubscribe to a view. Has no +// unregisterFromViewReq is the command to unregister to a view. Has no // impact on the data collection for client that are pulling data from the // library. type unregisterFromViewReq struct { @@ -88,13 +94,16 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { continue } + // Report pending data for this view before removing it. + w.reportView(vi, time.Now()) + vi.unsubscribe() if !vi.isSubscribed() { // this was the last subscription and view is not collecting anymore. // The collected data can be cleared. vi.clearRows() } - delete(w.views, name) + w.unregisterView(name) } cmd.done <- struct{}{} } @@ -112,6 +121,8 @@ type retrieveDataResp struct { } func (cmd *retrieveDataReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() vi, ok := w.views[cmd.v] if !ok { cmd.c <- &retrieveDataResp{ @@ -137,24 +148,28 @@ func (cmd *retrieveDataReq) handleCommand(w *worker) { // recordReq is the command to record data related to multiple measures // at once. 
type recordReq struct { - tm *tag.Map - ms []stats.Measurement + tm *tag.Map + ms []stats.Measurement + attachments map[string]interface{} + t time.Time } func (cmd *recordReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() for _, m := range cmd.ms { - if (m == stats.Measurement{}) { // not subscribed + if (m == stats.Measurement{}) { // not registered continue } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { - v.addSample(cmd.tm, m.Value()) + v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) } } } // setReportingPeriodReq is the command to modify the duration between -// reporting the collected data to the subscribed clients. +// reporting the collected data to the registered clients. type setReportingPeriodReq struct { d time.Duration c chan bool diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go index ed528bcb3c..b27d1b26b1 100644 --- a/vendor/go.opencensus.io/tag/context.go +++ b/vendor/go.opencensus.io/tag/context.go @@ -15,7 +15,9 @@ package tag -import "context" +import ( + "context" +) // FromContext returns the tag map stored in the context. func FromContext(ctx context.Context) *Map { diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go index ebbed95000..71ec913657 100644 --- a/vendor/go.opencensus.io/tag/key.go +++ b/vendor/go.opencensus.io/tag/key.go @@ -21,7 +21,7 @@ type Key struct { } // NewKey creates or retrieves a string key identified by name. -// Calling NewKey consequently with the same name returns the same key. +// Calling NewKey more than once with the same name returns the same key. func NewKey(name string) (Key, error) { if !checkKeyName(name) { return Key{}, errInvalidKeyName @@ -29,6 +29,15 @@ func NewKey(name string) (Key, error) { return Key{name: name}, nil } +// MustNewKey returns a key with the given name, and panics if name is an invalid key name. +func MustNewKey(name string) Key { + k, err := NewKey(name) + if err != nil { + panic(err) + } + return k +} + // Name returns the name of the key. func (k Key) Name() string { return k.name diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go index 5b72ba6ad3..0272ef85a4 100644 --- a/vendor/go.opencensus.io/tag/map.go +++ b/vendor/go.opencensus.io/tag/map.go @@ -28,10 +28,15 @@ type Tag struct { Value string } +type tagContent struct { + value string + m metadatas +} + // Map is a map of tags. Use New to create a context containing // a new Map. type Map struct { - m map[Key]string + m map[Key]tagContent } // Value returns the value for the key if a value for the key exists. 
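// ---- editor's example (not part of the vendored patch) ----
// A sketch of creating tag keys and a tagged context with the key API above.
// MustNewKey (new in this update) panics on an invalid name, which suits
// package-level key variables.
package example

import (
	"context"

	"go.opencensus.io/tag"
)

var keyMethod = tag.MustNewKey("method")

func taggedContext(ctx context.Context) (context.Context, error) {
	// Insert adds the tag only if "method" is not already set in ctx.
	return tag.New(ctx, tag.Insert(keyMethod, "GET"))
}
// ---- end editor's example ----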
@@ -40,7 +45,7 @@ func (m *Map) Value(k Key) (string, bool) { return "", false } v, ok := m.m[k] - return v, ok + return v.value, ok } func (m *Map) String() string { @@ -62,21 +67,21 @@ func (m *Map) String() string { return buffer.String() } -func (m *Map) insert(k Key, v string) { +func (m *Map) insert(k Key, v string, md metadatas) { if _, ok := m.m[k]; ok { return } - m.m[k] = v + m.m[k] = tagContent{value: v, m: md} } -func (m *Map) update(k Key, v string) { +func (m *Map) update(k Key, v string, md metadatas) { if _, ok := m.m[k]; ok { - m.m[k] = v + m.m[k] = tagContent{value: v, m: md} } } -func (m *Map) upsert(k Key, v string) { - m.m[k] = v +func (m *Map) upsert(k Key, v string, md metadatas) { + m.m[k] = tagContent{value: v, m: md} } func (m *Map) delete(k Key) { @@ -84,7 +89,7 @@ func (m *Map) delete(k Key) { } func newMap() *Map { - return &Map{m: make(map[Key]string)} + return &Map{m: make(map[Key]tagContent)} } // Mutator modifies a tag map. @@ -95,13 +100,17 @@ type Mutator interface { // Insert returns a mutator that inserts a // value associated with k. If k already exists in the tag map, // mutator doesn't update the value. -func Insert(k Key, v string) Mutator { +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Insert(k Key, v string, mds ...Metadata) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } - m.insert(k, v) + m.insert(k, v, createMetadatas(mds...)) return m, nil }, } @@ -110,13 +119,17 @@ func Insert(k Key, v string) Mutator { // Update returns a mutator that updates the // value of the tag associated with k with v. If k doesn't // exists in the tag map, the mutator doesn't insert the value. -func Update(k Key, v string) Mutator { +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Update(k Key, v string, mds ...Metadata) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } - m.update(k, v) + m.update(k, v, createMetadatas(mds...)) return m, nil }, } @@ -126,18 +139,37 @@ func Update(k Key, v string) Mutator { // value of the tag associated with k with v. It inserts the // value if k doesn't exist already. It mutates the value // if k already exists. -func Upsert(k Key, v string) Mutator { +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Upsert(k Key, v string, mds ...Metadata) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } - m.upsert(k, v) + m.upsert(k, v, createMetadatas(mds...)) return m, nil }, } } +func createMetadatas(mds ...Metadata) metadatas { + var metas metadatas + if len(mds) > 0 { + for _, md := range mds { + if md != nil { + md(&metas) + } + } + } else { + WithTTL(TTLUnlimitedPropagation)(&metas) + } + return metas + +} + // Delete returns a mutator that deletes // the value associated with k. 
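// ---- editor's example (not part of the vendored patch) ----
// A sketch of the new optional Metadata parameter on the mutators above.
// WithTTL controls propagation; TTLNoPropagation keeps a tag in-process,
// so the binary Encode path later in this patch skips it.
package example

import (
	"context"

	"go.opencensus.io/tag"
)

func localOnlyTag(ctx context.Context, k tag.Key) (context.Context, error) {
	return tag.New(ctx, tag.Upsert(k, "internal-value", tag.WithTTL(tag.TTLNoPropagation)))
}
// ---- end editor's example ----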
func Delete(k Key) Mutator { @@ -160,10 +192,10 @@ func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { if !checkKeyName(k.Name()) { return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) } - if !checkValue(v) { + if !checkValue(v.value) { return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) } - m.insert(k, v) + m.insert(k, v.value, v.m) } } var err error diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go index 33be1f0d46..c242e695c8 100644 --- a/vendor/go.opencensus.io/tag/map_codec.go +++ b/vendor/go.opencensus.io/tag/map_codec.go @@ -162,14 +162,19 @@ func (eg *encoderGRPC) bytes() []byte { // Encode encodes the tag map into a []byte. It is useful to propagate // the tag maps on wire in binary format. func Encode(m *Map) []byte { + if m == nil { + return nil + } eg := &encoderGRPC{ buf: make([]byte, len(m.m)), } - eg.writeByte(byte(tagsVersionID)) + eg.writeByte(tagsVersionID) for k, v := range m.m { - eg.writeByte(byte(keyTypeString)) - eg.writeStringWithVarintLen(k.name) - eg.writeBytesWithVarintLen([]byte(v)) + if v.m.ttl.ttl == valueTTLUnlimitedPropagation { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k.name) + eg.writeBytesWithVarintLen([]byte(v.value)) + } } return eg.bytes() } @@ -177,45 +182,58 @@ func Encode(m *Map) []byte { // Decode decodes the given []byte into a tag map. func Decode(bytes []byte) (*Map, error) { ts := newMap() + err := DecodeEach(bytes, ts.upsert) + if err != nil { + // no partial failures + return nil, err + } + return ts, nil +} +// DecodeEach decodes the given serialized tag map, calling handler for each +// tag key and value decoded. +func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error { eg := &encoderGRPC{ buf: bytes, } if len(eg.buf) == 0 { - return ts, nil + return nil } version := eg.readByte() if version > tagsVersionID { - return nil, fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) + return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) } for !eg.readEnded() { typ := keyType(eg.readByte()) if typ != keyTypeString { - return nil, fmt.Errorf("cannot decode: invalid key type: %q", typ) + return fmt.Errorf("cannot decode: invalid key type: %q", typ) } k, err := eg.readBytesWithVarintLen() if err != nil { - return nil, err + return err } v, err := eg.readBytesWithVarintLen() if err != nil { - return nil, err + return err } key, err := NewKey(string(k)) if err != nil { - return nil, err // no partial failures + return err } val := string(v) if !checkValue(val) { - return nil, errInvalidValue // no partial failures + return errInvalidValue + } + fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation))) + if err != nil { + return err } - ts.upsert(key, val) } - return ts, nil + return nil } diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go new file mode 100644 index 0000000000..6571a583ea --- /dev/null +++ b/vendor/go.opencensus.io/tag/metadata.go @@ -0,0 +1,52 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +const ( + // valueTTLNoPropagation prevents tag from propagating. + valueTTLNoPropagation = 0 + + // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops. + valueTTLUnlimitedPropagation = -1 +) + +// TTL is metadata that specifies number of hops a tag can propagate. +// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata +type TTL struct { + ttl int +} + +var ( + // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops. + TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation} + + // TTLNoPropagation is TTL metadata that prevents tag from propagating. + TTLNoPropagation = TTL{ttl: valueTTLNoPropagation} +) + +type metadatas struct { + ttl TTL +} + +// Metadata applies metadatas specified by the function. +type Metadata func(*metadatas) + +// WithTTL applies metadata with provided ttl. +func WithTTL(ttl TTL) Metadata { + return func(m *metadatas) { + m.ttl = ttl + } +} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go index f81cd0b4a7..b34d95e34a 100644 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -25,7 +25,7 @@ func do(ctx context.Context, f func(ctx context.Context)) { m := FromContext(ctx) keyvals := make([]string, 0, 2*len(m.m)) for k, v := range m.m { - keyvals = append(keyvals, k.Name(), v) + keyvals = append(keyvals, k.Name(), v.value) } pprof.Do(ctx, pprof.Labels(keyvals...), f) } diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go index 01f0f90831..0c54492a2b 100644 --- a/vendor/go.opencensus.io/trace/basetypes.go +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -59,6 +59,11 @@ func Int64Attribute(key string, value int64) Attribute { return Attribute{key: key, value: value} } +// Float64Attribute returns a float64-valued attribute. +func Float64Attribute(key string, value float64) Attribute { + return Attribute{key: key, value: value} +} + // StringAttribute returns a string-valued attribute. func StringAttribute(key string, value string) Attribute { return Attribute{key: key, value: value} @@ -71,8 +76,8 @@ type LinkType int32 // LinkType values. const ( LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. - LinkTypeChild // The current span is a child of the linked span. - LinkTypeParent // The current span is the parent of the linked span. + LinkTypeChild // The linked span is a child of the current span. + LinkTypeParent // The linked span is the parent of the current span. ) // Link represents a reference from one span to another span. 
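// ---- editor's example (not part of the vendored patch) ----
// A sketch of attaching span attributes, including the Float64Attribute
// constructor added above. The span and attribute names are illustrative.
package example

import (
	"context"

	"go.opencensus.io/trace"
)

func handleRequest(ctx context.Context) {
	ctx, span := trace.StartSpan(ctx, "example.com/HandleRequest")
	defer span.End()

	span.AddAttributes(
		trace.StringAttribute("peer", "cache-1"),
		trace.Int64Attribute("attempt", 2),
		trace.Float64Attribute("fraction_complete", 0.75),
	)

	doWork(ctx) // hypothetical helper that continues the trace
}

func doWork(ctx context.Context) {}
// ---- end editor's example ----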
diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go index d5473a798c..775f8274fa 100644 --- a/vendor/go.opencensus.io/trace/config.go +++ b/vendor/go.opencensus.io/trace/config.go @@ -14,7 +14,11 @@ package trace -import "go.opencensus.io/trace/internal" +import ( + "sync" + + "go.opencensus.io/trace/internal" +) // Config represents the global tracing configuration. type Config struct { @@ -23,12 +27,42 @@ type Config struct { // IDGenerator is for internal use only. IDGenerator internal.IDGenerator + + // MaxAnnotationEventsPerSpan is max number of annotation events per span + MaxAnnotationEventsPerSpan int + + // MaxMessageEventsPerSpan is max number of message events per span + MaxMessageEventsPerSpan int + + // MaxAnnotationEventsPerSpan is max number of attributes per span + MaxAttributesPerSpan int + + // MaxLinksPerSpan is max number of links per span + MaxLinksPerSpan int } +var configWriteMu sync.Mutex + +const ( + // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span + DefaultMaxAnnotationEventsPerSpan = 32 + + // DefaultMaxMessageEventsPerSpan is default max number of message events per span + DefaultMaxMessageEventsPerSpan = 128 + + // DefaultMaxAttributesPerSpan is default max number of attributes per span + DefaultMaxAttributesPerSpan = 32 + + // DefaultMaxLinksPerSpan is default max number of links per span + DefaultMaxLinksPerSpan = 32 +) + // ApplyConfig applies changes to the global tracing configuration. // // Fields not provided in the given config are going to be preserved. func ApplyConfig(cfg Config) { + configWriteMu.Lock() + defer configWriteMu.Unlock() c := *config.Load().(*Config) if cfg.DefaultSampler != nil { c.DefaultSampler = cfg.DefaultSampler @@ -36,5 +70,17 @@ func ApplyConfig(cfg Config) { if cfg.IDGenerator != nil { c.IDGenerator = cfg.IDGenerator } + if cfg.MaxAnnotationEventsPerSpan > 0 { + c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan + } + if cfg.MaxMessageEventsPerSpan > 0 { + c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan + } + if cfg.MaxAttributesPerSpan > 0 { + c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan + } + if cfg.MaxLinksPerSpan > 0 { + c.MaxLinksPerSpan = cfg.MaxLinksPerSpan + } config.Store(&c) } diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go index a2b54e58ca..04b1ee4f38 100644 --- a/vendor/go.opencensus.io/trace/doc.go +++ b/vendor/go.opencensus.io/trace/doc.go @@ -32,6 +32,8 @@ to sample a subset of traces, or use AlwaysSample to collect a trace on every ru trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) +Be careful about using trace.AlwaysSample in a production application with +significant traffic: a new trace will be started and exported for every request. Adding Spans to a Trace @@ -42,7 +44,7 @@ It is common to want to capture all the activity of a function call in a span. F this to work, the function must take a context.Context as a parameter. 
Add these two lines to the top of the function: - ctx, span := trace.StartSpan(ctx, "my.org/Run") + ctx, span := trace.StartSpan(ctx, "example.com/Run") defer span.End() StartSpan will create a new top-level span if the context diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go new file mode 100644 index 0000000000..ffc264f23d --- /dev/null +++ b/vendor/go.opencensus.io/trace/evictedqueue.go @@ -0,0 +1,38 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +type evictedQueue struct { + queue []interface{} + capacity int + droppedCount int +} + +func newEvictedQueue(capacity int) *evictedQueue { + eq := &evictedQueue{ + capacity: capacity, + queue: make([]interface{}, 0), + } + + return eq +} + +func (eq *evictedQueue) add(value interface{}) { + if len(eq.queue) == eq.capacity { + eq.queue = eq.queue[1:] + eq.droppedCount++ + } + eq.queue = append(eq.queue, value) +} diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go index c522550fa1..e0d9a4b99e 100644 --- a/vendor/go.opencensus.io/trace/export.go +++ b/vendor/go.opencensus.io/trace/export.go @@ -16,6 +16,7 @@ package trace import ( "sync" + "sync/atomic" "time" ) @@ -30,9 +31,11 @@ type Exporter interface { ExportSpan(s *SpanData) } +type exportersMap map[Exporter]struct{} + var ( - exportersMu sync.Mutex - exporters map[Exporter]struct{} + exporterMu sync.Mutex + exporters atomic.Value ) // RegisterExporter adds to the list of Exporters that will receive sampled @@ -40,20 +43,31 @@ var ( // // Binaries can register exporters, libraries shouldn't register exporters. func RegisterExporter(e Exporter) { - exportersMu.Lock() - if exporters == nil { - exporters = make(map[Exporter]struct{}) + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } } - exporters[e] = struct{}{} - exportersMu.Unlock() + new[e] = struct{}{} + exporters.Store(new) + exporterMu.Unlock() } // UnregisterExporter removes from the list of Exporters the Exporter that was // registered with the given name. func UnregisterExporter(e Exporter) { - exportersMu.Lock() - delete(exporters, e) - exportersMu.Unlock() + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + delete(new, e) + exporters.Store(new) + exporterMu.Unlock() } // SpanData contains all the information collected by a Span. @@ -71,6 +85,13 @@ type SpanData struct { Annotations []Annotation MessageEvents []MessageEvent Status - Links []Link - HasRemoteParent bool + Links []Link + HasRemoteParent bool + DroppedAttributeCount int + DroppedAnnotationCount int + DroppedMessageEventCount int + DroppedLinkCount int + + // ChildSpanCount holds the number of child span created for this span. 
+ ChildSpanCount int } diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go index 1c8b9b34b2..7e808d8f30 100644 --- a/vendor/go.opencensus.io/trace/internal/internal.go +++ b/vendor/go.opencensus.io/trace/internal/internal.go @@ -15,6 +15,7 @@ // Package internal provides trace internals. package internal +// IDGenerator allows custom generators for TraceId and SpanId. type IDGenerator interface { NewTraceID() [16]byte NewSpanID() [8]byte diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go new file mode 100644 index 0000000000..dc7a295c77 --- /dev/null +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -0,0 +1,61 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "github.com/golang/groupcache/lru" +) + +// A simple lru.Cache wrapper that tracks the keys of the current contents and +// the cumulative number of evicted items. +type lruMap struct { + cacheKeys map[lru.Key]bool + cache *lru.Cache + droppedCount int +} + +func newLruMap(size int) *lruMap { + lm := &lruMap{ + cacheKeys: make(map[lru.Key]bool), + cache: lru.New(size), + droppedCount: 0, + } + lm.cache.OnEvicted = func(key lru.Key, value interface{}) { + delete(lm.cacheKeys, key) + lm.droppedCount++ + } + return lm +} + +func (lm lruMap) len() int { + return lm.cache.Len() +} + +func (lm lruMap) keys() []interface{} { + keys := []interface{}{} + for k := range lm.cacheKeys { + keys = append(keys, k) + } + return keys +} + +func (lm *lruMap) add(key, value interface{}) { + lm.cacheKeys[lru.Key(key)] = true + lm.cache.Add(lru.Key(key), value) +} + +func (lm *lruMap) get(key interface{}) (interface{}, bool) { + return lm.cache.Get(key) +} diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go index 313f8b68e2..71c10f9e3b 100644 --- a/vendor/go.opencensus.io/trace/sampling.go +++ b/vendor/go.opencensus.io/trace/sampling.go @@ -20,10 +20,6 @@ import ( const defaultSamplingProbability = 1e-4 -func newDefaultSampler() Sampler { - return ProbabilitySampler(defaultSamplingProbability) -} - // Sampler decides whether a trace should be sampled and exported. type Sampler func(SamplingParameters) SamplingDecision @@ -62,6 +58,9 @@ func ProbabilitySampler(fraction float64) Sampler { } // AlwaysSample returns a Sampler that samples every trace. +// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. 
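// ---- editor's example (not part of the vendored patch) ----
// A sketch of configuring the tracer with the new per-span limits from
// trace/config.go above, alongside a production-safe sampler. The numbers
// are illustrative.
package example

import "go.opencensus.io/trace"

func configureTracing() {
	trace.ApplyConfig(trace.Config{
		// Prefer a probability sampler over AlwaysSample under real traffic.
		DefaultSampler:       trace.ProbabilitySampler(0.01),
		MaxAttributesPerSpan: 64, // excess attributes are evicted via an LRU map
		MaxLinksPerSpan:      16, // excess links are dropped FIFO and counted
	})
}
// ---- end editor's example ----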
func AlwaysSample() Sampler { return func(p SamplingParameters) SamplingDecision { return SamplingDecision{Sample: true} diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go index 19c6930efa..3f8977b41b 100644 --- a/vendor/go.opencensus.io/trace/trace.go +++ b/vendor/go.opencensus.io/trace/trace.go @@ -25,6 +25,7 @@ import ( "time" "go.opencensus.io/internal" + "go.opencensus.io/trace/tracestate" ) // Span represents a span of a trace. It has an associated SpanContext, and @@ -41,6 +42,20 @@ type Span struct { data *SpanData mu sync.Mutex // protects the contents of *data (but not the pointer value.) spanContext SpanContext + + // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry + // is removed to create room for a new entry. + lruAttributes *lruMap + + // annotations are stored in FIFO queue capped by configured limit. + annotations *evictedQueue + + // messageEvents are stored in FIFO queue capped by configured limit. + messageEvents *evictedQueue + + // links are stored in FIFO queue capped by configured limit. + links *evictedQueue + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. *spanStore endOnce sync.Once @@ -88,6 +103,7 @@ type SpanContext struct { TraceID TraceID SpanID SpanID TraceOptions TraceOptions + Tracestate *tracestate.Tracestate } type contextKey struct{} @@ -98,13 +114,6 @@ func FromContext(ctx context.Context) *Span { return s } -// WithSpan returns a new context with the given Span attached. -// -// Deprecated: Use NewContext. -func WithSpan(parent context.Context, s *Span) context.Context { - return NewContext(parent, s) -} - // NewContext returns a new context with the given Span attached. func NewContext(parent context.Context, s *Span) context.Context { return context.WithValue(parent, contextKey{}, s) @@ -154,10 +163,14 @@ func WithSampler(sampler Sampler) StartOption { // StartSpan starts a new child span of the current span in the context. If // there is no span in the context, creates a new trace and span. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { var opts StartOptions var parent SpanContext if p := FromContext(ctx); p != nil { + p.addChild() parent = p.spanContext } for _, op := range o { @@ -174,6 +187,9 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont // // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is // preferred for cases where the parent is propagated via an incoming request. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { var opts StartOptions for _, op := range o { @@ -185,26 +201,6 @@ func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanCont return NewContext(ctx, span), span } -// NewSpan returns a new span. -// -// If parent is not nil, created span will be a child of the parent. -// -// Deprecated: Use StartSpan. 
-func NewSpan(name string, parent *Span, o StartOptions) *Span { - var parentSpanContext SpanContext - if parent != nil { - parentSpanContext = parent.SpanContext() - } - return startSpanInternal(name, parent != nil, parentSpanContext, false, o) -} - -// NewSpanWithRemoteParent returns a new span with the given parent SpanContext. -// -// Deprecated: Use StartSpanWithRemoteParent. -func NewSpanWithRemoteParent(name string, parent SpanContext, o StartOptions) *Span { - return startSpanInternal(name, true, parent, true, o) -} - func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { span := &Span{} span.spanContext = parent @@ -245,6 +241,11 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa Name: name, HasRemoteParent: remoteParent, } + span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + span.links = newEvictedQueue(cfg.MaxLinksPerSpan) + if hasParent { span.data.ParentSpanID = parent.SpanID } @@ -262,26 +263,29 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa // End ends the span. func (s *Span) End() { + if s == nil { + return + } + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } if !s.IsRecordingEvents() { return } s.endOnce.Do(func() { - if s.executionTracerTaskEnd != nil { - s.executionTracerTaskEnd() - } - // TODO: optimize to avoid this call if sd won't be used. - sd := s.makeSpanData() - sd.EndTime = internal.MonotonicEndTime(sd.StartTime) - if s.spanStore != nil { - s.spanStore.finished(s, sd) - } - if s.spanContext.IsSampled() { - // TODO: consider holding exportersMu for less time. - exportersMu.Lock() - for e := range exporters { - e.ExportSpan(sd) + exp, _ := exporters.Load().(exportersMap) + mustExport := s.spanContext.IsSampled() && len(exp) > 0 + if s.spanStore != nil || mustExport { + sd := s.makeSpanData() + sd.EndTime = internal.MonotonicEndTime(sd.StartTime) + if s.spanStore != nil { + s.spanStore.finished(s, sd) + } + if mustExport { + for e := range exp { + e.ExportSpan(sd) + } } - exportersMu.Unlock() } }) } @@ -292,11 +296,21 @@ func (s *Span) makeSpanData() *SpanData { var sd SpanData s.mu.Lock() sd = *s.data - if s.data.Attributes != nil { - sd.Attributes = make(map[string]interface{}) - for k, v := range s.data.Attributes { - sd.Attributes[k] = v - } + if s.lruAttributes.len() > 0 { + sd.Attributes = s.lruAttributesToAttributeMap() + sd.DroppedAttributeCount = s.lruAttributes.droppedCount + } + if len(s.annotations.queue) > 0 { + sd.Annotations = s.interfaceArrayToAnnotationArray() + sd.DroppedAnnotationCount = s.annotations.droppedCount + } + if len(s.messageEvents.queue) > 0 { + sd.MessageEvents = s.interfaceArrayToMessageEventArray() + sd.DroppedMessageEventCount = s.messageEvents.droppedCount + } + if len(s.links.queue) > 0 { + sd.Links = s.interfaceArrayToLinksArray() + sd.DroppedLinkCount = s.links.droppedCount } s.mu.Unlock() return &sd @@ -310,6 +324,16 @@ func (s *Span) SpanContext() SpanContext { return s.spanContext } +// SetName sets the name of the span, if it is recording events. +func (s *Span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Name = name + s.mu.Unlock() +} + // SetStatus sets the status of the span, if it is recording events. 
func (s *Span) SetStatus(status Status) { if !s.IsRecordingEvents() { @@ -320,6 +344,57 @@ func (s *Span) SetStatus(status Status) { s.mu.Unlock() } +func (s *Span) interfaceArrayToLinksArray() []Link { + linksArr := make([]Link, 0) + for _, value := range s.links.queue { + linksArr = append(linksArr, value.(Link)) + } + return linksArr +} + +func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { + messageEventArr := make([]MessageEvent, 0) + for _, value := range s.messageEvents.queue { + messageEventArr = append(messageEventArr, value.(MessageEvent)) + } + return messageEventArr +} + +func (s *Span) interfaceArrayToAnnotationArray() []Annotation { + annotationArr := make([]Annotation, 0) + for _, value := range s.annotations.queue { + annotationArr = append(annotationArr, value.(Annotation)) + } + return annotationArr +} + +func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { + attributes := make(map[string]interface{}) + for _, key := range s.lruAttributes.keys() { + value, ok := s.lruAttributes.get(key) + if ok { + keyStr := key.(string) + attributes[keyStr] = value + } + } + return attributes +} + +func (s *Span) copyToCappedAttributes(attributes []Attribute) { + for _, a := range attributes { + s.lruAttributes.add(a.key, a.value) + } +} + +func (s *Span) addChild() { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.ChildSpanCount++ + s.mu.Unlock() +} + // AddAttributes sets attributes in the span. // // Existing attributes whose keys appear in the attributes parameter are overwritten. @@ -328,10 +403,7 @@ func (s *Span) AddAttributes(attributes ...Attribute) { return } s.mu.Lock() - if s.data.Attributes == nil { - s.data.Attributes = make(map[string]interface{}) - } - copyAttributes(s.data.Attributes, attributes) + s.copyToCappedAttributes(attributes) s.mu.Unlock() } @@ -351,7 +423,7 @@ func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...in m = make(map[string]interface{}) copyAttributes(m, attributes) } - s.data.Annotations = append(s.data.Annotations, Annotation{ + s.annotations.add(Annotation{ Time: now, Message: msg, Attributes: m, @@ -367,7 +439,7 @@ func (s *Span) printStringInternal(attributes []Attribute, str string) { a = make(map[string]interface{}) copyAttributes(a, attributes) } - s.data.Annotations = append(s.data.Annotations, Annotation{ + s.annotations.add(Annotation{ Time: now, Message: str, Attributes: a, @@ -404,7 +476,7 @@ func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedBy } now := time.Now() s.mu.Lock() - s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ + s.messageEvents.add(MessageEvent{ Time: now, EventType: MessageEventTypeSent, MessageID: messageID, @@ -426,7 +498,7 @@ func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compresse } now := time.Now() s.mu.Lock() - s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ + s.messageEvents.add(MessageEvent{ Time: now, EventType: MessageEventTypeRecv, MessageID: messageID, @@ -442,7 +514,7 @@ func (s *Span) AddLink(l Link) { return } s.mu.Lock() - s.data.Links = append(s.data.Links, l) + s.links.add(l) s.mu.Unlock() } @@ -474,29 +546,39 @@ func init() { gen.spanIDInc |= 1 config.Store(&Config{ - DefaultSampler: ProbabilitySampler(defaultSamplingProbability), - IDGenerator: gen, + DefaultSampler: ProbabilitySampler(defaultSamplingProbability), + IDGenerator: gen, + MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, + MaxAnnotationEventsPerSpan: 
DefaultMaxAnnotationEventsPerSpan, + MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, + MaxLinksPerSpan: DefaultMaxLinksPerSpan, }) } type defaultIDGenerator struct { sync.Mutex - traceIDRand *rand.Rand + + // Please keep these as the first fields + // so that these 8 byte fields will be aligned on addresses + // divisible by 8, on both 32-bit and 64-bit machines when + // performing atomic increments and accesses. + // See: + // * https://github.com/census-instrumentation/opencensus-go/issues/587 + // * https://github.com/census-instrumentation/opencensus-go/issues/865 + // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG + nextSpanID uint64 + spanIDInc uint64 + traceIDAdd [2]uint64 - nextSpanID uint64 - spanIDInc uint64 + traceIDRand *rand.Rand } // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -// mu should be held while this function is called. func (gen *defaultIDGenerator) NewSpanID() [8]byte { - gen.Lock() - id := gen.nextSpanID - gen.nextSpanID += gen.spanIDInc - if gen.nextSpanID == 0 { - gen.nextSpanID += gen.spanIDInc + var id uint64 + for id == 0 { + id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc) } - gen.Unlock() var sid [8]byte binary.LittleEndian.PutUint64(sid[:], id) return sid diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go new file mode 100644 index 0000000000..2d6c713eb3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/tracestate/tracestate.go @@ -0,0 +1,147 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracestate implements support for the Tracestate header of the +// W3C TraceContext propagation format. +package tracestate + +import ( + "fmt" + "regexp" +) + +const ( + keyMaxSize = 256 + valueMaxSize = 256 + maxKeyValuePairs = 32 +) + +const ( + keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` +) + +var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) +var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) + +// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different +// vendors propagate additional information and inter-operate with their legacy Id formats. +type Tracestate struct { + entries []Entry +} + +// Entry represents one key-value pair in a list of key-value pair of Tracestate. +type Entry struct { + // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter, + // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and + // forward slashes /. 
+ Key string + + // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the + // range 0x20 to 0x7E) except comma , and =. + Value string +} + +// Entries returns a slice of Entry. +func (ts *Tracestate) Entries() []Entry { + if ts == nil { + return nil + } + return ts.entries +} + +func (ts *Tracestate) remove(key string) *Entry { + for index, entry := range ts.entries { + if entry.Key == key { + ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) + return &entry + } + } + return nil +} + +func (ts *Tracestate) add(entries []Entry) error { + for _, entry := range entries { + ts.remove(entry.Key) + } + if len(ts.entries)+len(entries) > maxKeyValuePairs { + return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", + len(entries), len(ts.entries), maxKeyValuePairs) + } + ts.entries = append(entries, ts.entries...) + return nil +} + +func isValid(entry Entry) bool { + return keyValidationRegExp.MatchString(entry.Key) && + valueValidationRegExp.MatchString(entry.Value) +} + +func containsDuplicateKey(entries ...Entry) (string, bool) { + keyMap := make(map[string]int) + for _, entry := range entries { + if _, ok := keyMap[entry.Key]; ok { + return entry.Key, true + } + keyMap[entry.Key] = 1 + } + return "", false +} + +func areEntriesValid(entries ...Entry) (*Entry, bool) { + for _, entry := range entries { + if !isValid(entry) { + return &entry, false + } + } + return nil, true +} + +// New creates a Tracestate object from a parent and/or entries (key-value pair). +// Entries from the parent are copied if present. The entries passed to this function +// are inserted in front of those copied from the parent. If an entry copied from the +// parent contains the same key as one of the entry in entries then the entry copied +// from the parent is removed. See add func. +// +// An error is returned with nil Tracestate if +// 1. one or more entry in entries is invalid. +// 2. two or more entries in the input entries have the same key. +// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs. +// (duplicate entry is counted only once). +func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { + if parent == nil && len(entries) == 0 { + return nil, nil + } + if entry, ok := areEntriesValid(entries...); !ok { + return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) + } + + if key, duplicate := containsDuplicateKey(entries...); duplicate { + return nil, fmt.Errorf("contains duplicate keys (%s)", key) + } + + tracestate := Tracestate{} + + if parent != nil && len(parent.entries) > 0 { + tracestate.entries = append([]Entry{}, parent.entries...) + } + + err := tracestate.add(entries) + if err != nil { + return nil, err + } + return &tracestate, nil +} diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/README.md b/vendor/golang.org/x/tools/README.md new file mode 100644 index 0000000000..20be9e1abd --- /dev/null +++ b/vendor/golang.org/x/tools/README.md @@ -0,0 +1,27 @@ +# Go Tools + +This subrepository holds the source for various packages and tools that support +the Go programming language. + +Some of the tools, `godoc` and `vet` for example, are included in binary Go +distributions. + +Others, including the Go `guru` and the test coverage tool, can be fetched with +`go get`. + +Packages include a type-checker for Go and an implementation of the +Static Single Assignment form (SSA) representation for Go programs. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/tools/...`. You can +also manually git clone the repository to `$GOPATH/src/golang.org/x/tools`. 
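The go.opencensus.io changes earlier in this diff (the new lrumap.go plus the evicted queues wired into trace.go) put hard caps on per-span attributes, annotations, message events, and links. A minimal sketch of how those caps surface through the public API, assuming the vendored v0.22.3 surface; the cap of 2 is arbitrary and chosen only to trigger eviction:

```go
package main

import (
	"context"

	"go.opencensus.io/trace"
)

func main() {
	// Config field names follow the struct literal set in init() above;
	// DefaultMaxAttributesPerSpan etc. are the library defaults.
	trace.ApplyConfig(trace.Config{
		DefaultSampler:       trace.AlwaysSample(),
		MaxAttributesPerSpan: 2,
	})

	_, span := trace.StartSpan(context.Background(), "demo")
	defer span.End()

	// With the cap at 2, the third attribute evicts the oldest entry from
	// the span's lruMap and increments DroppedAttributeCount in the
	// exported SpanData.
	span.AddAttributes(
		trace.StringAttribute("a", "1"),
		trace.StringAttribute("b", "2"),
		trace.StringAttribute("c", "3"),
	)
}
```

AlwaysSample only keeps the demo deterministic; per the warning added to sampling.go above, it is too aggressive for production traffic.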
+
+## Report Issues / Send Patches
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html.
+
+The main issue tracker for the tools repository is located at
+https://github.com/golang/go/issues. Prefix your issue with "x/tools/(your
+subdir):" in the subject line, so it is easy to find.
diff --git a/vendor/golang.org/x/tools/container/intsets/popcnt_amd64.go b/vendor/golang.org/x/tools/container/intsets/popcnt_amd64.go
new file mode 100644
index 0000000000..99ea813d28
--- /dev/null
+++ b/vendor/golang.org/x/tools/container/intsets/popcnt_amd64.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!appengine,!gccgo
+
+package intsets
+
+func popcnt(x word) int
+func havePOPCNT() bool
+
+var hasPOPCNT = havePOPCNT()
+
+// popcount returns the population count (number of set bits) of x.
+func popcount(x word) int {
+	if hasPOPCNT {
+		return popcnt(x)
+	}
+	return popcountTable(x) // faster than Hacker's Delight
+}
diff --git a/vendor/golang.org/x/tools/container/intsets/popcnt_amd64.s b/vendor/golang.org/x/tools/container/intsets/popcnt_amd64.s
new file mode 100644
index 0000000000..05c3d6fb57
--- /dev/null
+++ b/vendor/golang.org/x/tools/container/intsets/popcnt_amd64.s
@@ -0,0 +1,30 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!appengine,!gccgo
+
+#include "textflag.h"
+
+// func havePOPCNT() bool
+TEXT ·havePOPCNT(SB),4,$0
+	MOVQ	$1, AX
+	CPUID
+	SHRQ	$23, CX
+	ANDQ	$1, CX
+	MOVB	CX, ret+0(FP)
+	RET
+
+// func popcnt(word) int
+TEXT ·popcnt(SB),NOSPLIT,$0-8
+	XORQ	AX, AX
+	MOVQ	x+0(FP), SI
+	// POPCNT (SI), AX is not recognized by Go assembler,
+	// so we assemble it ourselves.
+	BYTE	$0xf3
+	BYTE	$0x48
+	BYTE	$0x0f
+	BYTE	$0xb8
+	BYTE	$0xc6
+	MOVQ	AX, ret+8(FP)
+	RET
diff --git a/vendor/golang.org/x/tools/container/intsets/popcnt_gccgo.go b/vendor/golang.org/x/tools/container/intsets/popcnt_gccgo.go
new file mode 100644
index 0000000000..82a8875c85
--- /dev/null
+++ b/vendor/golang.org/x/tools/container/intsets/popcnt_gccgo.go
@@ -0,0 +1,9 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+
+package intsets
+
+func popcount(x word) int
diff --git a/vendor/golang.org/x/tools/container/intsets/popcnt_gccgo_c.c b/vendor/golang.org/x/tools/container/intsets/popcnt_gccgo_c.c
new file mode 100644
index 0000000000..08abb32ec4
--- /dev/null
+++ b/vendor/golang.org/x/tools/container/intsets/popcnt_gccgo_c.c
@@ -0,0 +1,19 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define _STRINGIFY2_(x) #x
+#define _STRINGIFY_(x) _STRINGIFY2_(x)
+#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
+
+extern intptr_t popcount(uintptr_t x) __asm__(GOSYM_PREFIX GOPKGPATH ".popcount");
+
+intptr_t popcount(uintptr_t x) {
+	return __builtin_popcountl((unsigned long)(x));
+}
diff --git a/vendor/golang.org/x/tools/container/intsets/popcnt_generic.go b/vendor/golang.org/x/tools/container/intsets/popcnt_generic.go
new file mode 100644
index 0000000000..3985a1da1a
--- /dev/null
+++ b/vendor/golang.org/x/tools/container/intsets/popcnt_generic.go
@@ -0,0 +1,33 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine
+// +build !gccgo
+
+package intsets
+
+import "runtime"
+
+// We compared three algorithms---Hacker's Delight, table lookup,
+// and AMD64's SSE4.1 hardware POPCNT---on a 2.67GHz Xeon X5550.
+//
+// % GOARCH=amd64 go test -run=NONE -bench=Popcount
+// POPCNT          5.12 ns/op
+// Table           8.53 ns/op
+// HackersDelight  9.96 ns/op
+//
+// % GOARCH=386 go test -run=NONE -bench=Popcount
+// Table          10.4  ns/op
+// HackersDelight  5.23 ns/op
+//
+// (AMD64's ABM1 hardware supports ntz and nlz too,
+// but they aren't critical.)
+
+// popcount returns the population count (number of set bits) of x.
+func popcount(x word) int {
+	if runtime.GOARCH == "386" {
+		return popcountHD(uint32(x))
+	}
+	return popcountTable(x)
+}
diff --git a/vendor/golang.org/x/tools/container/intsets/sparse.go b/vendor/golang.org/x/tools/container/intsets/sparse.go
new file mode 100644
index 0000000000..5db01c1a44
--- /dev/null
+++ b/vendor/golang.org/x/tools/container/intsets/sparse.go
@@ -0,0 +1,1091 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package intsets provides Sparse, a compact and fast representation
+// for sparse sets of int values.
+//
+// The time complexity of the operations Len, Insert, Remove and Has
+// is in O(n) but in practice those methods are faster and more
+// space-efficient than equivalent operations on sets based on the Go
+// map type. The IsEmpty, Min, Max, Clear and TakeMin operations
+// require constant time.
+//
+package intsets // import "golang.org/x/tools/container/intsets"
+
+// TODO(adonovan):
+// - Add InsertAll(...int), RemoveAll(...int)
+// - Add 'bool changed' results for {Intersection,Difference}With too.
+//
+// TODO(adonovan): implement Dense, a dense bit vector with a similar API.
+// The space usage would be proportional to Max(), not Len(), and the
+// implementation would be based upon big.Int.
+//
+// TODO(adonovan): opt: make UnionWith and Difference faster.
+// These are the hot-spots for go/pointer.
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// A Sparse is a set of int values.
+// Sparse operations (even queries) are not concurrency-safe.
+//
+// The zero value for Sparse is a valid empty set.
+//
+// Sparse sets must be copied using the Copy method, not by assigning
+// a Sparse value.
+//
+type Sparse struct {
+	// An uninitialized Sparse represents an empty set.
+	// An empty set may also be represented by
+	// root.next == root.prev == &root.
+	//
+	// The root is always the block with the smallest offset.
+	// It can be empty, but only if it is the only block; in that case, offset is
+	// MaxInt (which is not a valid offset).
+ root block +} + +type word uintptr + +const ( + _m = ^word(0) + bitsPerWord = 8 << (_m>>8&1 + _m>>16&1 + _m>>32&1) + bitsPerBlock = 256 // optimal value for go/pointer solver performance + wordsPerBlock = bitsPerBlock / bitsPerWord +) + +// Limit values of implementation-specific int type. +const ( + MaxInt = int(^uint(0) >> 1) + MinInt = -MaxInt - 1 +) + +// -- block ------------------------------------------------------------ + +// A set is represented as a circular doubly-linked list of blocks, +// each containing an offset and a bit array of fixed size +// bitsPerBlock; the blocks are ordered by increasing offset. +// +// The set contains an element x iff the block whose offset is x - (x +// mod bitsPerBlock) has the bit (x mod bitsPerBlock) set, where mod +// is the Euclidean remainder. +// +// A block may only be empty transiently. +// +type block struct { + offset int // offset mod bitsPerBlock == 0 + bits [wordsPerBlock]word // contains at least one set bit + next, prev *block // doubly-linked list of blocks +} + +// wordMask returns the word index (in block.bits) +// and single-bit mask for the block's ith bit. +func wordMask(i uint) (w uint, mask word) { + w = i / bitsPerWord + mask = 1 << (i % bitsPerWord) + return +} + +// insert sets the block b's ith bit and +// returns true if it was not already set. +// +func (b *block) insert(i uint) bool { + w, mask := wordMask(i) + if b.bits[w]&mask == 0 { + b.bits[w] |= mask + return true + } + return false +} + +// remove clears the block's ith bit and +// returns true if the bit was previously set. +// NB: may leave the block empty. +// +func (b *block) remove(i uint) bool { + w, mask := wordMask(i) + if b.bits[w]&mask != 0 { + b.bits[w] &^= mask + return true + } + return false +} + +// has reports whether the block's ith bit is set. +func (b *block) has(i uint) bool { + w, mask := wordMask(i) + return b.bits[w]&mask != 0 +} + +// empty reports whether b.len()==0, but more efficiently. +func (b *block) empty() bool { + for _, w := range b.bits { + if w != 0 { + return false + } + } + return true +} + +// len returns the number of set bits in block b. +func (b *block) len() int { + var l int + for _, w := range b.bits { + l += popcount(w) + } + return l +} + +// max returns the maximum element of the block. +// The block must not be empty. +func (b *block) max() int { + bi := b.offset + bitsPerBlock + // Decrement bi by number of high zeros in last.bits. + for i := len(b.bits) - 1; i >= 0; i-- { + if w := b.bits[i]; w != 0 { + return bi - nlz(w) - 1 + } + bi -= bitsPerWord + } + panic("BUG: empty block") +} + +// min returns the minimum element of the block, +// and also removes it if take is set. +// The block must not be initially empty. +// NB: may leave the block empty. +func (b *block) min(take bool) int { + for i, w := range b.bits { + if w != 0 { + tz := ntz(w) + if take { + b.bits[i] = w &^ (1 << uint(tz)) + } + return b.offset + int(i*bitsPerWord) + tz + } + } + panic("BUG: empty block") +} + +// lowerBound returns the smallest element of the block that is greater than or +// equal to the element corresponding to the ith bit. If there is no such +// element, the second return value is false. 
+func (b *block) lowerBound(i uint) (int, bool) { + w := i / bitsPerWord + bit := i % bitsPerWord + + if val := b.bits[w] >> bit; val != 0 { + return b.offset + int(i) + ntz(val), true + } + + for w++; w < wordsPerBlock; w++ { + if val := b.bits[w]; val != 0 { + return b.offset + int(w*bitsPerWord) + ntz(val), true + } + } + + return 0, false +} + +// forEach calls f for each element of block b. +// f must not mutate b's enclosing Sparse. +func (b *block) forEach(f func(int)) { + for i, w := range b.bits { + offset := b.offset + i*bitsPerWord + for bi := 0; w != 0 && bi < bitsPerWord; bi++ { + if w&1 != 0 { + f(offset) + } + offset++ + w >>= 1 + } + } +} + +// offsetAndBitIndex returns the offset of the block that would +// contain x and the bit index of x within that block. +// +func offsetAndBitIndex(x int) (int, uint) { + mod := x % bitsPerBlock + if mod < 0 { + // Euclidean (non-negative) remainder + mod += bitsPerBlock + } + return x - mod, uint(mod) +} + +// -- Sparse -------------------------------------------------------------- + +// none is a shared, empty, sentinel block that indicates the end of a block +// list. +var none block + +// Dummy type used to generate an implicit panic. This must be defined at the +// package level; if it is defined inside a function, it prevents the inlining +// of that function. +type to_copy_a_sparse_you_must_call_its_Copy_method struct{} + +// init ensures s is properly initialized. +func (s *Sparse) init() { + root := &s.root + if root.next == nil { + root.offset = MaxInt + root.next = root + root.prev = root + } else if root.next.prev != root { + // Copying a Sparse x leads to pernicious corruption: the + // new Sparse y shares the old linked list, but iteration + // on y will never encounter &y.root so it goes into a + // loop. Fail fast before this occurs. + // We don't want to call panic here because it prevents the + // inlining of this function. + _ = (interface{}(nil)).(to_copy_a_sparse_you_must_call_its_Copy_method) + } +} + +func (s *Sparse) first() *block { + s.init() + if s.root.offset == MaxInt { + return &none + } + return &s.root +} + +// next returns the next block in the list, or end if b is the last block. +func (s *Sparse) next(b *block) *block { + if b.next == &s.root { + return &none + } + return b.next +} + +// prev returns the previous block in the list, or end if b is the first block. +func (s *Sparse) prev(b *block) *block { + if b.prev == &s.root { + return &none + } + return b.prev +} + +// IsEmpty reports whether the set s is empty. +func (s *Sparse) IsEmpty() bool { + return s.root.next == nil || s.root.offset == MaxInt +} + +// Len returns the number of elements in the set s. +func (s *Sparse) Len() int { + var l int + for b := s.first(); b != &none; b = s.next(b) { + l += b.len() + } + return l +} + +// Max returns the maximum element of the set s, or MinInt if s is empty. +func (s *Sparse) Max() int { + if s.IsEmpty() { + return MinInt + } + return s.root.prev.max() +} + +// Min returns the minimum element of the set s, or MaxInt if s is empty. +func (s *Sparse) Min() int { + if s.IsEmpty() { + return MaxInt + } + return s.root.min(false) +} + +// LowerBound returns the smallest element >= x, or MaxInt if there is no such +// element. 
+func (s *Sparse) LowerBound(x int) int { + offset, i := offsetAndBitIndex(x) + for b := s.first(); b != &none; b = s.next(b) { + if b.offset > offset { + return b.min(false) + } + if b.offset == offset { + if y, ok := b.lowerBound(i); ok { + return y + } + } + } + return MaxInt +} + +// block returns the block that would contain offset, +// or nil if s contains no such block. +// Precondition: offset is a multiple of bitsPerBlock. +func (s *Sparse) block(offset int) *block { + for b := s.first(); b != &none && b.offset <= offset; b = s.next(b) { + if b.offset == offset { + return b + } + } + return nil +} + +// Insert adds x to the set s, and reports whether the set grew. +func (s *Sparse) Insert(x int) bool { + offset, i := offsetAndBitIndex(x) + + b := s.first() + for ; b != &none && b.offset <= offset; b = s.next(b) { + if b.offset == offset { + return b.insert(i) + } + } + + // Insert new block before b. + new := s.insertBlockBefore(b) + new.offset = offset + return new.insert(i) +} + +// removeBlock removes a block and returns the block that followed it (or end if +// it was the last block). +func (s *Sparse) removeBlock(b *block) *block { + if b != &s.root { + b.prev.next = b.next + b.next.prev = b.prev + if b.next == &s.root { + return &none + } + return b.next + } + + first := s.root.next + if first == &s.root { + // This was the only block. + s.Clear() + return &none + } + s.root.offset = first.offset + s.root.bits = first.bits + if first.next == &s.root { + // Single block remaining. + s.root.next = &s.root + s.root.prev = &s.root + } else { + s.root.next = first.next + first.next.prev = &s.root + } + return &s.root +} + +// Remove removes x from the set s, and reports whether the set shrank. +func (s *Sparse) Remove(x int) bool { + offset, i := offsetAndBitIndex(x) + if b := s.block(offset); b != nil { + if !b.remove(i) { + return false + } + if b.empty() { + s.removeBlock(b) + } + return true + } + return false +} + +// Clear removes all elements from the set s. +func (s *Sparse) Clear() { + s.root = block{ + offset: MaxInt, + next: &s.root, + prev: &s.root, + } +} + +// If set s is non-empty, TakeMin sets *p to the minimum element of +// the set s, removes that element from the set and returns true. +// Otherwise, it returns false and *p is undefined. +// +// This method may be used for iteration over a worklist like so: +// +// var x int +// for worklist.TakeMin(&x) { use(x) } +// +func (s *Sparse) TakeMin(p *int) bool { + if s.IsEmpty() { + return false + } + *p = s.root.min(true) + if s.root.empty() { + s.removeBlock(&s.root) + } + return true +} + +// Has reports whether x is an element of the set s. +func (s *Sparse) Has(x int) bool { + offset, i := offsetAndBitIndex(x) + if b := s.block(offset); b != nil { + return b.has(i) + } + return false +} + +// forEach applies function f to each element of the set s in order. +// +// f must not mutate s. Consequently, forEach is not safe to expose +// to clients. In any case, using "range s.AppendTo()" allows more +// natural control flow with continue/break/return. +// +func (s *Sparse) forEach(f func(int)) { + for b := s.first(); b != &none; b = s.next(b) { + b.forEach(f) + } +} + +// Copy sets s to the value of x. 
+func (s *Sparse) Copy(x *Sparse) {
+	if s == x {
+		return
+	}
+
+	xb := x.first()
+	sb := s.first()
+	for xb != &none {
+		if sb == &none {
+			sb = s.insertBlockBefore(sb)
+		}
+		sb.offset = xb.offset
+		sb.bits = xb.bits
+		xb = x.next(xb)
+		sb = s.next(sb)
+	}
+	s.discardTail(sb)
+}
+
+// insertBlockBefore returns a new block, inserting it before next.
+// If next is the root, the root is replaced. If next is end, the block is
+// inserted at the end.
+func (s *Sparse) insertBlockBefore(next *block) *block {
+	if s.IsEmpty() {
+		if next != &none {
+			panic("BUG: passed block with empty set")
+		}
+		return &s.root
+	}
+
+	if next == &s.root {
+		// Special case: we need to create a new block that will become the root
+		// block. The old root block becomes the second block.
+		second := s.root
+		s.root = block{
+			next: &second,
+		}
+		if second.next == &s.root {
+			s.root.prev = &second
+		} else {
+			s.root.prev = second.prev
+			second.next.prev = &second
+			second.prev = &s.root
+		}
+		return &s.root
+	}
+	if next == &none {
+		// Insert before root.
+		next = &s.root
+	}
+	b := new(block)
+	b.next = next
+	b.prev = next.prev
+	b.prev.next = b
+	next.prev = b
+	return b
+}
+
+// discardTail removes block b and all its successors from s.
+func (s *Sparse) discardTail(b *block) {
+	if b != &none {
+		if b == &s.root {
+			s.Clear()
+		} else {
+			b.prev.next = &s.root
+			s.root.prev = b.prev
+		}
+	}
+}
+
+// IntersectionWith sets s to the intersection s ∩ x.
+func (s *Sparse) IntersectionWith(x *Sparse) {
+	if s == x {
+		return
+	}
+
+	xb := x.first()
+	sb := s.first()
+	for xb != &none && sb != &none {
+		switch {
+		case xb.offset < sb.offset:
+			xb = x.next(xb)
+
+		case xb.offset > sb.offset:
+			sb = s.removeBlock(sb)
+
+		default:
+			var sum word
+			for i := range sb.bits {
+				r := xb.bits[i] & sb.bits[i]
+				sb.bits[i] = r
+				sum |= r
+			}
+			if sum != 0 {
+				sb = s.next(sb)
+			} else {
+				// sb will be overwritten or removed
+			}
+
+			xb = x.next(xb)
+		}
+	}
+
+	s.discardTail(sb)
+}
+
+// Intersection sets s to the intersection x ∩ y.
+func (s *Sparse) Intersection(x, y *Sparse) {
+	switch {
+	case s == x:
+		s.IntersectionWith(y)
+		return
+	case s == y:
+		s.IntersectionWith(x)
+		return
+	case x == y:
+		s.Copy(x)
+		return
+	}
+
+	xb := x.first()
+	yb := y.first()
+	sb := s.first()
+	for xb != &none && yb != &none {
+		switch {
+		case xb.offset < yb.offset:
+			xb = x.next(xb)
+			continue
+		case xb.offset > yb.offset:
+			yb = y.next(yb)
+			continue
+		}
+
+		if sb == &none {
+			sb = s.insertBlockBefore(sb)
+		}
+		sb.offset = xb.offset
+
+		var sum word
+		for i := range sb.bits {
+			r := xb.bits[i] & yb.bits[i]
+			sb.bits[i] = r
+			sum |= r
+		}
+		if sum != 0 {
+			sb = s.next(sb)
+		} else {
+			// sb will be overwritten or removed
+		}
+
+		xb = x.next(xb)
+		yb = y.next(yb)
+	}
+
+	s.discardTail(sb)
+}
+
+// Intersects reports whether s ∩ x ≠ ∅.
+func (s *Sparse) Intersects(x *Sparse) bool {
+	sb := s.first()
+	xb := x.first()
+	for sb != &none && xb != &none {
+		switch {
+		case xb.offset < sb.offset:
+			xb = x.next(xb)
+		case xb.offset > sb.offset:
+			sb = s.next(sb)
+		default:
+			for i := range sb.bits {
+				if sb.bits[i]&xb.bits[i] != 0 {
+					return true
+				}
+			}
+			sb = s.next(sb)
+			xb = x.next(xb)
+		}
+	}
+	return false
+}
+
+// UnionWith sets s to the union s ∪ x, and reports whether s grew.
+func (s *Sparse) UnionWith(x *Sparse) bool {
+	if s == x {
+		return false
+	}
+
+	var changed bool
+	xb := x.first()
+	sb := s.first()
+	for xb != &none {
+		if sb != &none && sb.offset == xb.offset {
+			for i := range xb.bits {
+				if sb.bits[i] != xb.bits[i] {
+					sb.bits[i] |= xb.bits[i]
+					changed = true
+				}
+			}
+			xb = x.next(xb)
+		} else if sb == &none || sb.offset > xb.offset {
+			sb = s.insertBlockBefore(sb)
+			sb.offset = xb.offset
+			sb.bits = xb.bits
+			changed = true
+
+			xb = x.next(xb)
+		}
+		sb = s.next(sb)
+	}
+	return changed
+}
+
+// Union sets s to the union x ∪ y.
+func (s *Sparse) Union(x, y *Sparse) {
+	switch {
+	case x == y:
+		s.Copy(x)
+		return
+	case s == x:
+		s.UnionWith(y)
+		return
+	case s == y:
+		s.UnionWith(x)
+		return
+	}
+
+	xb := x.first()
+	yb := y.first()
+	sb := s.first()
+	for xb != &none || yb != &none {
+		if sb == &none {
+			sb = s.insertBlockBefore(sb)
+		}
+		switch {
+		case yb == &none || (xb != &none && xb.offset < yb.offset):
+			sb.offset = xb.offset
+			sb.bits = xb.bits
+			xb = x.next(xb)
+
+		case xb == &none || (yb != &none && yb.offset < xb.offset):
+			sb.offset = yb.offset
+			sb.bits = yb.bits
+			yb = y.next(yb)
+
+		default:
+			sb.offset = xb.offset
+			for i := range xb.bits {
+				sb.bits[i] = xb.bits[i] | yb.bits[i]
+			}
+			xb = x.next(xb)
+			yb = y.next(yb)
+		}
+		sb = s.next(sb)
+	}
+
+	s.discardTail(sb)
+}
+
+// DifferenceWith sets s to the difference s ∖ x.
+func (s *Sparse) DifferenceWith(x *Sparse) {
+	if s == x {
+		s.Clear()
+		return
+	}
+
+	xb := x.first()
+	sb := s.first()
+	for xb != &none && sb != &none {
+		switch {
+		case xb.offset > sb.offset:
+			sb = s.next(sb)
+
+		case xb.offset < sb.offset:
+			xb = x.next(xb)
+
+		default:
+			var sum word
+			for i := range sb.bits {
+				r := sb.bits[i] & ^xb.bits[i]
+				sb.bits[i] = r
+				sum |= r
+			}
+			if sum == 0 {
+				sb = s.removeBlock(sb)
+			} else {
+				sb = s.next(sb)
+			}
+			xb = x.next(xb)
+		}
+	}
+}
+
+// Difference sets s to the difference x ∖ y.
+func (s *Sparse) Difference(x, y *Sparse) {
+	switch {
+	case x == y:
+		s.Clear()
+		return
+	case s == x:
+		s.DifferenceWith(y)
+		return
+	case s == y:
+		var y2 Sparse
+		y2.Copy(y)
+		s.Difference(x, &y2)
+		return
+	}
+
+	xb := x.first()
+	yb := y.first()
+	sb := s.first()
+	for xb != &none && yb != &none {
+		if xb.offset > yb.offset {
+			// y has block, x has &none
+			yb = y.next(yb)
+			continue
+		}
+
+		if sb == &none {
+			sb = s.insertBlockBefore(sb)
+		}
+		sb.offset = xb.offset
+
+		switch {
+		case xb.offset < yb.offset:
+			// x has block, y has &none
+			sb.bits = xb.bits
+
+			sb = s.next(sb)
+
+		default:
+			// x and y have corresponding blocks
+			var sum word
+			for i := range sb.bits {
+				r := xb.bits[i] & ^yb.bits[i]
+				sb.bits[i] = r
+				sum |= r
+			}
+			if sum != 0 {
+				sb = s.next(sb)
+			} else {
+				// sb will be overwritten or removed
+			}
+
+			yb = y.next(yb)
+		}
+		xb = x.next(xb)
+	}
+
+	for xb != &none {
+		if sb == &none {
+			sb = s.insertBlockBefore(sb)
+		}
+		sb.offset = xb.offset
+		sb.bits = xb.bits
+		sb = s.next(sb)
+
+		xb = x.next(xb)
+	}
+
+	s.discardTail(sb)
+}
+
+// SymmetricDifferenceWith sets s to the symmetric difference s ∆ x.
+func (s *Sparse) SymmetricDifferenceWith(x *Sparse) {
+	if s == x {
+		s.Clear()
+		return
+	}
+
+	sb := s.first()
+	xb := x.first()
+	for xb != &none && sb != &none {
+		switch {
+		case sb.offset < xb.offset:
+			sb = s.next(sb)
+		case xb.offset < sb.offset:
+			nb := s.insertBlockBefore(sb)
+			nb.offset = xb.offset
+			nb.bits = xb.bits
+			xb = x.next(xb)
+		default:
+			var sum word
+			for i := range sb.bits {
+				r := sb.bits[i] ^ xb.bits[i]
+				sb.bits[i] = r
+				sum |= r
+			}
+			if sum == 0 {
+				sb = s.removeBlock(sb)
+			} else {
+				sb = s.next(sb)
+			}
+			xb = x.next(xb)
+		}
+	}
+
+	for xb != &none { // append the tail of x to s
+		sb = s.insertBlockBefore(sb)
+		sb.offset = xb.offset
+		sb.bits = xb.bits
+		sb = s.next(sb)
+		xb = x.next(xb)
+	}
+}
+
+// SymmetricDifference sets s to the symmetric difference x ∆ y.
+func (s *Sparse) SymmetricDifference(x, y *Sparse) {
+	switch {
+	case x == y:
+		s.Clear()
+		return
+	case s == x:
+		s.SymmetricDifferenceWith(y)
+		return
+	case s == y:
+		s.SymmetricDifferenceWith(x)
+		return
+	}
+
+	sb := s.first()
+	xb := x.first()
+	yb := y.first()
+	for xb != &none && yb != &none {
+		if sb == &none {
+			sb = s.insertBlockBefore(sb)
+		}
+		switch {
+		case yb.offset < xb.offset:
+			sb.offset = yb.offset
+			sb.bits = yb.bits
+			sb = s.next(sb)
+			yb = y.next(yb)
+		case xb.offset < yb.offset:
+			sb.offset = xb.offset
+			sb.bits = xb.bits
+			sb = s.next(sb)
+			xb = x.next(xb)
+		default:
+			var sum word
+			for i := range sb.bits {
+				r := xb.bits[i] ^ yb.bits[i]
+				sb.bits[i] = r
+				sum |= r
+			}
+			if sum != 0 {
+				sb.offset = xb.offset
+				sb = s.next(sb)
+			}
+			xb = x.next(xb)
+			yb = y.next(yb)
+		}
+	}
+
+	for xb != &none { // append the tail of x to s
+		if sb == &none {
+			sb = s.insertBlockBefore(sb)
+		}
+		sb.offset = xb.offset
+		sb.bits = xb.bits
+		sb = s.next(sb)
+		xb = x.next(xb)
+	}
+
+	for yb != &none { // append the tail of y to s
+		if sb == &none {
+			sb = s.insertBlockBefore(sb)
+		}
+		sb.offset = yb.offset
+		sb.bits = yb.bits
+		sb = s.next(sb)
+		yb = y.next(yb)
+	}
+
+	s.discardTail(sb)
+}
+
+// SubsetOf reports whether s ∖ x = ∅.
+func (s *Sparse) SubsetOf(x *Sparse) bool {
+	if s == x {
+		return true
+	}
+
+	sb := s.first()
+	xb := x.first()
+	for sb != &none {
+		switch {
+		case xb == &none || xb.offset > sb.offset:
+			return false
+		case xb.offset < sb.offset:
+			xb = x.next(xb)
+		default:
+			for i := range sb.bits {
+				if sb.bits[i]&^xb.bits[i] != 0 {
+					return false
+				}
+			}
+			sb = s.next(sb)
+			xb = x.next(xb)
+		}
+	}
+	return true
+}
+
+// Equals reports whether the sets s and t have the same elements.
+func (s *Sparse) Equals(t *Sparse) bool {
+	if s == t {
+		return true
+	}
+	sb := s.first()
+	tb := t.first()
+	for {
+		switch {
+		case sb == &none && tb == &none:
+			return true
+		case sb == &none || tb == &none:
+			return false
+		case sb.offset != tb.offset:
+			return false
+		case sb.bits != tb.bits:
+			return false
+		}
+
+		sb = s.next(sb)
+		tb = t.next(tb)
+	}
+}
+
+// String returns a human-readable description of the set s.
+func (s *Sparse) String() string {
+	var buf bytes.Buffer
+	buf.WriteByte('{')
+	s.forEach(func(x int) {
+		if buf.Len() > 1 {
+			buf.WriteByte(' ')
+		}
+		fmt.Fprintf(&buf, "%d", x)
+	})
+	buf.WriteByte('}')
+	return buf.String()
+}
+
+// BitString returns the set as a string of 1s and 0s denoting the sum
+// of the i'th powers of 2, for each i in s. A radix point, always
+// preceded by a digit, appears if the sum is non-integral.
+// +// Examples: +// {}.BitString() = "0" +// {4,5}.BitString() = "110000" +// {-3}.BitString() = "0.001" +// {-3,0,4,5}.BitString() = "110001.001" +// +func (s *Sparse) BitString() string { + if s.IsEmpty() { + return "0" + } + + min, max := s.Min(), s.Max() + var nbytes int + if max > 0 { + nbytes = max + } + nbytes++ // zero bit + radix := nbytes + if min < 0 { + nbytes += len(".") - min + } + + b := make([]byte, nbytes) + for i := range b { + b[i] = '0' + } + if radix < nbytes { + b[radix] = '.' + } + s.forEach(func(x int) { + if x >= 0 { + x += len(".") + } + b[radix-x] = '1' + }) + return string(b) +} + +// GoString returns a string showing the internal representation of +// the set s. +// +func (s *Sparse) GoString() string { + var buf bytes.Buffer + for b := s.first(); b != &none; b = s.next(b) { + fmt.Fprintf(&buf, "block %p {offset=%d next=%p prev=%p", + b, b.offset, b.next, b.prev) + for _, w := range b.bits { + fmt.Fprintf(&buf, " 0%016x", w) + } + fmt.Fprintf(&buf, "}\n") + } + return buf.String() +} + +// AppendTo returns the result of appending the elements of s to slice +// in order. +func (s *Sparse) AppendTo(slice []int) []int { + s.forEach(func(x int) { + slice = append(slice, x) + }) + return slice +} + +// -- Testing/debugging ------------------------------------------------ + +// check returns an error if the representation invariants of s are violated. +func (s *Sparse) check() error { + s.init() + if s.root.empty() { + // An empty set must have only the root block with offset MaxInt. + if s.root.next != &s.root { + return fmt.Errorf("multiple blocks with empty root block") + } + if s.root.offset != MaxInt { + return fmt.Errorf("empty set has offset %d, should be MaxInt", s.root.offset) + } + return nil + } + for b := s.first(); ; b = s.next(b) { + if b.offset%bitsPerBlock != 0 { + return fmt.Errorf("bad offset modulo: %d", b.offset) + } + if b.empty() { + return fmt.Errorf("empty block") + } + if b.prev.next != b { + return fmt.Errorf("bad prev.next link") + } + if b.next.prev != b { + return fmt.Errorf("bad next.prev link") + } + if b.next == &s.root { + break + } + if b.offset >= b.next.offset { + return fmt.Errorf("bad offset order: b.offset=%d, b.next.offset=%d", + b.offset, b.next.offset) + } + } + return nil +} diff --git a/vendor/golang.org/x/tools/container/intsets/util.go b/vendor/golang.org/x/tools/container/intsets/util.go new file mode 100644 index 0000000000..dd1db86b1c --- /dev/null +++ b/vendor/golang.org/x/tools/container/intsets/util.go @@ -0,0 +1,84 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package intsets + +// From Hacker's Delight, fig 5.2. +func popcountHD(x uint32) int { + x -= (x >> 1) & 0x55555555 + x = (x & 0x33333333) + ((x >> 2) & 0x33333333) + x = (x + (x >> 4)) & 0x0f0f0f0f + x = x + (x >> 8) + x = x + (x >> 16) + return int(x & 0x0000003f) +} + +var a [1 << 8]byte + +func init() { + for i := range a { + var n byte + for x := i; x != 0; x >>= 1 { + if x&1 != 0 { + n++ + } + } + a[i] = n + } +} + +func popcountTable(x word) int { + return int(a[byte(x>>(0*8))] + + a[byte(x>>(1*8))] + + a[byte(x>>(2*8))] + + a[byte(x>>(3*8))] + + a[byte(x>>(4*8))] + + a[byte(x>>(5*8))] + + a[byte(x>>(6*8))] + + a[byte(x>>(7*8))]) +} + +// nlz returns the number of leading zeros of x. +// From Hacker's Delight, fig 5.11. 
+func nlz(x word) int { + x |= (x >> 1) + x |= (x >> 2) + x |= (x >> 4) + x |= (x >> 8) + x |= (x >> 16) + x |= (x >> 32) + return popcount(^x) +} + +// ntz returns the number of trailing zeros of x. +// From Hacker's Delight, fig 5.13. +func ntz(x word) int { + if x == 0 { + return bitsPerWord + } + n := 1 + if bitsPerWord == 64 { + if (x & 0xffffffff) == 0 { + n = n + 32 + x = x >> 32 + } + } + if (x & 0x0000ffff) == 0 { + n = n + 16 + x = x >> 16 + } + if (x & 0x000000ff) == 0 { + n = n + 8 + x = x >> 8 + } + if (x & 0x0000000f) == 0 { + n = n + 4 + x = x >> 4 + } + if (x & 0x00000003) == 0 { + n = n + 2 + x = x >> 2 + } + return n - int(x&1) +} diff --git a/vendor/golang.org/x/tools/go.mod b/vendor/golang.org/x/tools/go.mod new file mode 100644 index 0000000000..0984a835e9 --- /dev/null +++ b/vendor/golang.org/x/tools/go.mod @@ -0,0 +1,8 @@ +module golang.org/x/tools + +go 1.11 + +require ( + golang.org/x/net v0.0.0-20190311183353-d8887717615a + golang.org/x/sync v0.0.0-20190423024810-112230192c58 +) diff --git a/vendor/google.golang.org/api/README.md b/vendor/google.golang.org/api/README.md index 9df50a7ab0..cd938c19fd 100644 --- a/vendor/google.golang.org/api/README.md +++ b/vendor/google.golang.org/api/README.md @@ -29,9 +29,9 @@ func main() { * For a longer tutorial, see the [Getting Started guide](https://github.com/google/google-api-go-client/blob/master/GettingStarted.md). * For examples, see the [examples directory](https://github.com/google/google-api-go-client/tree/master/examples). * For support, use the [golang-nuts](https://groups.google.com/group/golang-nuts) mailing list. +* The code review instance may be found [here](https://code-review.googlesource.com). ## Status -[![Build Status](https://travis-ci.org/google/google-api-go-client.png)](https://travis-ci.org/google/google-api-go-client) [![GoDoc](https://godoc.org/google.golang.org/api?status.svg)](https://godoc.org/google.golang.org/api) These are auto-generated Go libraries from the Google Discovery Service's JSON description files of the available "new style" Google APIs. @@ -43,7 +43,7 @@ These client libraries are officially supported by Google. However, the librari If you're working with Google Cloud Platform APIs such as Datastore or Pub/Sub, consider using the -[Cloud Client Libraries for Go](https://github.com/GoogleCloudPlatform/google-cloud-go) +[Cloud Client Libraries for Go](https://github.com/googleapis/google-cloud-go) instead. These are the new and idiomatic Go libraries targeted specifically at Google Cloud Platform Services. 
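The intsets package vendored above represents a set as an ordered, circular doubly-linked list of 256-bit blocks, with the popcount variants supplying Len. A short usage sketch (the element values are arbitrary):

```go
package main

import (
	"fmt"

	"golang.org/x/tools/container/intsets"
)

func main() {
	var a, b intsets.Sparse // the zero value is a valid empty set
	for _, x := range []int{300, -3, 0, 300} {
		a.Insert(x)
	}
	b.Insert(0)
	b.Insert(4)

	fmt.Println(a.String())      // {-3 0 300}
	fmt.Println(a.Len())         // 3
	fmt.Println(a.LowerBound(1)) // 300: the smallest element >= 1

	a.IntersectionWith(&b)
	fmt.Println(a.String()) // {0}

	var c intsets.Sparse
	c.Copy(&b) // per the package docs, copy with Copy, never by assignment
	fmt.Println(c.Equals(&b)) // true
}
```

The Copy call matters: assigning a Sparse value aliases the internal block list, which is exactly the corruption the init check with to_copy_a_sparse_you_must_call_its_Copy_method is designed to catch early.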
@@ -71,7 +71,7 @@ Some credentials types require you to specify scopes, and service entry points m ```go import ( - "golang.org/x/net/context" + "context" "golang.org/x/oauth2/google" "google.golang.org/api/compute/v1" ) diff --git a/vendor/google.golang.org/api/go.mod b/vendor/google.golang.org/api/go.mod new file mode 100644 index 0000000000..63abbca79d --- /dev/null +++ b/vendor/google.golang.org/api/go.mod @@ -0,0 +1,23 @@ +module google.golang.org/api + +go 1.9 + +require ( + cloud.google.com/go v0.38.0 // indirect + github.com/golang/protobuf v1.3.1 // indirect + github.com/google/go-cmp v0.3.0 + github.com/googleapis/gax-go/v2 v2.0.5 + github.com/hashicorp/golang-lru v0.5.1 // indirect + go.opencensus.io v0.21.0 + golang.org/x/lint v0.0.0-20190409202823-959b441ac422 + golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c // indirect + golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + golang.org/x/sync v0.0.0-20190423024810-112230192c58 + golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b + golang.org/x/text v0.3.2 // indirect + golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c + google.golang.org/appengine v1.5.0 + google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 + google.golang.org/grpc v1.20.1 + honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a +) diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index c16b7b629b..69b8659fdd 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,28 +15,88 @@ package internal import ( + "context" + "encoding/json" "fmt" "io/ioutil" - "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" ) // Creds returns credential information obtained from DialSettings, or if none, then // it returns default credential information. -func Creds(ctx context.Context, ds *DialSettings) (*google.DefaultCredentials, error) { +func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { if ds.Credentials != nil { return ds.Credentials, nil } + if ds.CredentialsJSON != nil { + return credentialsFromJSON(ctx, ds.CredentialsJSON, ds.Endpoint, ds.Scopes, ds.Audiences) + } if ds.CredentialsFile != "" { data, err := ioutil.ReadFile(ds.CredentialsFile) if err != nil { return nil, fmt.Errorf("cannot read credentials file: %v", err) } - return google.CredentialsFromJSON(ctx, data, ds.Scopes...) + return credentialsFromJSON(ctx, data, ds.Endpoint, ds.Scopes, ds.Audiences) } if ds.TokenSource != nil { - return &google.DefaultCredentials{TokenSource: ds.TokenSource}, nil + return &google.Credentials{TokenSource: ds.TokenSource}, nil } - return google.FindDefaultCredentials(ctx, ds.Scopes...) + cred, err := google.FindDefaultCredentials(ctx, ds.Scopes...) + if err != nil { + return nil, err + } + if len(cred.JSON) > 0 { + return credentialsFromJSON(ctx, cred.JSON, ds.Endpoint, ds.Scopes, ds.Audiences) + } + // For GAE and GCE, the JSON is empty so return the default credentials directly. + return cred, nil +} + +// JSON key file type. +const ( + serviceAccountKey = "service_account" +) + +// credentialsFromJSON returns a google.Credentials based on the input. 
+// +// - If the JSON is a service account and no scopes provided, returns self-signed JWT auth flow +// - Otherwise, returns OAuth 2.0 flow. +func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scopes []string, audiences []string) (*google.Credentials, error) { + cred, err := google.CredentialsFromJSON(ctx, data, scopes...) + if err != nil { + return nil, err + } + if len(data) > 0 && len(scopes) == 0 { + var f struct { + Type string `json:"type"` + // The rest JSON fields are omitted because they are not used. + } + if err := json.Unmarshal(cred.JSON, &f); err != nil { + return nil, err + } + if f.Type == serviceAccountKey { + ts, err := selfSignedJWTTokenSource(data, endpoint, audiences) + if err != nil { + return nil, err + } + cred.TokenSource = ts + } + } + return cred, err +} + +func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) (oauth2.TokenSource, error) { + // Use the API endpoint as the default audience + audience := endpoint + if len(audiences) > 0 { + // TODO(shinfan): Update golang oauth to support multiple audiences. + if len(audiences) > 1 { + return nil, fmt.Errorf("multiple audiences support is not implemented") + } + audience = audiences[0] + } + return google.JWTAccessTokenSourceFromJSON(data, audience) } diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go index 4150feb6bb..a4426dcb70 100644 --- a/vendor/google.golang.org/api/internal/pool.go +++ b/vendor/google.golang.org/api/internal/pool.go @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ package internal import ( "errors" + "google.golang.org/grpc/naming" ) @@ -37,7 +38,7 @@ func NewPoolResolver(size int, o *DialSettings) *PoolResolver { // provided to NewPoolResolver. func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) { if r.dialOpt.Endpoint == "" { - return nil, errors.New("No endpoint configured") + return nil, errors.New("no endpoint configured") } addrs := make([]*naming.Update, 0, r.poolSize) for i := 0; i < r.poolSize; i++ { @@ -54,6 +55,7 @@ func (r *PoolResolver) Next() ([]*naming.Update, error) { return <-r.ch, nil } +// Close releases resources associated with the pool and causes Next to unblock. func (r *PoolResolver) Close() { close(r.ch) } diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 34dfa5a821..062301c65f 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -30,14 +30,21 @@ type DialSettings struct { Endpoint string Scopes []string TokenSource oauth2.TokenSource - Credentials *google.DefaultCredentials + Credentials *google.Credentials CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte UserAgent string APIKey string + Audiences []string HTTPClient *http.Client GRPCDialOpts []grpc.DialOption GRPCConn *grpc.ClientConn NoAuth bool + + // Google API system parameters. 
For more information please read: + // https://cloud.google.com/apis/docs/system-parameters + QuotaProject string + RequestReason string } // Validate reports an error if ds is invalid. @@ -49,7 +56,27 @@ func (ds *DialSettings) Validate() error { // Credentials should not appear with other options. // We currently allow TokenSource and CredentialsFile to coexist. // TODO(jba): make TokenSource & CredentialsFile an error (breaking change). - if ds.Credentials != nil && (ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "") { + nCreds := 0 + if ds.Credentials != nil { + nCreds++ + } + if ds.CredentialsJSON != nil { + nCreds++ + } + if ds.CredentialsFile != "" { + nCreds++ + } + if ds.APIKey != "" { + nCreds++ + } + if ds.TokenSource != nil { + nCreds++ + } + if len(ds.Scopes) > 0 && len(ds.Audiences) > 0 { + return errors.New("WithScopes is incompatible with WithAudience") + } + // Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility. + if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { return errors.New("multiple credential options provided") } if ds.HTTPClient != nil && ds.GRPCConn != nil { @@ -58,5 +85,12 @@ func (ds *DialSettings) Validate() error { if ds.HTTPClient != nil && ds.GRPCDialOpts != nil { return errors.New("WithHTTPClient is incompatible with gRPC dial options") } + if ds.HTTPClient != nil && ds.QuotaProject != "" { + return errors.New("WithHTTPClient is incompatible with QuotaProject") + } + if ds.HTTPClient != nil && ds.RequestReason != "" { + return errors.New("WithHTTPClient is incompatible with RequestReason") + } + return nil } diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go index e34e520734..3c8ea7732a 100644 --- a/vendor/google.golang.org/api/iterator/iterator.go +++ b/vendor/google.golang.org/api/iterator/iterator.go @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go index c08c1149d2..0636a82945 100644 --- a/vendor/google.golang.org/api/option/credentials_go19.go +++ b/vendor/google.golang.org/api/option/credentials_go19.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All Rights Reserved. +// Copyright 2018 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -27,6 +27,7 @@ func (w *withCreds) Apply(o *internal.DialSettings) { o.Credentials = (*google.Credentials)(w) } +// WithCredentials returns a ClientOption that authenticates API calls. func WithCredentials(creds *google.Credentials) ClientOption { return (*withCreds)(creds) } diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go index 90d2290010..74d3a4b5b9 100644 --- a/vendor/google.golang.org/api/option/credentials_notgo19.go +++ b/vendor/google.golang.org/api/option/credentials_notgo19.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All Rights Reserved. +// Copyright 2018 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
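The creds.go rewrite above routes a "service_account" key supplied without scopes through a self-signed JWT (selfSignedJWTTokenSource) instead of the OAuth 2.0 token endpoint. A hedged sketch of reaching that path from the public options; the key file path and audience are placeholders, and transport.Creds is assumed from the vendored transport package:

```go
package main

import (
	"context"
	"io/ioutil"
	"log"

	"google.golang.org/api/option"
	"google.golang.org/api/transport"
)

func main() {
	ctx := context.Background()

	data, err := ioutil.ReadFile("service-account.json") // placeholder key path
	if err != nil {
		log.Fatal(err)
	}

	// With no WithScopes, credentialsFromJSON sees a service_account key
	// and swaps in a self-signed JWT token source, using the audience
	// below (or, failing that, the API endpoint) as the "aud" claim.
	creds, err := transport.Creds(ctx,
		option.WithCredentialsJSON(data),
		option.WithAudiences("https://example.googleapis.com/"), // placeholder audience
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = creds // real code would instead pass the options to a client constructor
}
```

Note the Validate change above: WithScopes and WithAudiences are mutually exclusive, so a caller picks one or the other.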
diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index ffbee32951..0a1c2dba9e 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -61,6 +61,20 @@ func WithServiceAccountFile(filename string) ClientOption { return WithCredentialsFile(filename) } +// WithCredentialsJSON returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials. +func WithCredentialsJSON(p []byte) ClientOption { + return withCredentialsJSON(p) +} + +type withCredentialsJSON []byte + +func (w withCredentialsJSON) Apply(o *internal.DialSettings) { + o.CredentialsJSON = make([]byte, len(w)) + copy(o.CredentialsJSON, w) +} + // WithEndpoint returns a ClientOption that overrides the default endpoint // to be used for a service. func WithEndpoint(url string) ClientOption { @@ -82,9 +96,8 @@ func WithScopes(scope ...string) ClientOption { type withScopes []string func (w withScopes) Apply(o *internal.DialSettings) { - s := make([]string, len(w)) - copy(s, w) - o.Scopes = s + o.Scopes = make([]string, len(w)) + copy(o.Scopes, w) } // WithUserAgent returns a ClientOption that sets the User-Agent. @@ -153,6 +166,9 @@ func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { // WithAPIKey returns a ClientOption that specifies an API key to be used // as the basis for authentication. +// +// API Keys can only be used for JSON-over-HTTP APIs, including those under +// the import path google.golang.org/api/.... func WithAPIKey(apiKey string) ClientOption { return withAPIKey(apiKey) } @@ -161,6 +177,19 @@ type withAPIKey string func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) } +// WithAudiences returns a ClientOption that specifies an audience to be used +// as the audience field ("aud") for the JWT token authentication. +func WithAudiences(audience ...string) ClientOption { + return withAudiences(audience) +} + +type withAudiences []string + +func (w withAudiences) Apply(o *internal.DialSettings) { + o.Audiences = make([]string, len(w)) + copy(o.Audiences, w) +} + // WithoutAuthentication returns a ClientOption that specifies that no // authentication should be used. It is suitable only for testing and for // accessing public resources, like public Google Cloud Storage buckets. @@ -173,3 +202,34 @@ func WithoutAuthentication() ClientOption { type withoutAuthentication struct{} func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true } + +// WithQuotaProject returns a ClientOption that specifies the project used +// for quota and billing purposes. +// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithQuotaProject(quotaProject string) ClientOption { + return withQuotaProject(quotaProject) +} + +type withQuotaProject string + +func (w withQuotaProject) Apply(o *internal.DialSettings) { + o.QuotaProject = string(w) +} + +// WithRequestReason returns a ClientOption that specifies a reason for +// making the request, which is intended to be recorded in audit logging. +// An example reason would be a support-case ticket number. 
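+// The reason is sent with each request as the X-Goog-Request-Reason HTTP
+// header, or as the equivalent gRPC request metadata (see the transport
+// packages in this change).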
+// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithRequestReason(requestReason string) ClientOption { + return withRequestReason(requestReason) +} + +type withRequestReason string + +func (w withRequestReason) Apply(o *internal.DialSettings) { + o.RequestReason = string(w) +} diff --git a/vendor/google.golang.org/api/support/bundler/bundler.go b/vendor/google.golang.org/api/support/bundler/bundler.go index 8d8fb7f047..c553271190 100644 --- a/vendor/google.golang.org/api/support/bundler/bundler.go +++ b/vendor/google.golang.org/api/support/bundler/bundler.go @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,13 +22,13 @@ package bundler import ( + "context" "errors" "math" "reflect" "sync" "time" - "golang.org/x/net/context" "golang.org/x/sync/semaphore" ) diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go index c4c37191bf..1fb7cf905d 100644 --- a/vendor/google.golang.org/api/transport/dial.go +++ b/vendor/google.golang.org/api/transport/dial.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google Inc. All Rights Reserved. +// Copyright 2015 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,15 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package transport supports network connections to HTTP and GRPC servers. -// This package is not intended for use by end developers. Use the -// google.golang.org/api/option package to configure API clients. package transport import ( + "context" "net/http" - "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/api/option" diff --git a/vendor/google.golang.org/api/transport/http/go18.go b/vendor/google.golang.org/api/transport/doc.go similarity index 59% rename from vendor/google.golang.org/api/transport/http/go18.go rename to vendor/google.golang.org/api/transport/doc.go index 1d4bb8e7fb..4915036c35 100644 --- a/vendor/google.golang.org/api/transport/http/go18.go +++ b/vendor/google.golang.org/api/transport/doc.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All Rights Reserved. +// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,20 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build go1.8 - -package http - -import ( - "net/http" - - "go.opencensus.io/exporter/stackdriver/propagation" - "go.opencensus.io/plugin/ochttp" -) - -func addOCTransport(trans http.RoundTripper) http.RoundTripper { - return &ochttp.Transport{ - Base: trans, - Propagation: &propagation.HTTPFormat{}, - } -} +// Package transport provides utility methods for creating authenticated +// transports to Google's HTTP and gRPC APIs. It is intended to be used in +// conjunction with google.golang.org/api/option. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. 
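+//
+// For the client libraries that do need it, a minimal sketch (Creds resolves
+// ClientOptions into google.Credentials):
+//
+//	creds, err := transport.Creds(ctx,
+//		option.WithCredentialsFile("key.json"),
+//		option.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
+//	)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = creds.TokenSource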
+package transport diff --git a/vendor/google.golang.org/api/transport/go19.go b/vendor/google.golang.org/api/transport/go19.go index 0177e5600c..3e89f93287 100644 --- a/vendor/google.golang.org/api/transport/go19.go +++ b/vendor/google.golang.org/api/transport/go19.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All Rights Reserved. +// Copyright 2018 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,7 +17,8 @@ package transport import ( - "golang.org/x/net/context" + "context" + "golang.org/x/oauth2/google" "google.golang.org/api/internal" "google.golang.org/api/option" diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 2b8ed6b0b8..b850246ce2 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google Inc. All Rights Reserved. +// Copyright 2015 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,25 +12,37 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package transport/grpc supports network connections to GRPC servers. +// Package grpc supports network connections to GRPC servers. // This package is not intended for use by end developers. Use the // google.golang.org/api/option package to configure API clients. package grpc import ( + "context" "errors" + "log" + "os" + "strings" - "golang.org/x/net/context" + "go.opencensus.io/plugin/ocgrpc" + "golang.org/x/oauth2" "google.golang.org/api/internal" "google.golang.org/api/option" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + grpcgoogle "google.golang.org/grpc/credentials/google" "google.golang.org/grpc/credentials/oauth" + + // Install grpclb, which is required for direct path. + _ "google.golang.org/grpc/balancer/grpclb" ) // Set at init time by dial_appengine.go. If nil, we're not on App Engine. var appengineDialerHook func(context.Context) grpc.DialOption +// Set at init time by dial_socketopt.go. If nil, socketopt is not supported. +var timeoutDialerOption grpc.DialOption + // Dial returns a GRPC connection for use communicating with a Google cloud // service, configured with the given ClientOptions. func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { @@ -62,19 +74,47 @@ func dial(ctx context.Context, insecure bool, opts []option.ClientOption) (*grpc if insecure { grpcOpts = []grpc.DialOption{grpc.WithInsecure()} } else if !o.NoAuth { + if o.APIKey != "" { + log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") + } creds, err := internal.Creds(ctx, &o) if err != nil { return nil, err } - grpcOpts = []grpc.DialOption{ - grpc.WithPerRPCCredentials(oauth.TokenSource{creds.TokenSource}), - grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), + // Attempt Direct Path only if: + // * The endpoint is a host:port (or dns:///host:port). + // * Credentials are obtained via GCE metadata server, using the default + // service account. + // * Opted in via GOOGLE_CLOUD_ENABLE_DIRECT_PATH environment variable. 
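+		//   The variable holds a comma-separated list of API names that is
+		//   matched against the endpoint by substring (see isDirectPathEnabled).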
+ // For example, GOOGLE_CLOUD_ENABLE_DIRECT_PATH=spanner,pubsub + if isDirectPathEnabled(o.Endpoint) && isTokenSourceDirectPathCompatible(creds.TokenSource) { + if !strings.HasPrefix(o.Endpoint, "dns:///") { + o.Endpoint = "dns:///" + o.Endpoint + } + grpcOpts = []grpc.DialOption{ + grpc.WithCredentialsBundle( + grpcgoogle.NewComputeEngineCredentials(), + ), + } + // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. + } else { + grpcOpts = []grpc.DialOption{ + grpc.WithPerRPCCredentials(grpcTokenSource{ + TokenSource: oauth.TokenSource{creds.TokenSource}, + quotaProject: o.QuotaProject, + requestReason: o.RequestReason, + }), + grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), + } } } + if appengineDialerHook != nil { // Use the Socket API on App Engine. + // appengine dialer will override socketopt dialer grpcOpts = append(grpcOpts, appengineDialerHook(ctx)) } + // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. @@ -83,5 +123,87 @@ func dial(ctx context.Context, insecure bool, opts []option.ClientOption) (*grpc if o.UserAgent != "" { grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) } + + // TODO(weiranf): This socketopt dialer will be used by default at some + // point when isDirectPathEnabled will default to true, we guard it by + // the Directpath env var for now once we can introspect user defined + // dialer (https://github.com/grpc/grpc-go/issues/2795). + if timeoutDialerOption != nil && isDirectPathEnabled(o.Endpoint) { + grpcOpts = append(grpcOpts, timeoutDialerOption) + } + return grpc.DialContext(ctx, o.Endpoint, grpcOpts...) } + +func addOCStatsHandler(opts []grpc.DialOption) []grpc.DialOption { + return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) +} + +// grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource. +type grpcTokenSource struct { + oauth.TokenSource + + // Additional metadata attached as headers. + quotaProject string + requestReason string +} + +// GetRequestMetadata gets the request metadata as a map from a grpcTokenSource. +func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string) ( + map[string]string, error) { + metadata, err := ts.TokenSource.GetRequestMetadata(ctx, uri...) + if err != nil { + return nil, err + } + + // Attach system parameter + if ts.quotaProject != "" { + metadata["X-goog-user-project"] = ts.quotaProject + } + if ts.requestReason != "" { + metadata["X-goog-request-reason"] = ts.requestReason + } + return metadata, nil +} + +func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource) bool { + if ts == nil { + return false + } + tok, err := ts.Token() + if err != nil { + return false + } + if tok == nil { + return false + } + if source, _ := tok.Extra("oauth2.google.tokenSource").(string); source != "compute-metadata" { + return false + } + if acct, _ := tok.Extra("oauth2.google.serviceAccount").(string); acct != "default" { + return false + } + return true +} + +func isDirectPathEnabled(endpoint string) bool { + // Only host:port is supported, not other schemes (e.g., "tcp://" or "unix://"). + // Also don't try direct path if the user has chosen an alternate name resolver + // (i.e., via ":///" prefix). + // + // TODO(cbro): once gRPC has introspectible options, check the user hasn't + // provided a custom dialer in gRPC options. 
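+	//
+	// Until then the check is purely string-based: reject endpoints carrying a
+	// non-DNS scheme, then require a substring match against the opt-in list.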
+ if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") { + return false + } + + // Only try direct path if the user has opted in via the environment variable. + whitelist := strings.Split(os.Getenv("GOOGLE_CLOUD_ENABLE_DIRECT_PATH"), ",") + for _, api := range whitelist { + // Ignore empty string since an empty env variable splits into [""] + if api != "" && strings.Contains(endpoint, api) { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go index a40cef2506..87819d4e10 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go +++ b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,10 +17,10 @@ package grpc import ( + "context" "net" "time" - "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/socket" "google.golang.org/grpc" diff --git a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go new file mode 100644 index 0000000000..2b1d9e99b1 --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go @@ -0,0 +1,59 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.11,linux + +package grpc + +import ( + "context" + "net" + "syscall" + + "golang.org/x/sys/unix" + "google.golang.org/grpc" +) + +const ( + // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By + // default is 20 seconds. + tcpUserTimeoutMilliseconds = 20000 +) + +func init() { + // timeoutDialerOption is a grpc.DialOption that contains dialer with + // socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+. + timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout) +} + +func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { + control := func(network, address string, c syscall.RawConn) error { + var syscallErr error + controlErr := c.Control(func(fd uintptr) { + syscallErr = syscall.SetsockoptInt( + int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, tcpUserTimeoutMilliseconds) + }) + if syscallErr != nil { + return syscallErr + } + if controlErr != nil { + return controlErr + } + return nil + } + d := &net.Dialer{ + Control: control, + } + return d.DialContext(ctx, "tcp", addr) +} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index df34c6810d..c0d8bf20b0 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -1,4 +1,4 @@ -// Copyright 2015 Google Inc. All Rights Reserved. 
+// Copyright 2015 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,73 +12,109 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package transport/http supports network connections to HTTP servers. +// Package http supports network connections to HTTP servers. // This package is not intended for use by end developers. Use the // google.golang.org/api/option package to configure API clients. package http import ( + "context" "errors" "net/http" - "golang.org/x/net/context" + "go.opencensus.io/plugin/ochttp" "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" "google.golang.org/api/internal" "google.golang.org/api/option" + "google.golang.org/api/transport/http/internal/propagation" ) // NewClient returns an HTTP client for use communicating with a Google cloud // service, configured with the given ClientOptions. It also returns the endpoint // for the service as specified in the options. func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { - var o internal.DialSettings - for _, opt := range opts { - opt.Apply(&o) - } - if err := o.Validate(); err != nil { + settings, err := newSettings(opts) + if err != nil { return nil, "", err } - if o.GRPCConn != nil { - return nil, "", errors.New("unsupported gRPC connection specified") - } // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? - if o.HTTPClient != nil { - return o.HTTPClient, o.Endpoint, nil + if settings.HTTPClient != nil { + return settings.HTTPClient, settings.Endpoint, nil } - trans := baseTransport(ctx) - trans = userAgentTransport{ - base: trans, - userAgent: o.UserAgent, + trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings) + if err != nil { + return nil, "", err + } + return &http.Client{Transport: trans}, settings.Endpoint, nil +} + +// NewTransport creates an http.RoundTripper for use communicating with a Google +// cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base. +func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, err + } + if settings.HTTPClient != nil { + return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport") + } + return newTransport(ctx, base, settings) +} + +func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) { + trans := base + trans = parameterTransport{ + base: trans, + userAgent: settings.UserAgent, + quotaProject: settings.QuotaProject, + requestReason: settings.RequestReason, } trans = addOCTransport(trans) switch { - case o.NoAuth: + case settings.NoAuth: // Do nothing. 
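+		// (The parameterTransport above still attaches the User-Agent and
+		// system parameters even when auth is disabled.)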
- case o.APIKey != "": + case settings.APIKey != "": trans = &transport.APIKey{ Transport: trans, - Key: o.APIKey, + Key: settings.APIKey, } default: - creds, err := internal.Creds(ctx, &o) + creds, err := internal.Creds(ctx, settings) if err != nil { - return nil, "", err + return nil, err } trans = &oauth2.Transport{ Base: trans, Source: creds.TokenSource, } } - return &http.Client{Transport: trans}, o.Endpoint, nil + return trans, nil } -type userAgentTransport struct { - userAgent string - base http.RoundTripper +func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + if o.GRPCConn != nil { + return nil, errors.New("unsupported gRPC connection specified") + } + return &o, nil } -func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) { +type parameterTransport struct { + userAgent string + quotaProject string + requestReason string + + base http.RoundTripper +} + +func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { rt := t.base if rt == nil { return nil, errors.New("transport: no Transport specified") @@ -92,18 +128,34 @@ func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) newReq.Header[k] = vv } // TODO(cbro): append to existing User-Agent header? - newReq.Header["User-Agent"] = []string{t.userAgent} + newReq.Header.Set("User-Agent", t.userAgent) + + // Attach system parameters into the header + if t.quotaProject != "" { + newReq.Header.Set("X-Goog-User-Project", t.quotaProject) + } + if t.requestReason != "" { + newReq.Header.Set("X-Goog-Request-Reason", t.requestReason) + } + return rt.RoundTrip(&newReq) } // Set at init time by dial_appengine.go. If nil, we're not on App Engine. var appengineUrlfetchHook func(context.Context) http.RoundTripper -// baseTransport returns the base HTTP transport. +// defaultBaseTransport returns the base HTTP transport. // On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport. -func baseTransport(ctx context.Context) http.RoundTripper { +func defaultBaseTransport(ctx context.Context) http.RoundTripper { if appengineUrlfetchHook != nil { return appengineUrlfetchHook(ctx) } return http.DefaultTransport } + +func addOCTransport(trans http.RoundTripper) http.RoundTripper { + return &ochttp.Transport{ + Base: trans, + Propagation: &propagation.HTTPFormat{}, + } +} diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/google.golang.org/api/transport/http/dial_appengine.go index 0cdef74ac1..04c81413c5 100644 --- a/vendor/google.golang.org/api/transport/http/dial_appengine.go +++ b/vendor/google.golang.org/api/transport/http/dial_appengine.go @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,9 +17,9 @@ package http import ( + "context" "net/http" - "golang.org/x/net/context" "google.golang.org/appengine/urlfetch" ) diff --git a/vendor/go.opencensus.io/exporter/stackdriver/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go similarity index 91% rename from vendor/go.opencensus.io/exporter/stackdriver/propagation/http.go rename to vendor/google.golang.org/api/transport/http/internal/propagation/http.go index 7cc02a1104..24b4f0d291 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/propagation/http.go +++ b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go @@ -1,10 +1,10 @@ -// Copyright 2018, OpenCensus Authors +// Copyright 2018 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,9 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package propagation implement X-Cloud-Trace-Context header propagation used +// +build go1.8 + +// Package propagation implements X-Cloud-Trace-Context header propagation used // by Google Cloud products. -package propagation // import "go.opencensus.io/exporter/stackdriver/propagation" +package propagation import ( "encoding/binary" diff --git a/vendor/google.golang.org/api/transport/not_go19.go b/vendor/google.golang.org/api/transport/not_go19.go index 4489bc9b6e..0cb6275944 100644 --- a/vendor/google.golang.org/api/transport/not_go19.go +++ b/vendor/google.golang.org/api/transport/not_go19.go @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. All Rights Reserved. +// Copyright 2018 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,7 +17,8 @@ package transport import ( - "golang.org/x/net/context" + "context" + "golang.org/x/oauth2/google" "google.golang.org/api/internal" "google.golang.org/api/option" diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go new file mode 100644 index 0000000000..930f90ca57 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -0,0 +1,772 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/lb/v1/load_balancer.proto + +package grpc_lb_v1 + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type LoadBalanceRequest struct { + // Types that are valid to be assigned to LoadBalanceRequestType: + // *LoadBalanceRequest_InitialRequest + // *LoadBalanceRequest_ClientStats + LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} } +func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func (*LoadBalanceRequest) ProtoMessage() {} +func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{0} +} + +func (m *LoadBalanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoadBalanceRequest.Unmarshal(m, b) +} +func (m *LoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoadBalanceRequest.Marshal(b, m, deterministic) +} +func (m *LoadBalanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalanceRequest.Merge(m, src) +} +func (m *LoadBalanceRequest) XXX_Size() int { + return xxx_messageInfo_LoadBalanceRequest.Size(m) +} +func (m *LoadBalanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalanceRequest proto.InternalMessageInfo + +type isLoadBalanceRequest_LoadBalanceRequestType interface { + isLoadBalanceRequest_LoadBalanceRequestType() +} + +type LoadBalanceRequest_InitialRequest struct { + InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,proto3,oneof"` +} + +type LoadBalanceRequest_ClientStats struct { + ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,proto3,oneof"` +} + +func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} + +func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} + +func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { + if m != nil { + return m.LoadBalanceRequestType + } + return nil +} + +func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { + if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { + return x.InitialRequest + } + return nil +} + +func (m *LoadBalanceRequest) GetClientStats() *ClientStats { + if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { + return x.ClientStats + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*LoadBalanceRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*LoadBalanceRequest_InitialRequest)(nil), + (*LoadBalanceRequest_ClientStats)(nil), + } +} + +type InitialLoadBalanceRequest struct { + // The name of the load balanced service (e.g., service.googleapis.com). Its + // length should be less than 256 bytes. + // The name might include a port number. How to handle the port number is up + // to the balancer. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} } +func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceRequest) ProtoMessage() {} +func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{1} +} + +func (m *InitialLoadBalanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitialLoadBalanceRequest.Unmarshal(m, b) +} +func (m *InitialLoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitialLoadBalanceRequest.Marshal(b, m, deterministic) +} +func (m *InitialLoadBalanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitialLoadBalanceRequest.Merge(m, src) +} +func (m *InitialLoadBalanceRequest) XXX_Size() int { + return xxx_messageInfo_InitialLoadBalanceRequest.Size(m) +} +func (m *InitialLoadBalanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitialLoadBalanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InitialLoadBalanceRequest proto.InternalMessageInfo + +func (m *InitialLoadBalanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Contains the number of calls finished for a particular load balance token. +type ClientStatsPerToken struct { + // See Server.load_balance_token. + LoadBalanceToken string `protobuf:"bytes,1,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` + // The total number of RPCs that finished associated with the token. + NumCalls int64 `protobuf:"varint,2,opt,name=num_calls,json=numCalls,proto3" json:"num_calls,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientStatsPerToken) Reset() { *m = ClientStatsPerToken{} } +func (m *ClientStatsPerToken) String() string { return proto.CompactTextString(m) } +func (*ClientStatsPerToken) ProtoMessage() {} +func (*ClientStatsPerToken) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{2} +} + +func (m *ClientStatsPerToken) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientStatsPerToken.Unmarshal(m, b) +} +func (m *ClientStatsPerToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientStatsPerToken.Marshal(b, m, deterministic) +} +func (m *ClientStatsPerToken) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientStatsPerToken.Merge(m, src) +} +func (m *ClientStatsPerToken) XXX_Size() int { + return xxx_messageInfo_ClientStatsPerToken.Size(m) +} +func (m *ClientStatsPerToken) XXX_DiscardUnknown() { + xxx_messageInfo_ClientStatsPerToken.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientStatsPerToken proto.InternalMessageInfo + +func (m *ClientStatsPerToken) GetLoadBalanceToken() string { + if m != nil { + return m.LoadBalanceToken + } + return "" +} + +func (m *ClientStatsPerToken) GetNumCalls() int64 { + if m != nil { + return m.NumCalls + } + return 0 +} + +// Contains client level statistics that are useful to load balancing. Each +// count except the timestamp should be reset to zero after reporting the stats. +type ClientStats struct { + // The timestamp of generating the report. 
+ Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The total number of RPCs that started. + NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted,proto3" json:"num_calls_started,omitempty"` + // The total number of RPCs that finished. + NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished,proto3" json:"num_calls_finished,omitempty"` + // The total number of RPCs that failed to reach a server except dropped RPCs. + NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend,proto3" json:"num_calls_finished_with_client_failed_to_send,omitempty"` + // The total number of RPCs that finished and are known to have been received + // by a server. + NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived,proto3" json:"num_calls_finished_known_received,omitempty"` + // The list of dropped calls. + CallsFinishedWithDrop []*ClientStatsPerToken `protobuf:"bytes,8,rep,name=calls_finished_with_drop,json=callsFinishedWithDrop,proto3" json:"calls_finished_with_drop,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientStats) Reset() { *m = ClientStats{} } +func (m *ClientStats) String() string { return proto.CompactTextString(m) } +func (*ClientStats) ProtoMessage() {} +func (*ClientStats) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{3} +} + +func (m *ClientStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientStats.Unmarshal(m, b) +} +func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic) +} +func (m *ClientStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientStats.Merge(m, src) +} +func (m *ClientStats) XXX_Size() int { + return xxx_messageInfo_ClientStats.Size(m) +} +func (m *ClientStats) XXX_DiscardUnknown() { + xxx_messageInfo_ClientStats.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientStats proto.InternalMessageInfo + +func (m *ClientStats) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *ClientStats) GetNumCallsStarted() int64 { + if m != nil { + return m.NumCallsStarted + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinished() int64 { + if m != nil { + return m.NumCallsFinished + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { + if m != nil { + return m.NumCallsFinishedWithClientFailedToSend + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 { + if m != nil { + return m.NumCallsFinishedKnownReceived + } + return 0 +} + +func (m *ClientStats) GetCallsFinishedWithDrop() []*ClientStatsPerToken { + if m != nil { + return m.CallsFinishedWithDrop + } + return nil +} + +type LoadBalanceResponse struct { + // Types that are valid to be assigned to LoadBalanceResponseType: + // *LoadBalanceResponse_InitialResponse + // *LoadBalanceResponse_ServerList + // *LoadBalanceResponse_FallbackResponse + LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} } +func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*LoadBalanceResponse) ProtoMessage() {} +func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{4} +} + +func (m *LoadBalanceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoadBalanceResponse.Unmarshal(m, b) +} +func (m *LoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoadBalanceResponse.Marshal(b, m, deterministic) +} +func (m *LoadBalanceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalanceResponse.Merge(m, src) +} +func (m *LoadBalanceResponse) XXX_Size() int { + return xxx_messageInfo_LoadBalanceResponse.Size(m) +} +func (m *LoadBalanceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalanceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalanceResponse proto.InternalMessageInfo + +type isLoadBalanceResponse_LoadBalanceResponseType interface { + isLoadBalanceResponse_LoadBalanceResponseType() +} + +type LoadBalanceResponse_InitialResponse struct { + InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,proto3,oneof"` +} + +type LoadBalanceResponse_ServerList struct { + ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,proto3,oneof"` +} + +type LoadBalanceResponse_FallbackResponse struct { + FallbackResponse *FallbackResponse `protobuf:"bytes,3,opt,name=fallback_response,json=fallbackResponse,proto3,oneof"` +} + +func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (*LoadBalanceResponse_FallbackResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { + if m != nil { + return m.LoadBalanceResponseType + } + return nil +} + +func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { + return x.InitialResponse + } + return nil +} + +func (m *LoadBalanceResponse) GetServerList() *ServerList { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { + return x.ServerList + } + return nil +} + +func (m *LoadBalanceResponse) GetFallbackResponse() *FallbackResponse { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_FallbackResponse); ok { + return x.FallbackResponse + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*LoadBalanceResponse) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*LoadBalanceResponse_InitialResponse)(nil), + (*LoadBalanceResponse_ServerList)(nil), + (*LoadBalanceResponse_FallbackResponse)(nil), + } +} + +type FallbackResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FallbackResponse) Reset() { *m = FallbackResponse{} } +func (m *FallbackResponse) String() string { return proto.CompactTextString(m) } +func (*FallbackResponse) ProtoMessage() {} +func (*FallbackResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{5} +} + +func (m *FallbackResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FallbackResponse.Unmarshal(m, b) +} +func (m *FallbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FallbackResponse.Marshal(b, m, deterministic) +} +func (m *FallbackResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FallbackResponse.Merge(m, src) +} +func (m *FallbackResponse) XXX_Size() int { + return xxx_messageInfo_FallbackResponse.Size(m) +} +func (m *FallbackResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FallbackResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_FallbackResponse proto.InternalMessageInfo + +type InitialLoadBalanceResponse struct { + // This is an application layer redirect that indicates the client should use + // the specified server for load balancing. When this field is non-empty in + // the response, the client should open a separate connection to the + // load_balancer_delegate and call the BalanceLoad method. Its length should + // be less than 64 bytes. + LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate,proto3" json:"load_balancer_delegate,omitempty"` + // This interval defines how often the client should send the client stats + // to the load balancer. Stats should only be reported when the duration is + // positive. 
+ ClientStatsReportInterval *duration.Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval,proto3" json:"client_stats_report_interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} } +func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceResponse) ProtoMessage() {} +func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{6} +} + +func (m *InitialLoadBalanceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitialLoadBalanceResponse.Unmarshal(m, b) +} +func (m *InitialLoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitialLoadBalanceResponse.Marshal(b, m, deterministic) +} +func (m *InitialLoadBalanceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitialLoadBalanceResponse.Merge(m, src) +} +func (m *InitialLoadBalanceResponse) XXX_Size() int { + return xxx_messageInfo_InitialLoadBalanceResponse.Size(m) +} +func (m *InitialLoadBalanceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InitialLoadBalanceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InitialLoadBalanceResponse proto.InternalMessageInfo + +func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string { + if m != nil { + return m.LoadBalancerDelegate + } + return "" +} + +func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *duration.Duration { + if m != nil { + return m.ClientStatsReportInterval + } + return nil +} + +type ServerList struct { + // Contains a list of servers selected by the load balancer. The list will + // be updated when server resolutions change or as needed to balance load + // across more servers. The client should consume the server list in order + // unless instructed otherwise via the client_config. + Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerList) Reset() { *m = ServerList{} } +func (m *ServerList) String() string { return proto.CompactTextString(m) } +func (*ServerList) ProtoMessage() {} +func (*ServerList) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{7} +} + +func (m *ServerList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerList.Unmarshal(m, b) +} +func (m *ServerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerList.Marshal(b, m, deterministic) +} +func (m *ServerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerList.Merge(m, src) +} +func (m *ServerList) XXX_Size() int { + return xxx_messageInfo_ServerList.Size(m) +} +func (m *ServerList) XXX_DiscardUnknown() { + xxx_messageInfo_ServerList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerList proto.InternalMessageInfo + +func (m *ServerList) GetServers() []*Server { + if m != nil { + return m.Servers + } + return nil +} + +// Contains server information. When the drop field is not true, use the other +// fields. +type Server struct { + // A resolved address for the server, serialized in network-byte-order. It may + // either be an IPv4 or IPv6 address. 
+ IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // A resolved port number for the server. + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + // An opaque but printable token for load reporting. The client must include + // the token of the picked server into the initial metadata when it starts a + // call to that server. The token is used by the server to verify the request + // and to allow the server to report load to the gRPC LB system. The token is + // also used in client stats for reporting dropped calls. + // + // Its length can be variable but must be less than 50 bytes. + LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` + // Indicates whether this particular request should be dropped by the client. + // If the request is dropped, there will be a corresponding entry in + // ClientStats.calls_finished_with_drop. + Drop bool `protobuf:"varint,4,opt,name=drop,proto3" json:"drop,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Server) Reset() { *m = Server{} } +func (m *Server) String() string { return proto.CompactTextString(m) } +func (*Server) ProtoMessage() {} +func (*Server) Descriptor() ([]byte, []int) { + return fileDescriptor_7cd3f6d792743fdf, []int{8} +} + +func (m *Server) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Server.Unmarshal(m, b) +} +func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Server.Marshal(b, m, deterministic) +} +func (m *Server) XXX_Merge(src proto.Message) { + xxx_messageInfo_Server.Merge(m, src) +} +func (m *Server) XXX_Size() int { + return xxx_messageInfo_Server.Size(m) +} +func (m *Server) XXX_DiscardUnknown() { + xxx_messageInfo_Server.DiscardUnknown(m) +} + +var xxx_messageInfo_Server proto.InternalMessageInfo + +func (m *Server) GetIpAddress() []byte { + if m != nil { + return m.IpAddress + } + return nil +} + +func (m *Server) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *Server) GetLoadBalanceToken() string { + if m != nil { + return m.LoadBalanceToken + } + return "" +} + +func (m *Server) GetDrop() bool { + if m != nil { + return m.Drop + } + return false +} + +func init() { + proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest") + proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest") + proto.RegisterType((*ClientStatsPerToken)(nil), "grpc.lb.v1.ClientStatsPerToken") + proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats") + proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse") + proto.RegisterType((*FallbackResponse)(nil), "grpc.lb.v1.FallbackResponse") + proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse") + proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList") + proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") +} + +func init() { proto.RegisterFile("grpc/lb/v1/load_balancer.proto", fileDescriptor_7cd3f6d792743fdf) } + +var fileDescriptor_7cd3f6d792743fdf = []byte{ + // 785 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x55, 0xdd, 0x6e, 0xdb, 0x36, + 0x14, 0x8e, 0x6a, 0x27, 0x75, 0x8e, 0xb3, 0xc5, 0x61, 0xb7, 0x4e, 0x71, 0xd3, 0x24, 0x13, 0xb0, + 0x22, 
0x18, 0x3a, 0x79, 0xc9, 0x76, 0xb1, 0x01, 0xbb, 0xd8, 0xdc, 0x20, 0x48, 0xd3, 0x5e, 0x04, + 0x74, 0x80, 0x0e, 0x05, 0x06, 0x8e, 0x92, 0x68, 0x87, 0x08, 0x4d, 0x6a, 0x14, 0xed, 0x62, 0xd7, + 0x7b, 0x81, 0x3d, 0xc9, 0xb0, 0x57, 0xd8, 0x9b, 0x0d, 0x22, 0x29, 0x4b, 0xb1, 0x63, 0xf4, 0x4a, + 0xe4, 0x39, 0x1f, 0xbf, 0xf3, 0x7f, 0x04, 0x87, 0x13, 0x9d, 0xa7, 0x03, 0x91, 0x0c, 0xe6, 0xa7, + 0x03, 0xa1, 0x68, 0x46, 0x12, 0x2a, 0xa8, 0x4c, 0x99, 0x8e, 0x73, 0xad, 0x8c, 0x42, 0x50, 0xea, + 0x63, 0x91, 0xc4, 0xf3, 0xd3, 0xfe, 0xe1, 0x44, 0xa9, 0x89, 0x60, 0x03, 0xab, 0x49, 0x66, 0xe3, + 0x41, 0x36, 0xd3, 0xd4, 0x70, 0x25, 0x1d, 0xb6, 0x7f, 0xb4, 0xac, 0x37, 0x7c, 0xca, 0x0a, 0x43, + 0xa7, 0xb9, 0x03, 0x44, 0xff, 0x05, 0x80, 0xde, 0x2a, 0x9a, 0x0d, 0x9d, 0x0d, 0xcc, 0xfe, 0x98, + 0xb1, 0xc2, 0xa0, 0x6b, 0xd8, 0xe5, 0x92, 0x1b, 0x4e, 0x05, 0xd1, 0x4e, 0x14, 0x06, 0xc7, 0xc1, + 0x49, 0xf7, 0xec, 0xab, 0xb8, 0xb6, 0x1e, 0xbf, 0x76, 0x90, 0xd5, 0xf7, 0x97, 0x1b, 0xf8, 0x53, + 0xff, 0xbe, 0x62, 0xfc, 0x09, 0x76, 0x52, 0xc1, 0x99, 0x34, 0xa4, 0x30, 0xd4, 0x14, 0xe1, 0x23, + 0x4b, 0xf7, 0x45, 0x93, 0xee, 0x95, 0xd5, 0x8f, 0x4a, 0xf5, 0xe5, 0x06, 0xee, 0xa6, 0xf5, 0x75, + 0xf8, 0x0c, 0xf6, 0x9b, 0xa9, 0xa8, 0x9c, 0x22, 0xe6, 0xcf, 0x9c, 0x45, 0x03, 0xd8, 0x5f, 0xeb, + 0x09, 0x42, 0xd0, 0x96, 0x74, 0xca, 0xac, 0xfb, 0xdb, 0xd8, 0x9e, 0xa3, 0xdf, 0xe1, 0x49, 0xc3, + 0xd6, 0x35, 0xd3, 0x37, 0xea, 0x8e, 0x49, 0xf4, 0x12, 0xd0, 0x3d, 0x23, 0xa6, 0x94, 0xfa, 0x87, + 0x3d, 0x51, 0x53, 0x3b, 0xf4, 0x33, 0xd8, 0x96, 0xb3, 0x29, 0x49, 0xa9, 0x10, 0x2e, 0x9a, 0x16, + 0xee, 0xc8, 0xd9, 0xf4, 0x55, 0x79, 0x8f, 0xfe, 0x6d, 0x41, 0xb7, 0x61, 0x02, 0xfd, 0x00, 0xdb, + 0x8b, 0xcc, 0xfb, 0x4c, 0xf6, 0x63, 0x57, 0x9b, 0xb8, 0xaa, 0x4d, 0x7c, 0x53, 0x21, 0x70, 0x0d, + 0x46, 0x5f, 0xc3, 0xde, 0xc2, 0x4c, 0x99, 0x3a, 0x6d, 0x58, 0xe6, 0xcd, 0xed, 0x56, 0xe6, 0x46, + 0x4e, 0x5c, 0x06, 0x50, 0x63, 0xc7, 0x5c, 0xf2, 0xe2, 0x96, 0x65, 0x61, 0xcb, 0x82, 0x7b, 0x15, + 0xf8, 0xc2, 0xcb, 0xd1, 0x6f, 0xf0, 0xcd, 0x2a, 0x9a, 0x7c, 0xe0, 0xe6, 0x96, 0xf8, 0x4a, 0x8d, + 0x29, 0x17, 0x2c, 0x23, 0x46, 0x91, 0x82, 0xc9, 0x2c, 0xdc, 0xb2, 0x44, 0x2f, 0x96, 0x89, 0xde, + 0x71, 0x73, 0xeb, 0x62, 0xbd, 0xb0, 0xf8, 0x1b, 0x35, 0x62, 0x32, 0x43, 0x97, 0xf0, 0xe5, 0x03, + 0xf4, 0x77, 0x52, 0x7d, 0x90, 0x44, 0xb3, 0x94, 0xf1, 0x39, 0xcb, 0xc2, 0xc7, 0x96, 0xf2, 0xf9, + 0x32, 0xe5, 0x9b, 0x12, 0x85, 0x3d, 0x08, 0xfd, 0x0a, 0xe1, 0x43, 0x4e, 0x66, 0x5a, 0xe5, 0x61, + 0xe7, 0xb8, 0x75, 0xd2, 0x3d, 0x3b, 0x5a, 0xd3, 0x46, 0x55, 0x69, 0xf1, 0xe7, 0xe9, 0xb2, 0xc7, + 0xe7, 0x5a, 0xe5, 0x57, 0xed, 0x4e, 0xbb, 0xb7, 0x79, 0xd5, 0xee, 0x6c, 0xf6, 0xb6, 0xa2, 0xbf, + 0x1f, 0xc1, 0x93, 0x7b, 0xfd, 0x53, 0xe4, 0x4a, 0x16, 0x0c, 0x8d, 0xa0, 0x57, 0x8f, 0x82, 0x93, + 0xf9, 0x0a, 0xbe, 0xf8, 0xd8, 0x2c, 0x38, 0xf4, 0xe5, 0x06, 0xde, 0x5d, 0x0c, 0x83, 0x27, 0xfd, + 0x11, 0xba, 0x05, 0xd3, 0x73, 0xa6, 0x89, 0xe0, 0x85, 0xf1, 0xc3, 0xf0, 0xb4, 0xc9, 0x37, 0xb2, + 0xea, 0xb7, 0xdc, 0x0e, 0x13, 0x14, 0x8b, 0x1b, 0x7a, 0x03, 0x7b, 0x63, 0x2a, 0x44, 0x42, 0xd3, + 0xbb, 0xda, 0xa1, 0x96, 0x25, 0x38, 0x68, 0x12, 0x5c, 0x78, 0x50, 0xc3, 0x8d, 0xde, 0x78, 0x49, + 0x36, 0x3c, 0x80, 0xfe, 0xd2, 0x5c, 0x39, 0x85, 0x1b, 0x2c, 0x04, 0xbd, 0x65, 0x96, 0xe8, 0x9f, + 0x00, 0xfa, 0xeb, 0x63, 0x45, 0xdf, 0xc3, 0xd3, 0x7b, 0x3b, 0x8b, 0x64, 0x4c, 0xb0, 0x09, 0x35, + 0xd5, 0x00, 0x7e, 0xd6, 0x98, 0x23, 0x7d, 0xee, 0x75, 0xe8, 0x3d, 0x1c, 0x34, 0x97, 0x03, 0xd1, + 0x2c, 0x57, 0xda, 0x10, 0x2e, 0x0d, 0xd3, 0x73, 0x2a, 0x7c, 0x7e, 0xf6, 0x57, 0x26, 0xe6, 0xdc, + 0x6f, 0x3b, 0xbc, 0xdf, 0x58, 
0x16, 0xd8, 0x3e, 0x7e, 0xed, 0xdf, 0x46, 0x3f, 0x03, 0xd4, 0xb9, + 0x44, 0x2f, 0xe1, 0xb1, 0xcb, 0x65, 0x11, 0x06, 0xb6, 0x75, 0xd0, 0x6a, 0xd2, 0x71, 0x05, 0xb9, + 0x6a, 0x77, 0x5a, 0xbd, 0x76, 0xf4, 0x57, 0x00, 0x5b, 0x4e, 0x83, 0x9e, 0x03, 0xf0, 0x9c, 0xd0, + 0x2c, 0xd3, 0xac, 0x28, 0x6c, 0x48, 0x3b, 0x78, 0x9b, 0xe7, 0xbf, 0x38, 0x41, 0xb9, 0x6c, 0x4a, + 0xdb, 0xd6, 0xdf, 0x4d, 0x6c, 0xcf, 0x6b, 0xb6, 0x4a, 0x6b, 0xcd, 0x56, 0x41, 0xd0, 0xb6, 0x7d, + 0xdd, 0x3e, 0x0e, 0x4e, 0x3a, 0xd8, 0x9e, 0x5d, 0x7f, 0x9e, 0x25, 0xb0, 0xd3, 0x48, 0xb8, 0x46, + 0x18, 0xba, 0xfe, 0x5c, 0x8a, 0xd1, 0x61, 0x33, 0x8e, 0xd5, 0x3d, 0xd8, 0x3f, 0x5a, 0xab, 0x77, + 0x95, 0x3b, 0x09, 0xbe, 0x0d, 0x86, 0xef, 0xe0, 0x13, 0xae, 0x1a, 0xc0, 0xe1, 0x5e, 0xd3, 0xe4, + 0x75, 0x99, 0xf6, 0xeb, 0xe0, 0xfd, 0xa9, 0x2f, 0xc3, 0x44, 0x09, 0x2a, 0x27, 0xb1, 0xd2, 0x93, + 0x81, 0xfd, 0x65, 0x55, 0x35, 0xb7, 0x37, 0x91, 0xd8, 0x0f, 0x11, 0x09, 0x99, 0x9f, 0x26, 0x5b, + 0xb6, 0x64, 0xdf, 0xfd, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x09, 0x7b, 0x39, 0x1e, 0xdc, 0x06, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// LoadBalancerClient is the client API for LoadBalancer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LoadBalancerClient interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) +} + +type loadBalancerClient struct { + cc *grpc.ClientConn +} + +func NewLoadBalancerClient(cc *grpc.ClientConn) LoadBalancerClient { + return &loadBalancerClient{cc} +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { + stream, err := c.cc.NewStream(ctx, &_LoadBalancer_serviceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + if err != nil { + return nil, err + } + x := &loadBalancerBalanceLoadClient{stream} + return x, nil +} + +type LoadBalancer_BalanceLoadClient interface { + Send(*LoadBalanceRequest) error + Recv() (*LoadBalanceResponse, error) + grpc.ClientStream +} + +type loadBalancerBalanceLoadClient struct { + grpc.ClientStream +} + +func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { + m := new(LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// LoadBalancerServer is the server API for LoadBalancer service. +type LoadBalancerServer interface { + // Bidirectional rpc to get a list of servers. + BalanceLoad(LoadBalancer_BalanceLoadServer) error +} + +// UnimplementedLoadBalancerServer can be embedded to have forward compatible implementations. 
+type UnimplementedLoadBalancerServer struct { +} + +func (*UnimplementedLoadBalancerServer) BalanceLoad(srv LoadBalancer_BalanceLoadServer) error { + return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") +} + +func RegisterLoadBalancerServer(s *grpc.Server, srv LoadBalancerServer) { + s.RegisterService(&_LoadBalancer_serviceDesc, srv) +} + +func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) +} + +type LoadBalancer_BalanceLoadServer interface { + Send(*LoadBalanceResponse) error + Recv() (*LoadBalanceRequest, error) + grpc.ServerStream +} + +type loadBalancerBalanceLoadServer struct { + grpc.ServerStream +} + +func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { + m := new(LoadBalanceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _LoadBalancer_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.lb.v1.LoadBalancer", + HandlerType: (*LoadBalancerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "BalanceLoad", + Handler: _LoadBalancer_BalanceLoad_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/lb/v1/load_balancer.proto", +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go new file mode 100644 index 0000000000..219ca7235b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -0,0 +1,488 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./regenerate.sh + +// Package grpclb defines a grpclb balancer. +// +// To install grpclb balancer, import this package as: +// import _ "google.golang.org/grpc/balancer/grpclb" +package grpclb + +import ( + "context" + "errors" + "sync" + "time" + + durationpb "github.com/golang/protobuf/ptypes/duration" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +const ( + lbTokenKey = "lb-token" + defaultFallbackTimeout = 10 * time.Second + grpclbName = "grpclb" +) + +var errServerTerminatedConnection = errors.New("grpclb: failed to recv server list: server terminated connection") + +func convertDuration(d *durationpb.Duration) time.Duration { + if d == nil { + return 0 + } + return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond +} + +// Client API for LoadBalancer service. 
+// Mostly copied from generated pb.go file. +// To avoid circular dependency. +type loadBalancerClient struct { + cc *grpc.ClientConn +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (*balanceLoadClientStream, error) { + desc := &grpc.StreamDesc{ + StreamName: "BalanceLoad", + ServerStreams: true, + ClientStreams: true, + } + stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + if err != nil { + return nil, err + } + x := &balanceLoadClientStream{stream} + return x, nil +} + +type balanceLoadClientStream struct { + grpc.ClientStream +} + +func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { + m := new(lbpb.LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func init() { + balancer.Register(newLBBuilder()) + dns.EnableSRVLookups = true +} + +// newLBBuilder creates a builder for grpclb. +func newLBBuilder() balancer.Builder { + return newLBBuilderWithFallbackTimeout(defaultFallbackTimeout) +} + +// newLBBuilderWithFallbackTimeout creates a grpclb builder with the given +// fallbackTimeout. If no response is received from the remote balancer within +// fallbackTimeout, the backend addresses from the resolved address list will be +// used. +// +// Only call this function when a non-default fallback timeout is needed. +func newLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder { + return &lbBuilder{ + fallbackTimeout: fallbackTimeout, + } +} + +type lbBuilder struct { + fallbackTimeout time.Duration +} + +func (b *lbBuilder) Name() string { + return grpclbName +} + +func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + // This generates a manual resolver builder with a fixed scheme. This + // scheme will be used to dial to remote LB, so we can send filtered + // address updates to remote LB ClientConn using this manual resolver. + r := &lbManualResolver{scheme: "grpclb-internal", ccb: cc} + + lb := &lbBalancer{ + cc: newLBCacheClientConn(cc), + target: opt.Target.Endpoint, + opt: opt, + fallbackTimeout: b.fallbackTimeout, + doneCh: make(chan struct{}), + + manualResolver: r, + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + picker: &errPicker{err: balancer.ErrNoSubConnAvailable}, + clientStats: newRPCStats(), + backoff: backoff.DefaultExponential, // TODO: make backoff configurable. + } + + var err error + if opt.CredsBundle != nil { + lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) + if err != nil { + grpclog.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) + } + lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) + if err != nil { + grpclog.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) + } + } + + return lb +} + +var _ balancer.V2Balancer = (*lbBalancer)(nil) // Assert that we implement V2Balancer + +type lbBalancer struct { + cc *lbCacheClientConn + target string + opt balancer.BuildOptions + + usePickFirst bool + + // grpclbClientConnCreds is the creds bundle to be used to connect to grpclb + // servers. If it's nil, use the TransportCredentials from BuildOptions + // instead. 
+	grpclbClientConnCreds credentials.Bundle
+	// grpclbBackendCreds is the creds bundle to be used for addresses that are
+	// returned by grpclb server. If it's nil, don't set anything when creating
+	// SubConns.
+	grpclbBackendCreds credentials.Bundle
+
+	fallbackTimeout time.Duration
+	doneCh          chan struct{}
+
+	// manualResolver is used in the remote LB ClientConn inside grpclb. When
+	// resolved address updates are received by grpclb, filtered updates will be
+	// sent to the remote LB ClientConn through this resolver.
+	manualResolver *lbManualResolver
+	// The ClientConn to talk to the remote balancer.
+	ccRemoteLB *remoteBalancerCCWrapper
+	// backoff for calling remote balancer.
+	backoff backoff.Strategy
+
+	// Support client side load reporting. Each picker gets a reference to this,
+	// and will update its content.
+	clientStats *rpcStats
+
+	mu sync.Mutex // guards everything following.
+	// The full server list including drops, used to check if the newly received
+	// serverList contains anything new. Each generated picker will also have a
+	// reference to this list to do the first layer pick.
+	fullServerList []*lbpb.Server
+	// Backend addresses. It's kept so the addresses are available when
+	// switching between round_robin and pickfirst.
+	backendAddrs []resolver.Address
+	// All backend addresses, with metadata set to nil. This list contains all
+	// backend addresses in the same order and with the same duplicates as in
+	// serverlist. When generating the picker, a SubConn slice with the same
+	// order but with only READY SCs will be generated.
+	backendAddrsWithoutMetadata []resolver.Address
+	// Roundrobin functionalities.
+	state    connectivity.State
+	subConns map[resolver.Address]balancer.SubConn   // Used to new/remove SubConn.
+	scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
+	picker   balancer.V2Picker
+	// Support fallback to resolved backend addresses if there's no response
+	// from remote balancer within fallbackTimeout.
+	remoteBalancerConnected bool
+	serverListReceived      bool
+	inFallback              bool
+	// resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
+	// when resolved address updates are received, and read in the goroutine
+	// handling fallback.
+	resolvedBackendAddrs []resolver.Address
+}
+
+// regeneratePicker takes a snapshot of the balancer, and generates a picker
+// from it. The picker
+//  - always returns ErrTransientFailure if the balancer is in TransientFailure,
+//  - does a two-layer roundrobin pick otherwise.
+// Caller must hold lb.mu.
+func (lb *lbBalancer) regeneratePicker(resetDrop bool) {
+	if lb.state == connectivity.TransientFailure {
+		lb.picker = &errPicker{err: balancer.ErrTransientFailure}
+		return
+	}
+
+	if lb.state == connectivity.Connecting {
+		lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
+		return
+	}
+
+	var readySCs []balancer.SubConn
+	if lb.usePickFirst {
+		for _, sc := range lb.subConns {
+			readySCs = append(readySCs, sc)
+			break
+		}
+	} else {
+		for _, a := range lb.backendAddrsWithoutMetadata {
+			if sc, ok := lb.subConns[a]; ok {
+				if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
+					readySCs = append(readySCs, sc)
+				}
+			}
+		}
+	}
+
+	if len(readySCs) <= 0 {
+		// If there are no ready SubConns, always re-pick. This is to avoid
+		// drops unless at least one SubConn is ready. Otherwise we may drop
+		// more often than we want because of drops + re-picks (which become
+		// re-drops).
+		//
+		// This doesn't seem to be necessary after the connecting check above.
+		// Kept for safety.
+		lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
+		return
+	}
+	if lb.inFallback {
+		lb.picker = newRRPicker(readySCs)
+		return
+	}
+	if resetDrop {
+		lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats)
+		return
+	}
+	prevLBPicker, ok := lb.picker.(*lbPicker)
+	if !ok {
+		lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats)
+		return
+	}
+	prevLBPicker.updateReadySCs(readySCs)
+}
+
+// aggregateSubConnStates calculates the aggregated state of the SubConns in
+// lb.subConns. These SubConns are the subconns in use (when switching between
+// fallback and grpclb). lb.scStates contains states for all SubConns,
+// including those in cache (SubConns are cached for 10 seconds after remove).
+//
+// The aggregated state is:
+//  - If at least one SubConn is in Ready, the aggregated state is Ready;
+//  - Else if at least one SubConn is in Connecting, the aggregated state is Connecting;
+//  - Else the aggregated state is TransientFailure.
+func (lb *lbBalancer) aggregateSubConnStates() connectivity.State {
+	var numConnecting uint64
+
+	for _, sc := range lb.subConns {
+		if state, ok := lb.scStates[sc]; ok {
+			switch state {
+			case connectivity.Ready:
+				return connectivity.Ready
+			case connectivity.Connecting:
+				numConnecting++
+			}
+		}
+	}
+	if numConnecting > 0 {
+		return connectivity.Connecting
+	}
+	return connectivity.TransientFailure
+}
+
+func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+	panic("not used")
+}
+
+func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) {
+	s := scs.ConnectivityState
+	if grpclog.V(2) {
+		grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
+	}
+	lb.mu.Lock()
+	defer lb.mu.Unlock()
+
+	oldS, ok := lb.scStates[sc]
+	if !ok {
+		if grpclog.V(2) {
+			grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+		}
+		return
+	}
+	lb.scStates[sc] = s
+	switch s {
+	case connectivity.Idle:
+		sc.Connect()
+	case connectivity.Shutdown:
+		// When an address was removed by the resolver, the balancer called
+		// RemoveSubConn but kept the sc's state in scStates. Remove the state
+		// for this sc here.
+		delete(lb.scStates, sc)
+	}
+	// Force regenerate picker if
+	//  - this sc became ready from not-ready
+	//  - this sc became not-ready from ready
+	lb.updateStateAndPicker((oldS == connectivity.Ready) != (s == connectivity.Ready), false)
+
+	// Enter fallback when the aggregated state is not Ready and the connection
+	// to the remote balancer is lost.
+	if lb.state != connectivity.Ready {
+		if !lb.inFallback && !lb.remoteBalancerConnected {
+			// Enter fallback.
+			lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst)
+		}
+	}
+}
+
+// updateStateAndPicker re-calculates the aggregated state, and regenerates the
+// picker if the overall state has changed.
+//
+// If forceRegeneratePicker is true, the picker will be regenerated.
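+//
+// Illustrative example (editor's note): when a SubConn transitions from READY
+// to TRANSIENT_FAILURE, UpdateSubConnState calls this with
+// forceRegeneratePicker set to true, so a new picker is built even if the
+// aggregated state happens to stay the same.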
+func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop bool) { + oldAggrState := lb.state + lb.state = lb.aggregateSubConnStates() + // Regenerate picker when one of the following happens: + // - caller wants to regenerate + // - the aggregated state changed + if forceRegeneratePicker || (lb.state != oldAggrState) { + lb.regeneratePicker(resetDrop) + } + + lb.cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) +} + +// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use +// resolved backends (backends received from resolver, not from remote balancer) +// if no connection to remote balancers was successful. +func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) { + timer := time.NewTimer(fallbackTimeout) + defer timer.Stop() + select { + case <-timer.C: + case <-lb.doneCh: + return + } + lb.mu.Lock() + if lb.inFallback || lb.serverListReceived { + lb.mu.Unlock() + return + } + // Enter fallback. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + lb.mu.Unlock() +} + +// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB +// clientConn. The remoteLB clientConn will handle creating/removing remoteLB +// connections. +func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + panic("not used") +} + +func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { + lb.mu.Lock() + defer lb.mu.Unlock() + + newUsePickFirst := childIsPickFirst(gc) + if lb.usePickFirst == newUsePickFirst { + return + } + if grpclog.V(2) { + grpclog.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) + } + lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) +} + +func (lb *lbBalancer) ResolverError(error) { + // Ignore resolver errors. GRPCLB is not selected unless the resolver + // works at least once. +} + +func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + if grpclog.V(2) { + grpclog.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) + } + gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) + lb.handleServiceConfig(gc) + + addrs := ccs.ResolverState.Addresses + if len(addrs) == 0 { + // There should be at least one address, either grpclb server or + // fallback. Empty address is not valid. + return balancer.ErrBadResolverState + } + + var remoteBalancerAddrs, backendAddrs []resolver.Address + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + a.Type = resolver.Backend + remoteBalancerAddrs = append(remoteBalancerAddrs, a) + } else { + backendAddrs = append(backendAddrs, a) + } + } + + if len(remoteBalancerAddrs) == 0 { + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.close() + lb.ccRemoteLB = nil + } + } else if lb.ccRemoteLB == nil { + // First time receiving resolved addresses, create a cc to remote + // balancers. + lb.newRemoteBalancerCCWrapper() + // Start the fallback goroutine. + go lb.fallbackToBackendsAfter(lb.fallbackTimeout) + } + + if lb.ccRemoteLB != nil { + // cc to remote balancers uses lb.manualResolver. Send the updated remote + // balancer addresses to it through manualResolver. + lb.manualResolver.UpdateState(resolver.State{Addresses: remoteBalancerAddrs}) + } + + lb.mu.Lock() + lb.resolvedBackendAddrs = backendAddrs + if len(remoteBalancerAddrs) == 0 || lb.inFallback { + // If there's no remote balancer address in ClientConn update, grpclb + // enters fallback mode immediately. 
+ // + // If a new update is received while grpclb is in fallback, update the + // list of backends being used to the new fallback backends. + lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) + } + lb.mu.Unlock() + return nil +} + +func (lb *lbBalancer) Close() { + select { + case <-lb.doneCh: + return + default: + } + close(lb.doneCh) + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.close() + } + lb.cc.close() +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go new file mode 100644 index 0000000000..aac3719631 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "encoding/json" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/serviceconfig" +) + +const ( + roundRobinName = roundrobin.Name + pickFirstName = grpc.PickFirstBalancerName +) + +type grpclbServiceConfig struct { + serviceconfig.LoadBalancingConfig + ChildPolicy *[]map[string]json.RawMessage +} + +func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + ret := &grpclbServiceConfig{} + if err := json.Unmarshal(lbConfig, ret); err != nil { + return nil, err + } + return ret, nil +} + +func childIsPickFirst(sc *grpclbServiceConfig) bool { + if sc == nil { + return false + } + childConfigs := sc.ChildPolicy + if childConfigs == nil { + return false + } + for _, childC := range *childConfigs { + // If round_robin exists before pick_first, return false + if _, ok := childC[roundRobinName]; ok { + return false + } + // If pick_first is before round_robin, return true + if _, ok := childC[pickFirstName]; ok { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go new file mode 100644 index 0000000000..39bc5cc71e --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -0,0 +1,202 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package grpclb
+
+import (
+	"sync"
+	"sync/atomic"
+
+	"google.golang.org/grpc/balancer"
+	lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/status"
+)
+
+// rpcStats is the same as lbpb.ClientStats, except that numCallsDropped is a
+// map instead of a slice.
+type rpcStats struct {
+	// Only access the following fields atomically.
+	numCallsStarted                        int64
+	numCallsFinished                       int64
+	numCallsFinishedWithClientFailedToSend int64
+	numCallsFinishedKnownReceived          int64
+
+	mu sync.Mutex
+	// map load_balance_token -> num_calls_dropped
+	numCallsDropped map[string]int64
+}
+
+func newRPCStats() *rpcStats {
+	return &rpcStats{
+		numCallsDropped: make(map[string]int64),
+	}
+}
+
+func isZeroStats(stats *lbpb.ClientStats) bool {
+	return len(stats.CallsFinishedWithDrop) == 0 &&
+		stats.NumCallsStarted == 0 &&
+		stats.NumCallsFinished == 0 &&
+		stats.NumCallsFinishedWithClientFailedToSend == 0 &&
+		stats.NumCallsFinishedKnownReceived == 0
+}
+
+// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats.
+func (s *rpcStats) toClientStats() *lbpb.ClientStats {
+	stats := &lbpb.ClientStats{
+		NumCallsStarted:                        atomic.SwapInt64(&s.numCallsStarted, 0),
+		NumCallsFinished:                       atomic.SwapInt64(&s.numCallsFinished, 0),
+		NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.numCallsFinishedWithClientFailedToSend, 0),
+		NumCallsFinishedKnownReceived:          atomic.SwapInt64(&s.numCallsFinishedKnownReceived, 0),
+	}
+	s.mu.Lock()
+	dropped := s.numCallsDropped
+	s.numCallsDropped = make(map[string]int64)
+	s.mu.Unlock()
+	for token, count := range dropped {
+		stats.CallsFinishedWithDrop = append(stats.CallsFinishedWithDrop, &lbpb.ClientStatsPerToken{
+			LoadBalanceToken: token,
+			NumCalls:         count,
+		})
+	}
+	return stats
+}
+
+func (s *rpcStats) drop(token string) {
+	atomic.AddInt64(&s.numCallsStarted, 1)
+	s.mu.Lock()
+	s.numCallsDropped[token]++
+	s.mu.Unlock()
+	atomic.AddInt64(&s.numCallsFinished, 1)
+}
+
+func (s *rpcStats) failedToSend() {
+	atomic.AddInt64(&s.numCallsStarted, 1)
+	atomic.AddInt64(&s.numCallsFinishedWithClientFailedToSend, 1)
+	atomic.AddInt64(&s.numCallsFinished, 1)
+}
+
+func (s *rpcStats) knownReceived() {
+	atomic.AddInt64(&s.numCallsStarted, 1)
+	atomic.AddInt64(&s.numCallsFinishedKnownReceived, 1)
+	atomic.AddInt64(&s.numCallsFinished, 1)
+}
+
+type errPicker struct {
+	// Pick always returns this err.
+	err error
+}
+
+func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	return balancer.PickResult{}, p.err
+}
+
+// rrPicker does roundrobin on subConns. It's typically used when there's no
+// response from the remote balancer, and grpclb falls back to the resolved
+// backends.
+//
+// It is guaranteed that len(subConns) > 0.
+type rrPicker struct {
+	mu           sync.Mutex
+	subConns     []balancer.SubConn // The subConns that were READY when taking the snapshot.
+	subConnsNext int
+}
+
+func newRRPicker(readySCs []balancer.SubConn) *rrPicker {
+	return &rrPicker{
+		subConns:     readySCs,
+		subConnsNext: grpcrand.Intn(len(readySCs)),
+	}
+}
+
+func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	sc := p.subConns[p.subConnsNext]
+	p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
+	return balancer.PickResult{SubConn: sc}, nil
+}
+
+// lbPicker does two layers of picks:
+//
+// First layer: roundrobin on all servers in serverList, including drops and backends.
+// - If it picks a drop, the RPC will fail as being dropped. +// - If it picks a backend, do a second layer pick to pick the real backend. +// +// Second layer: roundrobin on all READY backends. +// +// It's guaranteed that len(serverList) > 0. +type lbPicker struct { + mu sync.Mutex + serverList []*lbpb.Server + serverListNext int + subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. + subConnsNext int + + stats *rpcStats +} + +func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats *rpcStats) *lbPicker { + return &lbPicker{ + serverList: serverList, + subConns: readySCs, + subConnsNext: grpcrand.Intn(len(readySCs)), + stats: stats, + } +} + +func (p *lbPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + p.mu.Lock() + defer p.mu.Unlock() + + // Layer one roundrobin on serverList. + s := p.serverList[p.serverListNext] + p.serverListNext = (p.serverListNext + 1) % len(p.serverList) + + // If it's a drop, return an error and fail the RPC. + if s.Drop { + p.stats.drop(s.LoadBalanceToken) + return balancer.PickResult{}, status.Errorf(codes.Unavailable, "request dropped by grpclb") + } + + // If not a drop but there's no ready subConns. + if len(p.subConns) <= 0 { + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + + // Return the next ready subConn in the list, also collect rpc stats. + sc := p.subConns[p.subConnsNext] + p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) + done := func(info balancer.DoneInfo) { + if !info.BytesSent { + p.stats.failedToSend() + } else if info.BytesReceived { + p.stats.knownReceived() + } + } + return balancer.PickResult{SubConn: sc, Done: done}, nil +} + +func (p *lbPicker) updateReadySCs(readySCs []balancer.SubConn) { + p.mu.Lock() + defer p.mu.Unlock() + + p.subConns = readySCs + p.subConnsNext = p.subConnsNext % len(readySCs) +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go new file mode 100644 index 0000000000..e70ce75007 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -0,0 +1,407 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package grpclb
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
+	"github.com/google/go-cmp/cmp"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/balancer"
+	lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+)
+
+// processServerList updates the balancer's internal state, creates/removes
+// SubConns, and regenerates the picker using the received serverList.
+func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
+	if grpclog.V(2) {
+		grpclog.Infof("lbBalancer: processing server list: %+v", l)
+	}
+	lb.mu.Lock()
+	defer lb.mu.Unlock()
+
+	// Set serverListReceived to true so fallback will not take effect if it has
+	// not hit timeout.
+	lb.serverListReceived = true
+
+	// If the new server list == old server list, do nothing.
+	if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) {
+		if grpclog.V(2) {
+			grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring")
+		}
+		return
+	}
+	lb.fullServerList = l.Servers
+
+	var backendAddrs []resolver.Address
+	for i, s := range l.Servers {
+		if s.Drop {
+			continue
+		}
+
+		md := metadata.Pairs(lbTokenKey, s.LoadBalanceToken)
+		ip := net.IP(s.IpAddress)
+		ipStr := ip.String()
+		if ip.To4() == nil {
+			// Add square brackets to ipv6 addresses, otherwise net.Dial() and
+			// net.SplitHostPort() will return a "too many colons" error.
+			ipStr = fmt.Sprintf("[%s]", ipStr)
+		}
+		addr := resolver.Address{
+			Addr:     fmt.Sprintf("%s:%d", ipStr, s.Port),
+			Metadata: &md,
+		}
+		if grpclog.V(2) {
+			grpclog.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|",
+				i, ipStr, s.Port, s.LoadBalanceToken)
+		}
+		backendAddrs = append(backendAddrs, addr)
+	}
+
+	// Call refreshSubConns to create/remove SubConns. If we are in fallback,
+	// this is also exiting fallback.
+	lb.refreshSubConns(backendAddrs, false, lb.usePickFirst)
+}
+
+// refreshSubConns creates/removes SubConns with backendAddrs, and refreshes
+// balancer state and picker.
+//
+// Caller must hold lb.mu.
+func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback bool, pickFirst bool) {
+	opts := balancer.NewSubConnOptions{}
+	if !fallback {
+		opts.CredsBundle = lb.grpclbBackendCreds
+	}
+
+	lb.backendAddrs = backendAddrs
+	lb.backendAddrsWithoutMetadata = nil
+
+	fallbackModeChanged := lb.inFallback != fallback
+	lb.inFallback = fallback
+	if fallbackModeChanged && lb.inFallback {
+		// Clear the previously received list when entering fallback, so if the
+		// server comes back and sends the same list again, the new addresses
+		// will be used.
+		lb.fullServerList = nil
+	}
+
+	balancingPolicyChanged := lb.usePickFirst != pickFirst
+	oldUsePickFirst := lb.usePickFirst
+	lb.usePickFirst = pickFirst
+
+	if fallbackModeChanged || balancingPolicyChanged {
+		// Remove all SubConns when switching balancing policy or switching
+		// fallback mode.
+		//
+		// For fallback mode switching with pickfirst, we want to recreate the
+		// SubConn because the creds could be different.
+		for a, sc := range lb.subConns {
+			if oldUsePickFirst {
+				// If the old SubConn was created for pickfirst, bypass the
+				// cache and remove it directly.
+				lb.cc.cc.RemoveSubConn(sc)
+			} else {
+				lb.cc.RemoveSubConn(sc)
+			}
+			delete(lb.subConns, a)
+		}
+	}
+
+	if lb.usePickFirst {
+		var sc balancer.SubConn
+		for _, sc = range lb.subConns {
+			break
+		}
+		if sc != nil {
+			sc.UpdateAddresses(backendAddrs)
+			sc.Connect()
+			return
+		}
+		// This bypasses the cc wrapper with SubConn cache.
+		sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts)
+		if err != nil {
+			grpclog.Warningf("grpclb: failed to create new SubConn: %v", err)
+			return
+		}
+		sc.Connect()
+		lb.subConns[backendAddrs[0]] = sc
+		lb.scStates[sc] = connectivity.Idle
+		return
+	}
+
+	// addrsSet is the set converted from backendAddrsWithoutMetadata; it's
+	// used for quick lookup of an address.
+	addrsSet := make(map[resolver.Address]struct{})
+	// Create new SubConns.
+	for _, addr := range backendAddrs {
+		addrWithoutMD := addr
+		addrWithoutMD.Metadata = nil
+		addrsSet[addrWithoutMD] = struct{}{}
+		lb.backendAddrsWithoutMetadata = append(lb.backendAddrsWithoutMetadata, addrWithoutMD)
+
+		if _, ok := lb.subConns[addrWithoutMD]; !ok {
+			// Use addrWithMD to create the SubConn.
+			sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts)
+			if err != nil {
+				grpclog.Warningf("grpclb: failed to create new SubConn: %v", err)
+				continue
+			}
+			lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map.
+			if _, ok := lb.scStates[sc]; !ok {
+				// Only set the state of a new sc to IDLE. The state could
+				// already be READY for cached SubConns.
+				lb.scStates[sc] = connectivity.Idle
+			}
+			sc.Connect()
+		}
+	}
+
+	for a, sc := range lb.subConns {
+		// a was removed by the resolver.
+		if _, ok := addrsSet[a]; !ok {
+			lb.cc.RemoveSubConn(sc)
+			delete(lb.subConns, a)
+			// Keep the state of this sc in lb.scStates until sc's state
+			// becomes Shutdown. The entry will be deleted in
+			// UpdateSubConnState.
+		}
+	}
+
+	// Regenerate and update the picker after refreshing subconns because with
+	// cache, even if a SubConn was created/removed, there might be no state
+	// changes (the SubConn will be kept in cache, not actually
+	// created/removed).
+	lb.updateStateAndPicker(true, true)
+}
+
+type remoteBalancerCCWrapper struct {
+	cc      *grpc.ClientConn
+	lb      *lbBalancer
+	backoff backoff.Strategy
+	done    chan struct{}
+
+	// waitgroup to wait for all goroutines to exit.
+	wg sync.WaitGroup
+}
+
+func (lb *lbBalancer) newRemoteBalancerCCWrapper() {
+	var dopts []grpc.DialOption
+	if creds := lb.opt.DialCreds; creds != nil {
+		dopts = append(dopts, grpc.WithTransportCredentials(creds))
+	} else if bundle := lb.grpclbClientConnCreds; bundle != nil {
+		dopts = append(dopts, grpc.WithCredentialsBundle(bundle))
+	} else {
+		dopts = append(dopts, grpc.WithInsecure())
+	}
+	if lb.opt.Dialer != nil {
+		dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer))
+	}
+	// Explicitly set pickfirst as the balancer.
+	dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`))
+	dopts = append(dopts, grpc.WithResolvers(lb.manualResolver))
+	if channelz.IsOn() {
+		dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID))
+	}
+
+	// Enable Keepalive for grpclb client.
+	dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{
+		Time:                20 * time.Second,
+		Timeout:             10 * time.Second,
+		PermitWithoutStream: true,
+	}))
+
+	// The dial target is not important.
+	//
+	// The grpclb server addresses will set field ServerName, and creds will
+	// receive ServerName as authority.
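+	//
+	// For example (editor's note): with the manual resolver built in Build
+	// above, the target evaluates to "grpclb-internal:///grpclb.subClientConn";
+	// only the scheme is meaningful, the path is a placeholder.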
+	cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...)
+	if err != nil {
+		grpclog.Fatalf("failed to dial: %v", err)
+	}
+	ccw := &remoteBalancerCCWrapper{
+		cc:      cc,
+		lb:      lb,
+		backoff: lb.backoff,
+		done:    make(chan struct{}),
+	}
+	lb.ccRemoteLB = ccw
+	ccw.wg.Add(1)
+	go ccw.watchRemoteBalancer()
+}
+
+// close closes the ClientConn to the remote balancer, and waits for all
+// goroutines to finish.
+func (ccw *remoteBalancerCCWrapper) close() {
+	close(ccw.done)
+	ccw.cc.Close()
+	ccw.wg.Wait()
+}
+
+func (ccw *remoteBalancerCCWrapper) readServerList(s *balanceLoadClientStream) error {
+	for {
+		reply, err := s.Recv()
+		if err != nil {
+			if err == io.EOF {
+				return errServerTerminatedConnection
+			}
+			return fmt.Errorf("grpclb: failed to recv server list: %v", err)
+		}
+		if serverList := reply.GetServerList(); serverList != nil {
+			ccw.lb.processServerList(serverList)
+		}
+	}
+}
+
+func (ccw *remoteBalancerCCWrapper) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) {
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	lastZero := false
+	for {
+		select {
+		case <-ticker.C:
+		case <-s.Context().Done():
+			return
+		}
+		stats := ccw.lb.clientStats.toClientStats()
+		zero := isZeroStats(stats)
+		if zero && lastZero {
+			// Quash redundant empty load reports.
+			continue
+		}
+		lastZero = zero
+		t := time.Now()
+		stats.Timestamp = &timestamppb.Timestamp{
+			Seconds: t.Unix(),
+			Nanos:   int32(t.Nanosecond()),
+		}
+		if err := s.Send(&lbpb.LoadBalanceRequest{
+			LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
+				ClientStats: stats,
+			},
+		}); err != nil {
+			return
+		}
+	}
+}
+
+func (ccw *remoteBalancerCCWrapper) callRemoteBalancer() (backoff bool, _ error) {
+	lbClient := &loadBalancerClient{cc: ccw.cc}
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true))
+	if err != nil {
+		return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer: %v", err)
+	}
+	ccw.lb.mu.Lock()
+	ccw.lb.remoteBalancerConnected = true
+	ccw.lb.mu.Unlock()
+
+	// grpclb handshake on the stream.
+	initReq := &lbpb.LoadBalanceRequest{
+		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
+			InitialRequest: &lbpb.InitialLoadBalanceRequest{
+				Name: ccw.lb.target,
+			},
+		},
+	}
+	if err := stream.Send(initReq); err != nil {
+		return true, fmt.Errorf("grpclb: failed to send init request: %v", err)
+	}
+	reply, err := stream.Recv()
+	if err != nil {
+		return true, fmt.Errorf("grpclb: failed to recv init response: %v", err)
+	}
+	initResp := reply.GetInitialResponse()
+	if initResp == nil {
+		return true, fmt.Errorf("grpclb: reply from remote balancer did not include initial response")
+	}
+	if initResp.LoadBalancerDelegate != "" {
+		return true, fmt.Errorf("grpclb: delegation is not supported")
+	}
+
+	ccw.wg.Add(1)
+	go func() {
+		defer ccw.wg.Done()
+		if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
+			ccw.sendLoadReport(stream, d)
+		}
+	}()
+	// No backoff if init req/resp handshake was successful.
+ return false, ccw.readServerList(stream) +} + +func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { + defer ccw.wg.Done() + var retryCount int + for { + doBackoff, err := ccw.callRemoteBalancer() + select { + case <-ccw.done: + return + default: + if err != nil { + if err == errServerTerminatedConnection { + grpclog.Info(err) + } else { + grpclog.Warning(err) + } + } + } + // Trigger a re-resolve when the stream errors. + ccw.lb.cc.cc.ResolveNow(resolver.ResolveNowOptions{}) + + ccw.lb.mu.Lock() + ccw.lb.remoteBalancerConnected = false + ccw.lb.fullServerList = nil + // Enter fallback when connection to remote balancer is lost, and the + // aggregated state is not Ready. + if !ccw.lb.inFallback && ccw.lb.state != connectivity.Ready { + // Entering fallback. + ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst) + } + ccw.lb.mu.Unlock() + + if !doBackoff { + retryCount = 0 + continue + } + + timer := time.NewTimer(ccw.backoff.Backoff(retryCount)) // Copy backoff + select { + case <-timer.C: + case <-ccw.done: + timer.Stop() + return + } + retryCount++ + } +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go new file mode 100644 index 0000000000..636725e541 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclb + +import ( + "fmt" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// The parent ClientConn should re-resolve when grpclb loses connection to the +// remote balancer. When the ClientConn inside grpclb gets a TransientFailure, +// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's +// ResolveNow, and eventually results in re-resolve happening in parent +// ClientConn's resolver (DNS for example). +// +// parent +// ClientConn +// +-----------------------------------------------------------------+ +// | parent +---------------------------------+ | +// | DNS ClientConn | grpclb | | +// | resolver balancerWrapper | | | +// | + + | grpclb grpclb | | +// | | | | ManualResolver ClientConn | | +// | | | | + + | | +// | | | | | | Transient | | +// | | | | | | Failure | | +// | | | | | <--------- | | | +// | | | <--------------- | ResolveNow | | | +// | | <--------- | ResolveNow | | | | | +// | | ResolveNow | | | | | | +// | | | | | | | | +// | + + | + + | | +// | +---------------------------------+ | +// +-----------------------------------------------------------------+ + +// lbManualResolver is used by the ClientConn inside grpclb. It's a manual +// resolver with a special ResolveNow() function. +// +// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, +// so when grpclb client lose contact with remote balancers, the parent +// ClientConn's resolver will re-resolve. 
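+//
+// Sketch of the wiring (editor's note): the resolver is handed to the
+// remote-balancer ClientConn via grpc.WithResolvers in
+// newRemoteBalancerCCWrapper, and grpclb pushes filtered address updates with
+//
+//	lb.manualResolver.UpdateState(resolver.State{Addresses: remoteBalancerAddrs})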
+type lbManualResolver struct {
+	scheme string
+	ccr    resolver.ClientConn
+
+	ccb balancer.ClientConn
+}
+
+func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
+	r.ccr = cc
+	return r, nil
+}
+
+func (r *lbManualResolver) Scheme() string {
+	return r.scheme
+}
+
+// ResolveNow calls ResolveNow on the parent ClientConn.
+func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOptions) {
+	r.ccb.ResolveNow(o)
+}
+
+// Close is a noop for Resolver.
+func (*lbManualResolver) Close() {}
+
+// UpdateState calls cc.UpdateState.
+func (r *lbManualResolver) UpdateState(s resolver.State) {
+	r.ccr.UpdateState(s)
+}
+
+const subConnCacheTime = time.Second * 10
+
+// lbCacheClientConn is a wrapper around balancer.ClientConn with a SubConn
+// cache. SubConns will be kept in the cache for subConnCacheTime before being
+// removed.
+//
+// Its NewSubConn and RemoveSubConn methods check the cache first.
+type lbCacheClientConn struct {
+	cc      balancer.ClientConn
+	timeout time.Duration
+
+	mu sync.Mutex
+	// subConnCache only keeps subConns that are being deleted.
+	subConnCache  map[resolver.Address]*subConnCacheEntry
+	subConnToAddr map[balancer.SubConn]resolver.Address
+}
+
+type subConnCacheEntry struct {
+	sc balancer.SubConn
+
+	cancel        func()
+	abortDeleting bool
+}
+
+func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn {
+	return &lbCacheClientConn{
+		cc:            cc,
+		timeout:       subConnCacheTime,
+		subConnCache:  make(map[resolver.Address]*subConnCacheEntry),
+		subConnToAddr: make(map[balancer.SubConn]resolver.Address),
+	}
+}
+
+func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	if len(addrs) != 1 {
+		return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs))
+	}
+	addrWithoutMD := addrs[0]
+	addrWithoutMD.Metadata = nil
+
+	ccc.mu.Lock()
+	defer ccc.mu.Unlock()
+	if entry, ok := ccc.subConnCache[addrWithoutMD]; ok {
+		// If the entry is in subConnCache, the SubConn was being deleted. The
+		// cancel function will never be nil.
+		entry.cancel()
+		delete(ccc.subConnCache, addrWithoutMD)
+		return entry.sc, nil
+	}
+
+	scNew, err := ccc.cc.NewSubConn(addrs, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	ccc.subConnToAddr[scNew] = addrWithoutMD
+	return scNew, nil
+}
+
+func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) {
+	ccc.mu.Lock()
+	defer ccc.mu.Unlock()
+	addr, ok := ccc.subConnToAddr[sc]
+	if !ok {
+		return
+	}
+
+	if entry, ok := ccc.subConnCache[addr]; ok {
+		if entry.sc != sc {
+			// This could happen if NewSubConn was called multiple times for
+			// the same address, and those SubConns are all removed. We remove
+			// sc immediately here.
+			delete(ccc.subConnToAddr, sc)
+			ccc.cc.RemoveSubConn(sc)
+		}
+		return
+	}
+
+	entry := &subConnCacheEntry{
+		sc: sc,
+	}
+	ccc.subConnCache[addr] = entry
+
+	timer := time.AfterFunc(ccc.timeout, func() {
+		ccc.mu.Lock()
+		defer ccc.mu.Unlock()
+		if entry.abortDeleting {
+			return
+		}
+		ccc.cc.RemoveSubConn(sc)
+		delete(ccc.subConnToAddr, sc)
+		delete(ccc.subConnCache, addr)
+	})
+	entry.cancel = func() {
+		if !timer.Stop() {
+			// If stop was not successful, the timer has fired (this can only
+			// happen in a race). But the deleting function is blocked on
+			// ccc.mu because the mutex was held by the caller of this
+			// function.
+			//
+			// Set abortDeleting to true to abort the deleting function. When
+			// the lock is released, the deleting function will acquire the
+			// lock, check the value of abortDeleting and return.
+			entry.abortDeleting = true
+		}
+	}
+}
+
+func (ccc *lbCacheClientConn) UpdateState(s balancer.State) {
+	ccc.cc.UpdateState(s)
+}
+
+func (ccc *lbCacheClientConn) close() {
+	ccc.mu.Lock()
+	// Only cancel all existing timers. There's no need to remove SubConns.
+	for _, entry := range ccc.subConnCache {
+		entry.cancel()
+	}
+	ccc.mu.Unlock()
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/alts.go b/vendor/google.golang.org/grpc/credentials/alts/alts.go
new file mode 100644
index 0000000000..72c7f0b23f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/alts.go
@@ -0,0 +1,330 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package alts implements the ALTS credentials support provided by the gRPC
+// library, which encapsulates all the state needed by a client to
+// authenticate with a server using ALTS and make various assertions, e.g.,
+// about the client's identity, role, or whether it is authorized to make a
+// particular call.
+// This package is experimental.
+package alts
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/credentials"
+	core "google.golang.org/grpc/credentials/alts/internal"
+	"google.golang.org/grpc/credentials/alts/internal/handshaker"
+	"google.golang.org/grpc/credentials/alts/internal/handshaker/service"
+	altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	// hypervisorHandshakerServiceAddress represents the default ALTS gRPC
+	// handshaker service address in the hypervisor.
+	hypervisorHandshakerServiceAddress = "metadata.google.internal:8080"
+	// defaultTimeout specifies the server handshake timeout.
+	defaultTimeout = 30.0 * time.Second
+	// The following constants specify the minimum and maximum acceptable
+	// protocol versions.
+	protocolVersionMaxMajor = 2
+	protocolVersionMaxMinor = 1
+	protocolVersionMinMajor = 2
+	protocolVersionMinMinor = 1
+)
+
+var (
+	once          sync.Once
+	maxRPCVersion = &altspb.RpcProtocolVersions_Version{
+		Major: protocolVersionMaxMajor,
+		Minor: protocolVersionMaxMinor,
+	}
+	minRPCVersion = &altspb.RpcProtocolVersions_Version{
+		Major: protocolVersionMinMajor,
+		Minor: protocolVersionMinMinor,
+	}
+	// ErrUntrustedPlatform is returned from ClientHandshake and
+	// ServerHandshake when running on a platform where the trustworthiness of
+	// the handshaker service is not guaranteed.
+	ErrUntrustedPlatform = errors.New("ALTS: untrusted platform. ALTS is only supported on GCP")
+)
+
+// AuthInfo exposes security information from the ALTS handshake to the
+// application. This interface is to be implemented by ALTS. Users should not
+// need a brand new implementation of this interface. For situations like
+// testing, any new implementation should embed this interface.
This allows +// ALTS to add new methods to this interface. +type AuthInfo interface { + // ApplicationProtocol returns application protocol negotiated for the + // ALTS connection. + ApplicationProtocol() string + // RecordProtocol returns the record protocol negotiated for the ALTS + // connection. + RecordProtocol() string + // SecurityLevel returns the security level of the created ALTS secure + // channel. + SecurityLevel() altspb.SecurityLevel + // PeerServiceAccount returns the peer service account. + PeerServiceAccount() string + // LocalServiceAccount returns the local service account. + LocalServiceAccount() string + // PeerRPCVersions returns the RPC version supported by the peer. + PeerRPCVersions() *altspb.RpcProtocolVersions +} + +// ClientOptions contains the client-side options of an ALTS channel. These +// options will be passed to the underlying ALTS handshaker. +type ClientOptions struct { + // TargetServiceAccounts contains a list of expected target service + // accounts. + TargetServiceAccounts []string + // HandshakerServiceAddress represents the ALTS handshaker gRPC service + // address to connect to. + HandshakerServiceAddress string +} + +// DefaultClientOptions creates a new ClientOptions object with the default +// values. +func DefaultClientOptions() *ClientOptions { + return &ClientOptions{ + HandshakerServiceAddress: hypervisorHandshakerServiceAddress, + } +} + +// ServerOptions contains the server-side options of an ALTS channel. These +// options will be passed to the underlying ALTS handshaker. +type ServerOptions struct { + // HandshakerServiceAddress represents the ALTS handshaker gRPC service + // address to connect to. + HandshakerServiceAddress string +} + +// DefaultServerOptions creates a new ServerOptions object with the default +// values. +func DefaultServerOptions() *ServerOptions { + return &ServerOptions{ + HandshakerServiceAddress: hypervisorHandshakerServiceAddress, + } +} + +// altsTC is the credentials required for authenticating a connection using ALTS. +// It implements credentials.TransportCredentials interface. +type altsTC struct { + info *credentials.ProtocolInfo + side core.Side + accounts []string + hsAddress string +} + +// NewClientCreds constructs a client-side ALTS TransportCredentials object. +func NewClientCreds(opts *ClientOptions) credentials.TransportCredentials { + return newALTS(core.ClientSide, opts.TargetServiceAccounts, opts.HandshakerServiceAddress) +} + +// NewServerCreds constructs a server-side ALTS TransportCredentials object. +func NewServerCreds(opts *ServerOptions) credentials.TransportCredentials { + return newALTS(core.ServerSide, nil, opts.HandshakerServiceAddress) +} + +func newALTS(side core.Side, accounts []string, hsAddress string) credentials.TransportCredentials { + once.Do(func() { + vmOnGCP = isRunningOnGCP() + }) + + if hsAddress == "" { + hsAddress = hypervisorHandshakerServiceAddress + } + return &altsTC{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: "alts", + SecurityVersion: "1.0", + }, + side: side, + accounts: accounts, + hsAddress: hsAddress, + } +} + +// ClientHandshake implements the client side handshake protocol. +func (g *altsTC) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { + if !vmOnGCP { + return nil, nil, ErrUntrustedPlatform + } + + // Connecting to ALTS handshaker service. 
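+	// (Editor's note: g.hsAddress defaults to
+	// hypervisorHandshakerServiceAddress, i.e. "metadata.google.internal:8080",
+	// unless overridden via ClientOptions.HandshakerServiceAddress.)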
+	hsConn, err := service.Dial(g.hsAddress)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Do not close hsConn since it is shared with other handshakes.
+
+	// Possible context leak:
+	// The cancel function for the child context we create will only be
+	// called when a non-nil error is returned.
+	var cancel context.CancelFunc
+	ctx, cancel = context.WithCancel(ctx)
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	opts := handshaker.DefaultClientHandshakerOptions()
+	opts.TargetName = addr
+	opts.TargetServiceAccounts = g.accounts
+	opts.RPCVersions = &altspb.RpcProtocolVersions{
+		MaxRpcVersion: maxRPCVersion,
+		MinRpcVersion: minRPCVersion,
+	}
+	chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer func() {
+		if err != nil {
+			chs.Close()
+		}
+	}()
+	secConn, authInfo, err := chs.ClientHandshake(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+	altsAuthInfo, ok := authInfo.(AuthInfo)
+	if !ok {
+		return nil, nil, errors.New("client-side auth info is not of type alts.AuthInfo")
+	}
+	match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
+	if !match {
+		return nil, nil, fmt.Errorf("server-side RPC versions are not compatible with this client, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
+	}
+	return secConn, authInfo, nil
+}
+
+// ServerHandshake implements the server-side ALTS handshaker.
+func (g *altsTC) ServerHandshake(rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) {
+	if !vmOnGCP {
+		return nil, nil, ErrUntrustedPlatform
+	}
+	// Connecting to ALTS handshaker service.
+	hsConn, err := service.Dial(g.hsAddress)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Do not close hsConn since it's shared with other handshakes.
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
+	defer cancel()
+	opts := handshaker.DefaultServerHandshakerOptions()
+	opts.RPCVersions = &altspb.RpcProtocolVersions{
+		MaxRpcVersion: maxRPCVersion,
+		MinRpcVersion: minRPCVersion,
+	}
+	shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer func() {
+		if err != nil {
+			shs.Close()
+		}
+	}()
+	secConn, authInfo, err := shs.ServerHandshake(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+	altsAuthInfo, ok := authInfo.(AuthInfo)
+	if !ok {
+		return nil, nil, errors.New("server-side auth info is not of type alts.AuthInfo")
+	}
+	match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
+	if !match {
+		return nil, nil, fmt.Errorf("client-side RPC versions are not compatible with this server, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
+	}
+	return secConn, authInfo, nil
+}
+
+func (g *altsTC) Info() credentials.ProtocolInfo {
+	return *g.info
+}
+
+func (g *altsTC) Clone() credentials.TransportCredentials {
+	info := *g.info
+	var accounts []string
+	if g.accounts != nil {
+		accounts = make([]string, len(g.accounts))
+		copy(accounts, g.accounts)
+	}
+	return &altsTC{
+		info:      &info,
+		side:      g.side,
+		hsAddress: g.hsAddress,
+		accounts:  accounts,
+	}
+}
+
+func (g *altsTC) OverrideServerName(serverNameOverride string) error {
+	g.info.ServerName = serverNameOverride
+	return nil
+}
+
+// compareRPCVersions returns 0 if v1 == v2, 1 if v1 > v2, and -1 if v1 < v2.
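+//
+// For example (editor's note):
+//
+//	compareRPCVersions(&altspb.RpcProtocolVersions_Version{Major: 2, Minor: 1},
+//		&altspb.RpcProtocolVersions_Version{Major: 2, Minor: 0}) // returns 1
+//	compareRPCVersions(&altspb.RpcProtocolVersions_Version{Major: 1, Minor: 9},
+//		&altspb.RpcProtocolVersions_Version{Major: 2, Minor: 0}) // returns -1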
+func compareRPCVersions(v1, v2 *altspb.RpcProtocolVersions_Version) int {
+	switch {
+	case v1.GetMajor() > v2.GetMajor(),
+		v1.GetMajor() == v2.GetMajor() && v1.GetMinor() > v2.GetMinor():
+		return 1
+	case v1.GetMajor() < v2.GetMajor(),
+		v1.GetMajor() == v2.GetMajor() && v1.GetMinor() < v2.GetMinor():
+		return -1
+	}
+	return 0
+}
+
+// checkRPCVersions performs a version check between local and peer RPC
+// protocol versions. This function returns true if the check passes, which
+// means both parties agreed on a common RPC protocol to use, and false
+// otherwise. The function also returns the highest common RPC protocol
+// version both parties agreed on.
+func checkRPCVersions(local, peer *altspb.RpcProtocolVersions) (bool, *altspb.RpcProtocolVersions_Version) {
+	if local == nil || peer == nil {
+		grpclog.Error("invalid checkRPCVersions argument, either local or peer is nil.")
+		return false, nil
+	}
+
+	// maxCommonVersion is MIN(local.max, peer.max).
+	maxCommonVersion := local.GetMaxRpcVersion()
+	if compareRPCVersions(local.GetMaxRpcVersion(), peer.GetMaxRpcVersion()) > 0 {
+		maxCommonVersion = peer.GetMaxRpcVersion()
+	}
+
+	// minCommonVersion is MAX(local.min, peer.min).
+	minCommonVersion := peer.GetMinRpcVersion()
+	if compareRPCVersions(local.GetMinRpcVersion(), peer.GetMinRpcVersion()) > 0 {
+		minCommonVersion = local.GetMinRpcVersion()
+	}
+
+	if compareRPCVersions(maxCommonVersion, minCommonVersion) < 0 {
+		return false, nil
+	}
+	return true, maxCommonVersion
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go
new file mode 100644
index 0000000000..9c53d6b53f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go
@@ -0,0 +1,89 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package authinfo provides authentication information returned by
+// handshakers.
+package authinfo
+
+import (
+	"google.golang.org/grpc/credentials"
+	altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
+)
+
+var _ credentials.AuthInfo = (*altsAuthInfo)(nil)
+
+// altsAuthInfo exposes security information from the ALTS handshake to the
+// application. altsAuthInfo is immutable and implements credentials.AuthInfo.
+type altsAuthInfo struct {
+	p *altspb.AltsContext
+	credentials.CommonAuthInfo
+}
+
+// New returns a new altsAuthInfo object given handshaker results.
+func New(result *altspb.HandshakerResult) credentials.AuthInfo {
+	return newAuthInfo(result)
+}
+
+func newAuthInfo(result *altspb.HandshakerResult) *altsAuthInfo {
+	return &altsAuthInfo{
+		p: &altspb.AltsContext{
+			ApplicationProtocol: result.GetApplicationProtocol(),
+			RecordProtocol:      result.GetRecordProtocol(),
+			// TODO: assign security level from result.
+			SecurityLevel:       altspb.SecurityLevel_INTEGRITY_AND_PRIVACY,
+			PeerServiceAccount:  result.GetPeerIdentity().GetServiceAccount(),
+			LocalServiceAccount: result.GetLocalIdentity().GetServiceAccount(),
+			PeerRpcVersions:     result.GetPeerRpcVersions(),
+		},
+		CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity},
+	}
+}
+
+// AuthType identifies the context as providing ALTS authentication information.
+func (s *altsAuthInfo) AuthType() string {
+	return "alts"
+}
+
+// ApplicationProtocol returns the context's application protocol.
+func (s *altsAuthInfo) ApplicationProtocol() string {
+	return s.p.GetApplicationProtocol()
+}
+
+// RecordProtocol returns the context's record protocol.
+func (s *altsAuthInfo) RecordProtocol() string {
+	return s.p.GetRecordProtocol()
+}
+
+// SecurityLevel returns the context's security level.
+func (s *altsAuthInfo) SecurityLevel() altspb.SecurityLevel {
+	return s.p.GetSecurityLevel()
+}
+
+// PeerServiceAccount returns the context's peer service account.
+func (s *altsAuthInfo) PeerServiceAccount() string {
+	return s.p.GetPeerServiceAccount()
+}
+
+// LocalServiceAccount returns the context's local service account.
+func (s *altsAuthInfo) LocalServiceAccount() string {
+	return s.p.GetLocalServiceAccount()
+}
+
+// PeerRPCVersions returns the context's peer RPC versions.
+func (s *altsAuthInfo) PeerRPCVersions() *altspb.RpcProtocolVersions {
+	return s.p.GetPeerRpcVersions()
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go
new file mode 100644
index 0000000000..33fba81239
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/common.go
@@ -0,0 +1,69 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+//go:generate ./regenerate.sh
+
+// Package internal contains common core functionality for ALTS.
+package internal
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/credentials"
+)
+
+const (
+	// ClientSide identifies the client in this communication.
+	ClientSide Side = iota
+	// ServerSide identifies the server in this communication.
+	ServerSide
+)
+
+// PeerNotRespondingError is returned when a peer server is not responding
+// after a channel has been established. It is treated as a temporary
+// connection error and re-connection to the server should be attempted.
+var PeerNotRespondingError = &peerNotRespondingError{}
+
+// Side identifies the party's role: client or server.
+type Side int
+
+type peerNotRespondingError struct{}
+
+// Error returns an error message for the purpose of logging.
+func (e *peerNotRespondingError) Error() string {
+	return "peer server is not responding and re-connection should be attempted."
+}
+
+// Temporary indicates if this connection error is temporary or fatal.
+func (e *peerNotRespondingError) Temporary() bool {
+	return true
+}
+
+// Handshaker defines an ALTS handshaker interface.
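+//
+// A no-op test double might look like this (hypothetical, editor's sketch;
+// fakeHandshaker is not part of this package):
+//
+//	type fakeHandshaker struct{ conn net.Conn }
+//
+//	func (f *fakeHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
+//		return f.conn, nil, nil // no real handshake; test use only
+//	}
+//
+//	func (f *fakeHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
+//		return f.conn, nil, nil
+//	}
+//
+//	func (f *fakeHandshaker) Close() {}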
+type Handshaker interface {
+	// ClientHandshake starts and completes a client-side handshake and
+	// returns a secure connection and corresponding auth information.
+	ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error)
+	// ServerHandshake starts and completes a server-side handshake and
+	// returns a secure connection and corresponding auth information.
+	ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error)
+	// Close terminates the Handshaker. It should be called after the caller
+	// obtains the secure connection.
+	Close()
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
new file mode 100644
index 0000000000..43726e877b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
@@ -0,0 +1,131 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conn
+
+import (
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/binary"
+	"fmt"
+	"strconv"
+)
+
+// rekeyAEAD holds the necessary information for an AEAD based on
+// AES-GCM that performs nonce-based key derivation and XORs the
+// nonce with a random mask.
+type rekeyAEAD struct {
+	kdfKey     []byte
+	kdfCounter []byte
+	nonceMask  []byte
+	nonceBuf   []byte
+	gcmAEAD    cipher.AEAD
+}
+
+// KeySizeError signals that the given key does not have the correct size.
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+	return "alts/conn: invalid key size " + strconv.Itoa(int(k))
+}
+
+// newRekeyAEAD creates a new instance of aes128gcm with rekeying.
+// The key argument should be 44 bytes: the first 32 bytes are used as a key
+// for HKDF-expand and the remaining 12 bytes are used as a random mask for
+// the counter.
+func newRekeyAEAD(key []byte) (*rekeyAEAD, error) {
+	k := len(key)
+	if k != kdfKeyLen+nonceLen {
+		return nil, KeySizeError(k)
+	}
+	return &rekeyAEAD{
+		kdfKey:     key[:kdfKeyLen],
+		kdfCounter: make([]byte, kdfCounterLen),
+		nonceMask:  key[kdfKeyLen:],
+		nonceBuf:   make([]byte, nonceLen),
+		gcmAEAD:    nil,
+	}, nil
+}
+
+// Seal rekeys if nonce[2:8] differs from the value in the last call, masks
+// the nonce, and calls Seal for aes128gcm.
+func (s *rekeyAEAD) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
+	if err := s.rekeyIfRequired(nonce); err != nil {
+		panic(fmt.Sprintf("Rekeying failed with: %s", err.Error()))
+	}
+	maskNonce(s.nonceBuf, nonce, s.nonceMask)
+	return s.gcmAEAD.Seal(dst, s.nonceBuf, plaintext, additionalData)
+}
+
+// Open rekeys if nonce[2:8] differs from the value in the last call, masks
+// the nonce, and calls Open for aes128gcm.
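+//
+// Since the derivation counter is nonce[2:8], the low two bytes of the nonce
+// vary freely under a single derived key; a new AEAD key is therefore derived
+// at most once every 2^16 seal/open operations.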
+func (s *rekeyAEAD) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+	if err := s.rekeyIfRequired(nonce); err != nil {
+		return nil, err
+	}
+	maskNonce(s.nonceBuf, nonce, s.nonceMask)
+	return s.gcmAEAD.Open(dst, s.nonceBuf, ciphertext, additionalData)
+}
+
+// rekeyIfRequired creates a new aes128gcm AEAD if the existing AEAD is nil
+// or cannot be used with the given nonce.
+func (s *rekeyAEAD) rekeyIfRequired(nonce []byte) error {
+	newKdfCounter := nonce[kdfCounterOffset : kdfCounterOffset+kdfCounterLen]
+	if s.gcmAEAD != nil && bytes.Equal(newKdfCounter, s.kdfCounter) {
+		return nil
+	}
+	copy(s.kdfCounter, newKdfCounter)
+	a, err := aes.NewCipher(hkdfExpand(s.kdfKey, s.kdfCounter))
+	if err != nil {
+		return err
+	}
+	s.gcmAEAD, err = cipher.NewGCM(a)
+	return err
+}
+
+// maskNonce XORs the given nonce with the mask and stores the result in dst.
+func maskNonce(dst, nonce, mask []byte) {
+	nonce1 := binary.LittleEndian.Uint64(nonce[:sizeUint64])
+	nonce2 := binary.LittleEndian.Uint32(nonce[sizeUint64:])
+	mask1 := binary.LittleEndian.Uint64(mask[:sizeUint64])
+	mask2 := binary.LittleEndian.Uint32(mask[sizeUint64:])
+	binary.LittleEndian.PutUint64(dst[:sizeUint64], nonce1^mask1)
+	binary.LittleEndian.PutUint32(dst[sizeUint64:], nonce2^mask2)
+}
+
+// NonceSize returns the required nonce size.
+func (s *rekeyAEAD) NonceSize() int {
+	return s.gcmAEAD.NonceSize()
+}
+
+// Overhead returns the ciphertext overhead.
+func (s *rekeyAEAD) Overhead() int {
+	return s.gcmAEAD.Overhead()
+}
+
+// hkdfExpand computes the first 16 bytes of the HKDF-expand function
+// defined in RFC 5869.
+func hkdfExpand(key, info []byte) []byte {
+	mac := hmac.New(sha256.New, key)
+	mac.Write(info)
+	mac.Write([]byte{0x01})
+	return mac.Sum(nil)[:aeadKeyLen]
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go
new file mode 100644
index 0000000000..04e0adb6c9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go
@@ -0,0 +1,105 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conn
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+
+	core "google.golang.org/grpc/credentials/alts/internal"
+)
+
+const (
+	// Overflow length n in bytes: never encrypt more than 2^(n*8) frames
+	// (in each direction).
+	overflowLenAES128GCM = 5
+)
+
+// aes128gcm is the struct that holds the necessary information for the ALTS
+// record protocol. The counter value is NOT included in the payload during
+// the encryption and decryption operations.
+type aes128gcm struct {
+	// inCounter is used in ALTS record to check that incoming counters are
+	// as expected, since ALTS record guarantees that messages are unwrapped
+	// in the same order that the peer wrapped them.
+	inCounter  Counter
+	outCounter Counter
+	aead       cipher.AEAD
+}
+
+// NewAES128GCM creates an instance that uses aes128gcm for the ALTS record
+// protocol.
+func NewAES128GCM(side core.Side, key []byte) (ALTSRecordCrypto, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + a, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + return &aes128gcm{ + inCounter: NewInCounter(side, overflowLenAES128GCM), + outCounter: NewOutCounter(side, overflowLenAES128GCM), + aead: a, + }, nil +} + +// Encrypt is the encryption function. dst can contain bytes at the beginning of +// the ciphertext that will not be encrypted but will be authenticated. If dst +// has enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext do not +// overlap. +func (s *aes128gcm) Encrypt(dst, plaintext []byte) ([]byte, error) { + // If we need to allocate an output buffer, we want to include space for + // GCM tag to avoid forcing ALTS record to reallocate as well. + dlen := len(dst) + dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize) + seq, err := s.outCounter.Value() + if err != nil { + return nil, err + } + data := out[:len(plaintext)] + copy(data, plaintext) // data may alias plaintext + + // Seal appends the ciphertext and the tag to its first argument and + // returns the updated slice. However, SliceForAppend above ensures that + // dst has enough capacity to avoid a reallocation and copy due to the + // append. + dst = s.aead.Seal(dst[:dlen], seq, data, nil) + s.outCounter.Inc() + return dst, nil +} + +func (s *aes128gcm) EncryptionOverhead() int { + return GcmTagSize +} + +func (s *aes128gcm) Decrypt(dst, ciphertext []byte) ([]byte, error) { + seq, err := s.inCounter.Value() + if err != nil { + return nil, err + } + // If dst is equal to ciphertext[:0], ciphertext storage is reused. + plaintext, err := s.aead.Open(dst, seq, ciphertext, nil) + if err != nil { + return nil, ErrAuth + } + s.inCounter.Inc() + return plaintext, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go new file mode 100644 index 0000000000..6a9035ea25 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go @@ -0,0 +1,116 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conn + +import ( + "crypto/cipher" + + core "google.golang.org/grpc/credentials/alts/internal" +) + +const ( + // Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in + // each direction). + overflowLenAES128GCMRekey = 8 + nonceLen = 12 + aeadKeyLen = 16 + kdfKeyLen = 32 + kdfCounterOffset = 2 + kdfCounterLen = 6 + sizeUint64 = 8 +) + +// aes128gcmRekey is the struct that holds necessary information for ALTS record. +// The counter value is NOT included in the payload during the encryption and +// decryption operations. 
+type aes128gcmRekey struct {
+	// inCounter is used in ALTS record to check that incoming counters are
+	// as expected, since ALTS record guarantees that messages are unwrapped
+	// in the same order that the peer wrapped them.
+	inCounter  Counter
+	outCounter Counter
+	inAEAD     cipher.AEAD
+	outAEAD    cipher.AEAD
+}
+
+// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying
+// for the ALTS record protocol. The key argument should be 44 bytes: the
+// first 32 bytes are used as a key for HKDF-expand and the remaining 12
+// bytes are used as a random mask for the counter.
+func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) {
+	inCounter := NewInCounter(side, overflowLenAES128GCMRekey)
+	outCounter := NewOutCounter(side, overflowLenAES128GCMRekey)
+	inAEAD, err := newRekeyAEAD(key)
+	if err != nil {
+		return nil, err
+	}
+	outAEAD, err := newRekeyAEAD(key)
+	if err != nil {
+		return nil, err
+	}
+	return &aes128gcmRekey{
+		inCounter,
+		outCounter,
+		inAEAD,
+		outAEAD,
+	}, nil
+}
+
+// Encrypt is the encryption function. dst can contain bytes at the beginning
+// of the ciphertext that will not be encrypted but will be authenticated. If
+// dst has enough capacity to hold these bytes, the ciphertext, and the tag,
+// no allocation and copy operations will be performed. dst and plaintext do
+// not overlap.
+func (s *aes128gcmRekey) Encrypt(dst, plaintext []byte) ([]byte, error) {
+	// If we need to allocate an output buffer, we want to include space for
+	// the GCM tag to avoid forcing the ALTS record to reallocate as well.
+	dlen := len(dst)
+	dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize)
+	seq, err := s.outCounter.Value()
+	if err != nil {
+		return nil, err
+	}
+	data := out[:len(plaintext)]
+	copy(data, plaintext) // data may alias plaintext
+
+	// Seal appends the ciphertext and the tag to its first argument and
+	// returns the updated slice. However, SliceForAppend above ensures that
+	// dst has enough capacity to avoid a reallocation and copy due to the
+	// append.
+	dst = s.outAEAD.Seal(dst[:dlen], seq, data, nil)
+	s.outCounter.Inc()
+	return dst, nil
+}
+
+func (s *aes128gcmRekey) EncryptionOverhead() int {
+	return GcmTagSize
+}
+
+func (s *aes128gcmRekey) Decrypt(dst, ciphertext []byte) ([]byte, error) {
+	seq, err := s.inCounter.Value()
+	if err != nil {
+		return nil, err
+	}
+	plaintext, err := s.inAEAD.Open(dst, seq, ciphertext, nil)
+	if err != nil {
+		return nil, ErrAuth
+	}
+	s.inCounter.Inc()
+	return plaintext, nil
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go
new file mode 100644
index 0000000000..1795d0c9e3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conn
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+)
+
+const (
+	// GcmTagSize is the GCM tag size, which is the difference in length
+	// between the plaintext and the ciphertext. From crypto/cipher/gcm.go
+	// in the Go crypto library.
+	GcmTagSize = 16
+)
+
+// ErrAuth occurs on authentication failure.
+var ErrAuth = errors.New("message authentication failed")
+
+// SliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+func SliceForAppend(in []byte, n int) (head, tail []byte) {
+	if total := len(in) + n; cap(in) >= total {
+		head = in[:total]
+	} else {
+		head = make([]byte, total)
+		copy(head, in)
+	}
+	tail = head[len(in):]
+	return head, tail
+}
+
+// ParseFramedMsg parses the provided buffer and returns a frame of the format
+// msgLength+msg and any remaining bytes in that buffer.
+func ParseFramedMsg(b []byte, maxLen uint32) ([]byte, []byte, error) {
+	// If the size field is not complete, return the provided buffer as
+	// remaining buffer.
+	if len(b) < MsgLenFieldSize {
+		return nil, b, nil
+	}
+	msgLenField := b[:MsgLenFieldSize]
+	length := binary.LittleEndian.Uint32(msgLenField)
+	if length > maxLen {
+		return nil, nil, fmt.Errorf("received frame length %d exceeds the limit %d", length, maxLen)
+	}
+	if len(b) < int(length)+4 { // account for the first 4 msg length bytes.
+		// Frame is not complete yet.
+		return nil, b, nil
+	}
+	return b[:MsgLenFieldSize+length], b[MsgLenFieldSize+length:], nil
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go
new file mode 100644
index 0000000000..9f00aca0b6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conn
+
+import (
+	"errors"
+)
+
+const counterLen = 12
+
+var (
+	errInvalidCounter = errors.New("invalid counter")
+)
+
+// Counter is a 96-bit, little-endian counter.
+type Counter struct {
+	value       [counterLen]byte
+	invalid     bool
+	overflowLen int
+}
+
+// Value returns the current value of the counter as a byte slice.
+func (c *Counter) Value() ([]byte, error) {
+	if c.invalid {
+		return nil, errInvalidCounter
+	}
+	return c.value[:], nil
+}
+
+// Inc increments the counter and checks for overflow.
+func (c *Counter) Inc() {
+	// If the counter is already invalid, there is no need to increment it.
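+	// For example, with overflowLen = 5 the counter may be incremented
+	// 2^(5*8) times; once every byte in value[:5] has wrapped back to
+	// zero, invalid is set and Value() returns errInvalidCounter rather
+	// than ever reusing a counter value.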
+	if c.invalid {
+		return
+	}
+	i := 0
+	for ; i < c.overflowLen; i++ {
+		c.value[i]++
+		if c.value[i] != 0 {
+			break
+		}
+	}
+	if i == c.overflowLen {
+		c.invalid = true
+	}
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
new file mode 100644
index 0000000000..fd5a53d9a7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
@@ -0,0 +1,271 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package conn contains an implementation of a secure channel created by gRPC
+// handshakers.
+package conn
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math"
+	"net"
+
+	core "google.golang.org/grpc/credentials/alts/internal"
+)
+
+// ALTSRecordCrypto is the interface for the gRPC ALTS record protocol.
+type ALTSRecordCrypto interface {
+	// Encrypt encrypts the plaintext and computes the tag (if any) of dst
+	// and plaintext. dst and plaintext do not overlap.
+	Encrypt(dst, plaintext []byte) ([]byte, error)
+	// EncryptionOverhead returns the tag size (if any) in bytes.
+	EncryptionOverhead() int
+	// Decrypt decrypts the ciphertext and verifies the tag (if any). dst
+	// and ciphertext may alias exactly or not at all. To reuse
+	// ciphertext's storage for the decrypted output, use ciphertext[:0]
+	// as dst.
+	Decrypt(dst, ciphertext []byte) ([]byte, error)
+}
+
+// ALTSRecordFunc is a function type for factory functions that create
+// ALTSRecordCrypto instances.
+type ALTSRecordFunc func(s core.Side, keyData []byte) (ALTSRecordCrypto, error)
+
+const (
+	// MsgLenFieldSize is the byte size of the frame length field of a
+	// framed message.
+	MsgLenFieldSize = 4
+	// The byte size of the message type field of a framed message.
+	msgTypeFieldSize = 4
+	// The byte size limit for an ALTS record message.
+	altsRecordLengthLimit = 1024 * 1024 // 1 MiB
+	// The default byte size of an ALTS record message.
+	altsRecordDefaultLength = 4 * 1024 // 4 KiB
+	// Message type value included in ALTS record framing.
+	altsRecordMsgType = uint32(0x06)
+	// The initial write buffer size.
+	altsWriteBufferInitialSize = 32 * 1024 // 32 KiB
+	// The maximum write buffer size. This *must* be a multiple of
+	// altsRecordDefaultLength.
+	altsWriteBufferMaxSize = 512 * 1024 // 512 KiB
+)
+
+var (
+	protocols = make(map[string]ALTSRecordFunc)
+)
+
+// RegisterProtocol registers an ALTS record encryption protocol.
+func RegisterProtocol(protocol string, f ALTSRecordFunc) error {
+	if _, ok := protocols[protocol]; ok {
+		return fmt.Errorf("protocol %v is already registered", protocol)
+	}
+	protocols[protocol] = f
+	return nil
+}
+
+// conn represents a secured connection. It implements the net.Conn interface.
+type conn struct {
+	net.Conn
+	crypto ALTSRecordCrypto
+	// buf holds data that has been read from the connection and decrypted,
+	// but has not yet been returned by Read.
+	buf                []byte
+	payloadLengthLimit int
+	// protected holds data read from the network but not yet decrypted.
+	// This data might not contain a complete frame.
+	protected []byte
+	// writeBuf is a buffer used to contain encrypted frames before being
+	// written to the network.
+	writeBuf []byte
+	// nextFrame stores the next frame (in the protected buffer) info.
+	nextFrame []byte
+	// overhead is the calculated overhead of each frame.
+	overhead int
+}
+
+// NewConn creates a new secure channel instance given the other party's role
+// and the handshaking result.
+func NewConn(c net.Conn, side core.Side, recordProtocol string, key []byte, protected []byte) (net.Conn, error) {
+	newCrypto := protocols[recordProtocol]
+	if newCrypto == nil {
+		return nil, fmt.Errorf("negotiated unknown next_protocol %q", recordProtocol)
+	}
+	crypto, err := newCrypto(side, key)
+	if err != nil {
+		return nil, fmt.Errorf("protocol %q: %v", recordProtocol, err)
+	}
+	overhead := MsgLenFieldSize + msgTypeFieldSize + crypto.EncryptionOverhead()
+	payloadLengthLimit := altsRecordDefaultLength - overhead
+	if protected == nil {
+		// We pre-allocate protected to be of size
+		// 2*altsRecordDefaultLength-1 during initialization. We only
+		// read from the network into protected when protected does not
+		// contain a complete frame, which is at most
+		// altsRecordDefaultLength-1 (bytes). And we read at most
+		// altsRecordDefaultLength (bytes) data into protected at one
+		// time. Therefore, 2*altsRecordDefaultLength-1 is large enough
+		// to buffer data read from the network.
+		protected = make([]byte, 0, 2*altsRecordDefaultLength-1)
+	}
+
+	altsConn := &conn{
+		Conn:               c,
+		crypto:             crypto,
+		payloadLengthLimit: payloadLengthLimit,
+		protected:          protected,
+		writeBuf:           make([]byte, altsWriteBufferInitialSize),
+		nextFrame:          protected,
+		overhead:           overhead,
+	}
+	return altsConn, nil
+}
+
+// Read reads and decrypts a frame from the underlying connection, and copies
+// the decrypted payload into b. If the size of the payload is greater than
+// len(b), Read retains the remaining bytes in an internal buffer, and
+// subsequent calls to Read will read from this buffer until it is exhausted.
+func (p *conn) Read(b []byte) (n int, err error) {
+	if len(p.buf) == 0 {
+		var framedMsg []byte
+		framedMsg, p.nextFrame, err = ParseFramedMsg(p.nextFrame, altsRecordLengthLimit)
+		if err != nil {
+			return n, err
+		}
+		// Check whether the next frame to be decrypted has been
+		// completely received yet.
+		if len(framedMsg) == 0 {
+			copy(p.protected, p.nextFrame)
+			p.protected = p.protected[:len(p.nextFrame)]
+			// Always copy the next incomplete frame to the beginning
+			// of the protected buffer and reset nextFrame to it.
+			p.nextFrame = p.protected
+		}
+		// Check whether a complete frame has been received yet.
+		for len(framedMsg) == 0 {
+			if len(p.protected) == cap(p.protected) {
+				tmp := make([]byte, len(p.protected), cap(p.protected)+altsRecordDefaultLength)
+				copy(tmp, p.protected)
+				p.protected = tmp
+			}
+			n, err = p.Conn.Read(p.protected[len(p.protected):min(cap(p.protected), len(p.protected)+altsRecordDefaultLength)])
+			if err != nil {
+				return 0, err
+			}
+			p.protected = p.protected[:len(p.protected)+n]
+			framedMsg, p.nextFrame, err = ParseFramedMsg(p.protected, altsRecordLengthLimit)
+			if err != nil {
+				return 0, err
+			}
+		}
+		// Now we have a complete frame; decrypt it.
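+		// A framed message is laid out as: a 4-byte little-endian
+		// length field (counting everything after itself), a 4-byte
+		// message type field, and then the ciphertext plus tag.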
+ msg := framedMsg[MsgLenFieldSize:] + msgType := binary.LittleEndian.Uint32(msg[:msgTypeFieldSize]) + if msgType&0xff != altsRecordMsgType { + return 0, fmt.Errorf("received frame with incorrect message type %v, expected lower byte %v", + msgType, altsRecordMsgType) + } + ciphertext := msg[msgTypeFieldSize:] + + // Decrypt requires that if the dst and ciphertext alias, they + // must alias exactly. Code here used to use msg[:0], but msg + // starts MsgLenFieldSize+msgTypeFieldSize bytes earlier than + // ciphertext, so they alias inexactly. Using ciphertext[:0] + // arranges the appropriate aliasing without needing to copy + // ciphertext or use a separate destination buffer. For more info + // check: https://golang.org/pkg/crypto/cipher/#AEAD. + p.buf, err = p.crypto.Decrypt(ciphertext[:0], ciphertext) + if err != nil { + return 0, err + } + } + + n = copy(b, p.buf) + p.buf = p.buf[n:] + return n, nil +} + +// Write encrypts, frames, and writes bytes from b to the underlying connection. +func (p *conn) Write(b []byte) (n int, err error) { + n = len(b) + // Calculate the output buffer size with framing and encryption overhead. + numOfFrames := int(math.Ceil(float64(len(b)) / float64(p.payloadLengthLimit))) + size := len(b) + numOfFrames*p.overhead + // If writeBuf is too small, increase its size up to the maximum size. + partialBSize := len(b) + if size > altsWriteBufferMaxSize { + size = altsWriteBufferMaxSize + const numOfFramesInMaxWriteBuf = altsWriteBufferMaxSize / altsRecordDefaultLength + partialBSize = numOfFramesInMaxWriteBuf * p.payloadLengthLimit + } + if len(p.writeBuf) < size { + p.writeBuf = make([]byte, size) + } + + for partialBStart := 0; partialBStart < len(b); partialBStart += partialBSize { + partialBEnd := partialBStart + partialBSize + if partialBEnd > len(b) { + partialBEnd = len(b) + } + partialB := b[partialBStart:partialBEnd] + writeBufIndex := 0 + for len(partialB) > 0 { + payloadLen := len(partialB) + if payloadLen > p.payloadLengthLimit { + payloadLen = p.payloadLengthLimit + } + buf := partialB[:payloadLen] + partialB = partialB[payloadLen:] + + // Write buffer contains: length, type, payload, and tag + // if any. + + // 1. Fill in type field. + msg := p.writeBuf[writeBufIndex+MsgLenFieldSize:] + binary.LittleEndian.PutUint32(msg, altsRecordMsgType) + + // 2. Encrypt the payload and create a tag if any. + msg, err = p.crypto.Encrypt(msg[:msgTypeFieldSize], buf) + if err != nil { + return n, err + } + + // 3. Fill in the size field. + binary.LittleEndian.PutUint32(p.writeBuf[writeBufIndex:], uint32(len(msg))) + + // 4. Increase writeBufIndex. + writeBufIndex += len(buf) + p.overhead + } + nn, err := p.Conn.Write(p.writeBuf[:writeBufIndex]) + if err != nil { + // We need to calculate the actual data size that was + // written. This means we need to remove header, + // encryption overheads, and any partially-written + // frame data. + numOfWrittenFrames := int(math.Floor(float64(nn) / float64(altsRecordDefaultLength))) + return partialBStart + numOfWrittenFrames*p.payloadLengthLimit, err + } + } + return n, nil +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go new file mode 100644 index 0000000000..84821fa254 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conn
+
+import core "google.golang.org/grpc/credentials/alts/internal"
+
+// NewOutCounter returns an outgoing counter initialized to the starting
+// sequence number for the client/server side of a connection.
+func NewOutCounter(s core.Side, overflowLen int) (c Counter) {
+	c.overflowLen = overflowLen
+	if s == core.ServerSide {
+		// Server counters in ALTS record have the little-endian high
+		// bit set.
+		c.value[counterLen-1] = 0x80
+	}
+	return
+}
+
+// NewInCounter returns an incoming counter initialized to the starting
+// sequence number for the client/server side of a connection. This is used in
+// ALTS record to check that incoming counters are as expected, since ALTS
+// record guarantees that messages are unwrapped in the same order that the
+// peer wrapped them.
+func NewInCounter(s core.Side, overflowLen int) (c Counter) {
+	c.overflowLen = overflowLen
+	if s == core.ClientSide {
+		// A client's incoming counter mirrors the server's outgoing
+		// counter, and server counters in ALTS record have the
+		// little-endian high bit set.
+		c.value[counterLen-1] = 0x80
+	}
+	return
+}
+
+// CounterFromValue creates a new counter given an initial value.
+func CounterFromValue(value []byte, overflowLen int) (c Counter) {
+	c.overflowLen = overflowLen
+	copy(c.value[:], value)
+	return
+}
+
+// CounterSide returns the connection side (client/server) a sequence counter
+// is associated with.
+func CounterSide(c []byte) core.Side {
+	if c[counterLen-1]&0x80 == 0x80 {
+		return core.ServerSide
+	}
+	return core.ClientSide
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
new file mode 100644
index 0000000000..8bc7ceee0a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
@@ -0,0 +1,375 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package handshaker provides ALTS handshaking functionality for GCP.
+package handshaker
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+
+	grpc "google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	core "google.golang.org/grpc/credentials/alts/internal"
+	"google.golang.org/grpc/credentials/alts/internal/authinfo"
+	"google.golang.org/grpc/credentials/alts/internal/conn"
+	altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
+	altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
+)
+
+const (
+	// The maximum byte size of receive frames.
+	frameLimit              = 64 * 1024 // 64 KB
+	rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY"
+	// maxPendingHandshakes represents the maximum number of concurrent
+	// handshakes.
+	maxPendingHandshakes = 100
+)
+
+var (
+	hsProtocol      = altspb.HandshakeProtocol_ALTS
+	appProtocols    = []string{"grpc"}
+	recordProtocols = []string{rekeyRecordProtocolName}
+	keyLength       = map[string]int{
+		rekeyRecordProtocolName: 44,
+	}
+	altsRecordFuncs = map[string]conn.ALTSRecordFunc{
+		// ALTS handshaker protocols.
+		rekeyRecordProtocolName: func(s core.Side, keyData []byte) (conn.ALTSRecordCrypto, error) {
+			return conn.NewAES128GCMRekey(s, keyData)
+		},
+	}
+	// mu controls the number of concurrently created (but not closed)
+	// handshakers.
+	mu                   sync.Mutex
+	concurrentHandshakes = int64(0)
+	// errDropped occurs when maxPendingHandshakes is reached.
+	errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached")
+	// errOutOfBound occurs when the handshake service returns a consumed
+	// bytes value larger than the buffer that was passed to it originally.
+	errOutOfBound = errors.New("handshaker service consumed bytes value is out-of-bound")
+)
+
+func init() {
+	for protocol, f := range altsRecordFuncs {
+		if err := conn.RegisterProtocol(protocol, f); err != nil {
+			panic(err)
+		}
+	}
+}
+
+func acquire() bool {
+	mu.Lock()
+	// If we need n to be configurable, we can pass it as an argument.
+	n := int64(1)
+	success := maxPendingHandshakes-concurrentHandshakes >= n
+	if success {
+		concurrentHandshakes += n
+	}
+	mu.Unlock()
+	return success
+}
+
+func release() {
+	mu.Lock()
+	// If we need n to be configurable, we can pass it as an argument.
+	n := int64(1)
+	concurrentHandshakes -= n
+	if concurrentHandshakes < 0 {
+		mu.Unlock()
+		panic("bad release")
+	}
+	mu.Unlock()
+}
+
+// ClientHandshakerOptions contains the client handshaker options that can be
+// provided by the caller.
+type ClientHandshakerOptions struct {
+	// ClientIdentity is the handshaker client local identity.
+	ClientIdentity *altspb.Identity
+	// TargetName is the server service account name for secure name
+	// checking.
+	TargetName string
+	// TargetServiceAccounts contains a list of expected target service
+	// accounts. One of these accounts should match one of the accounts in
+	// the handshaker results. Otherwise, the handshake fails.
+	TargetServiceAccounts []string
+	// RPCVersions specifies the gRPC versions accepted by the client.
+	RPCVersions *altspb.RpcProtocolVersions
+}
+
+// ServerHandshakerOptions contains the server handshaker options that can be
+// provided by the caller.
+type ServerHandshakerOptions struct {
+	// RPCVersions specifies the gRPC versions accepted by the server.
+	RPCVersions *altspb.RpcProtocolVersions
+}
+
+// DefaultClientHandshakerOptions returns the default client handshaker options.
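+//
+// A caller would typically adjust the defaults before starting a handshake,
+// for example (the values below are hypothetical):
+//
+//	opts := DefaultClientHandshakerOptions()
+//	opts.TargetName = "my-backend"
+//	opts.TargetServiceAccounts = []string{"backend@example.iam.gserviceaccount.com"}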
+func DefaultClientHandshakerOptions() *ClientHandshakerOptions {
+	return &ClientHandshakerOptions{}
+}
+
+// DefaultServerHandshakerOptions returns the default server handshaker options.
+func DefaultServerHandshakerOptions() *ServerHandshakerOptions {
+	return &ServerHandshakerOptions{}
+}
+
+// TODO: add support for future local and remote endpoints in both client
+// options and server options (the server options struct does not exist now;
+// when the caller can provide endpoints, it should be created).
+
+// altsHandshaker is used to complete an ALTS handshake between client and
+// server. This handshaker talks to the ALTS handshaker service in the
+// metadata server.
+type altsHandshaker struct {
+	// stream is the RPC stream used to access the ALTS Handshaker service.
+	stream altsgrpc.HandshakerService_DoHandshakeClient
+	// conn is the connection to the peer.
+	conn net.Conn
+	// clientOpts holds the client handshake options.
+	clientOpts *ClientHandshakerOptions
+	// serverOpts holds the server handshake options.
+	serverOpts *ServerHandshakerOptions
+	// side defines the side doing the handshake, client or server.
+	side core.Side
+}
+
+// NewClientHandshaker creates an ALTS handshaker for GCP which contains an RPC
+// stub created using the passed conn and used to talk to the ALTS Handshaker
+// service in the metadata server.
+func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
+	stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return &altsHandshaker{
+		stream:     stream,
+		conn:       c,
+		clientOpts: opts,
+		side:       core.ClientSide,
+	}, nil
+}
+
+// NewServerHandshaker creates an ALTS handshaker for GCP which contains an RPC
+// stub created using the passed conn and used to talk to the ALTS Handshaker
+// service in the metadata server.
+func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
+	stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return &altsHandshaker{
+		stream:     stream,
+		conn:       c,
+		serverOpts: opts,
+		side:       core.ServerSide,
+	}, nil
+}
+
+// ClientHandshake starts and completes a client ALTS handshake for GCP. Once
+// done, ClientHandshake returns a secure connection.
+func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
+	if !acquire() {
+		return nil, nil, errDropped
+	}
+	defer release()
+
+	if h.side != core.ClientSide {
+		return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshake")
+	}
+
+	// Create target identities from the service account list.
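+	// For example, a TargetServiceAccounts value of
+	// []string{"backend@example.iam.gserviceaccount.com"} (a hypothetical
+	// account) becomes a single Identity whose IdentityOneof is
+	// Identity_ServiceAccount.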
+	targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts))
+	for _, account := range h.clientOpts.TargetServiceAccounts {
+		targetIdentities = append(targetIdentities, &altspb.Identity{
+			IdentityOneof: &altspb.Identity_ServiceAccount{
+				ServiceAccount: account,
+			},
+		})
+	}
+	req := &altspb.HandshakerReq{
+		ReqOneof: &altspb.HandshakerReq_ClientStart{
+			ClientStart: &altspb.StartClientHandshakeReq{
+				HandshakeSecurityProtocol: hsProtocol,
+				ApplicationProtocols:      appProtocols,
+				RecordProtocols:           recordProtocols,
+				TargetIdentities:          targetIdentities,
+				LocalIdentity:             h.clientOpts.ClientIdentity,
+				TargetName:                h.clientOpts.TargetName,
+				RpcVersions:               h.clientOpts.RPCVersions,
+			},
+		},
+	}
+
+	conn, result, err := h.doHandshake(req)
+	if err != nil {
+		return nil, nil, err
+	}
+	authInfo := authinfo.New(result)
+	return conn, authInfo, nil
+}
+
+// ServerHandshake starts and completes a server ALTS handshake for GCP. Once
+// done, ServerHandshake returns a secure connection.
+func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
+	if !acquire() {
+		return nil, nil, errDropped
+	}
+	defer release()
+
+	if h.side != core.ServerSide {
+		return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshake")
+	}
+
+	p := make([]byte, frameLimit)
+	n, err := h.conn.Read(p)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Prepare server parameters.
+	// TODO: currently only ALTS parameters are provided. Might need to use
+	// more options in the future.
+	params := make(map[int32]*altspb.ServerHandshakeParameters)
+	params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{
+		RecordProtocols: recordProtocols,
+	}
+	req := &altspb.HandshakerReq{
+		ReqOneof: &altspb.HandshakerReq_ServerStart{
+			ServerStart: &altspb.StartServerHandshakeReq{
+				ApplicationProtocols: appProtocols,
+				HandshakeParameters:  params,
+				InBytes:              p[:n],
+				RpcVersions:          h.serverOpts.RPCVersions,
+			},
+		},
+	}
+
+	conn, result, err := h.doHandshake(req)
+	if err != nil {
+		return nil, nil, err
+	}
+	authInfo := authinfo.New(result)
+	return conn, authInfo, nil
+}
+
+func (h *altsHandshaker) doHandshake(req *altspb.HandshakerReq) (net.Conn, *altspb.HandshakerResult, error) {
+	resp, err := h.accessHandshakerService(req)
+	if err != nil {
+		return nil, nil, err
+	}
+	// Check if the returned status is an error.
+	if resp.GetStatus() != nil {
+		if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want {
+			return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details)
+		}
+	}
+
+	var extra []byte
+	if req.GetServerStart() != nil {
+		if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) {
+			return nil, nil, errOutOfBound
+		}
+		extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():]
+	}
+	result, extra, err := h.processUntilDone(resp, extra)
+	if err != nil {
+		return nil, nil, err
+	}
+	// The handshaker returns a 128-byte key. It should be truncated based
+	// on the returned record protocol.
+	keyLen, ok := keyLength[result.RecordProtocol]
+	if !ok {
+		return nil, nil, fmt.Errorf("unknown resulting record protocol %v", result.RecordProtocol)
+	}
+	sc, err := conn.NewConn(h.conn, h.side, result.GetRecordProtocol(), result.KeyData[:keyLen], extra)
+	if err != nil {
+		return nil, nil, err
+	}
+	return sc, result, nil
+}
+
+func (h *altsHandshaker) accessHandshakerService(req *altspb.HandshakerReq) (*altspb.HandshakerResp, error) {
+	if err := h.stream.Send(req); err != nil {
+		return nil, err
+	}
+	resp, err := h.stream.Recv()
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// processUntilDone processes the handshake until the handshaker service
+// returns the results. The handshaker service takes care of frame parsing, so
+// we read whatever is received from the network and send it to the handshaker
+// service.
+func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []byte) (*altspb.HandshakerResult, []byte, error) {
+	for {
+		if len(resp.OutFrames) > 0 {
+			if _, err := h.conn.Write(resp.OutFrames); err != nil {
+				return nil, nil, err
+			}
+		}
+		if resp.Result != nil {
+			return resp.Result, extra, nil
+		}
+		buf := make([]byte, frameLimit)
+		n, err := h.conn.Read(buf)
+		if err != nil && err != io.EOF {
+			return nil, nil, err
+		}
+		// If there is nothing to send to the handshaker service, and
+		// nothing is received from the peer, then we are stuck.
+		// This covers the case when the peer is not responding. Note
+		// that handshaker service connection issues are caught in
+		// accessHandshakerService before we even get here.
+		if len(resp.OutFrames) == 0 && n == 0 {
+			return nil, nil, core.PeerNotRespondingError
+		}
+		// Append extra bytes from the previous interaction with the
+		// handshaker service to the current buffer read from conn.
+		p := append(extra, buf[:n]...)
+		// From here on, p and extra point to the same slice.
+		resp, err = h.accessHandshakerService(&altspb.HandshakerReq{
+			ReqOneof: &altspb.HandshakerReq_Next{
+				Next: &altspb.NextHandshakeMessageReq{
+					InBytes: p,
+				},
+			},
+		})
+		if err != nil {
+			return nil, nil, err
+		}
+		// Set extra based on the handshaker service response.
+		if resp.GetBytesConsumed() > uint32(len(p)) {
+			return nil, nil, errOutOfBound
+		}
+		extra = p[resp.GetBytesConsumed():]
+	}
+}
+
+// Close terminates the Handshaker. It should be called after the caller
+// obtains the secure connection.
+func (h *altsHandshaker) Close() {
+	h.stream.CloseSend()
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
new file mode 100644
index 0000000000..0c7b568354
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package service manages connections between the VM application and the ALTS
+// handshaker service.
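+//
+// Typical use is a single Dial per process, sharing one ClientConn across
+// handshakes. The address shown below is illustrative only; the public ALTS
+// code dials the handshaker at a fixed metadata-server address:
+//
+//	hsConn, err := service.Dial("metadata.google.internal:8080")
+//	if err != nil {
+//		// handle dial failure
+//	}
+//	_ = hsConn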
+package service + +import ( + "sync" + + grpc "google.golang.org/grpc" +) + +var ( + // hsConn represents a connection to hypervisor handshaker service. + hsConn *grpc.ClientConn + mu sync.Mutex + // hsDialer will be reassigned in tests. + hsDialer = grpc.Dial +) + +// Dial dials the handshake service in the hypervisor. If a connection has +// already been established, this function returns it. Otherwise, a new +// connection is created. +func Dial(hsAddress string) (*grpc.ClientConn, error) { + mu.Lock() + defer mu.Unlock() + + if hsConn == nil { + // Create a new connection to the handshaker service. Note that + // this connection stays open until the application is closed. + var err error + hsConn, err = hsDialer(hsAddress, grpc.WithInsecure()) + if err != nil { + return nil, err + } + } + return hsConn, nil +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go new file mode 100644 index 0000000000..38c4832dfd --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -0,0 +1,152 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/gcp/altscontext.proto + +package grpc_gcp + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type AltsContext struct { + // The application protocol negotiated for this connection. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The record protocol negotiated for this connection. + RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` + // The security level of the created secure channel. + SecurityLevel SecurityLevel `protobuf:"varint,3,opt,name=security_level,json=securityLevel,proto3,enum=grpc.gcp.SecurityLevel" json:"security_level,omitempty"` + // The peer service account. + PeerServiceAccount string `protobuf:"bytes,4,opt,name=peer_service_account,json=peerServiceAccount,proto3" json:"peer_service_account,omitempty"` + // The local service account. + LocalServiceAccount string `protobuf:"bytes,5,opt,name=local_service_account,json=localServiceAccount,proto3" json:"local_service_account,omitempty"` + // The RPC protocol versions supported by the peer. + PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` + // Additional attributes of the peer. 
+ PeerAttributes map[string]string `protobuf:"bytes,7,rep,name=peer_attributes,json=peerAttributes,proto3" json:"peer_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AltsContext) Reset() { *m = AltsContext{} } +func (m *AltsContext) String() string { return proto.CompactTextString(m) } +func (*AltsContext) ProtoMessage() {} +func (*AltsContext) Descriptor() ([]byte, []int) { + return fileDescriptor_6647a41e53a575a3, []int{0} +} + +func (m *AltsContext) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AltsContext.Unmarshal(m, b) +} +func (m *AltsContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AltsContext.Marshal(b, m, deterministic) +} +func (m *AltsContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_AltsContext.Merge(m, src) +} +func (m *AltsContext) XXX_Size() int { + return xxx_messageInfo_AltsContext.Size(m) +} +func (m *AltsContext) XXX_DiscardUnknown() { + xxx_messageInfo_AltsContext.DiscardUnknown(m) +} + +var xxx_messageInfo_AltsContext proto.InternalMessageInfo + +func (m *AltsContext) GetApplicationProtocol() string { + if m != nil { + return m.ApplicationProtocol + } + return "" +} + +func (m *AltsContext) GetRecordProtocol() string { + if m != nil { + return m.RecordProtocol + } + return "" +} + +func (m *AltsContext) GetSecurityLevel() SecurityLevel { + if m != nil { + return m.SecurityLevel + } + return SecurityLevel_SECURITY_NONE +} + +func (m *AltsContext) GetPeerServiceAccount() string { + if m != nil { + return m.PeerServiceAccount + } + return "" +} + +func (m *AltsContext) GetLocalServiceAccount() string { + if m != nil { + return m.LocalServiceAccount + } + return "" +} + +func (m *AltsContext) GetPeerRpcVersions() *RpcProtocolVersions { + if m != nil { + return m.PeerRpcVersions + } + return nil +} + +func (m *AltsContext) GetPeerAttributes() map[string]string { + if m != nil { + return m.PeerAttributes + } + return nil +} + +func init() { + proto.RegisterType((*AltsContext)(nil), "grpc.gcp.AltsContext") + proto.RegisterMapType((map[string]string)(nil), "grpc.gcp.AltsContext.PeerAttributesEntry") +} + +func init() { proto.RegisterFile("grpc/gcp/altscontext.proto", fileDescriptor_6647a41e53a575a3) } + +var fileDescriptor_6647a41e53a575a3 = []byte{ + // 411 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x4d, 0x6f, 0x13, 0x31, + 0x10, 0x86, 0xb5, 0x0d, 0x2d, 0xe0, 0x88, 0xb4, 0xb8, 0xa9, 0x58, 0x45, 0x42, 0x8a, 0xb8, 0xb0, + 0x5c, 0x76, 0x21, 0x5c, 0x10, 0x07, 0x50, 0x8a, 0x38, 0x20, 0x71, 0x88, 0xb6, 0x12, 0x07, 0x2e, + 0x2b, 0x77, 0x3a, 0xb2, 0x2c, 0x5c, 0x8f, 0x35, 0x76, 0x22, 0xf2, 0xb3, 0xf9, 0x07, 0x68, 0xed, + 0xcd, 0x07, 0x1f, 0xb7, 0x9d, 0x79, 0x9f, 0x19, 0xbf, 0xb3, 0x33, 0x62, 0xa6, 0xd9, 0x43, 0xa3, + 0xc1, 0x37, 0xca, 0xc6, 0x00, 0xe4, 0x22, 0xfe, 0x8c, 0xb5, 0x67, 0x8a, 0x24, 0x1f, 0xf5, 0x5a, + 0xad, 0xc1, 0xcf, 0xaa, 0x3d, 0x15, 0x59, 0xb9, 0xe0, 0x89, 0x63, 0x17, 0x10, 0xd6, 0x6c, 0xe2, + 0xb6, 0x03, 0xba, 0xbf, 0x27, 0x97, 0x6b, 0x5e, 0xfc, 0x1a, 0x89, 0xf1, 0xd2, 0xc6, 0xf0, 0x29, + 0x77, 0x92, 0x6f, 0xc4, 0x54, 0x79, 0x6f, 0x0d, 0xa8, 0x68, 0xc8, 0x75, 0x09, 0x02, 0xb2, 0x65, + 0x31, 0x2f, 0xaa, 0xc7, 0xed, 0xe5, 0x91, 0xb6, 0x1a, 0x24, 0xf9, 0x52, 0x9c, 0x33, 0x02, 0xf1, + 0xdd, 0x81, 0x3e, 0x49, 0xf4, 0x24, 0xa7, 0xf7, 0xe0, 0x07, 0x31, 
0xd9, 0x9b, 0xb0, 0xb8, 0x41, + 0x5b, 0x8e, 0xe6, 0x45, 0x35, 0x59, 0x3c, 0xab, 0x77, 0xc6, 0xeb, 0x9b, 0x41, 0xff, 0xda, 0xcb, + 0xed, 0x93, 0x70, 0x1c, 0xca, 0xd7, 0x62, 0xea, 0x11, 0xb9, 0x0b, 0xc8, 0x1b, 0x03, 0xd8, 0x29, + 0x00, 0x5a, 0xbb, 0x58, 0x3e, 0x48, 0xaf, 0xc9, 0x5e, 0xbb, 0xc9, 0xd2, 0x32, 0x2b, 0x72, 0x21, + 0xae, 0x2c, 0x81, 0xb2, 0xff, 0x94, 0x9c, 0xe6, 0x71, 0x92, 0xf8, 0x57, 0xcd, 0x17, 0xf1, 0x34, + 0xbd, 0xc2, 0x1e, 0xba, 0x0d, 0x72, 0x30, 0xe4, 0x42, 0x79, 0x36, 0x2f, 0xaa, 0xf1, 0xe2, 0xf9, + 0xc1, 0x68, 0xeb, 0x61, 0x37, 0xd7, 0xb7, 0x01, 0x6a, 0xcf, 0xfb, 0xba, 0xd6, 0xc3, 0x2e, 0x21, + 0x5b, 0x91, 0x52, 0x9d, 0x8a, 0x91, 0xcd, 0xed, 0x3a, 0x62, 0x28, 0x1f, 0xce, 0x47, 0xd5, 0x78, + 0xf1, 0xea, 0xd0, 0xe8, 0xe8, 0xe7, 0xd7, 0x2b, 0x44, 0x5e, 0xee, 0xd9, 0xcf, 0x2e, 0xf2, 0xb6, + 0x9d, 0xf8, 0x3f, 0x92, 0xb3, 0xa5, 0xb8, 0xfc, 0x0f, 0x26, 0x2f, 0xc4, 0xe8, 0x07, 0x6e, 0x87, + 0x35, 0xf5, 0x9f, 0x72, 0x2a, 0x4e, 0x37, 0xca, 0xae, 0x71, 0x58, 0x46, 0x0e, 0xde, 0x9f, 0xbc, + 0x2b, 0xae, 0xad, 0xb8, 0x32, 0x94, 0x1d, 0xf4, 0x47, 0x54, 0x1b, 0x17, 0x91, 0x9d, 0xb2, 0xd7, + 0x17, 0x47, 0x66, 0xd2, 0x74, 0xab, 0xe2, 0xfb, 0x47, 0x4d, 0xa4, 0x2d, 0xd6, 0x9a, 0xac, 0x72, + 0xba, 0x26, 0xd6, 0x4d, 0x3a, 0x2e, 0x60, 0xbc, 0x43, 0x17, 0x8d, 0xb2, 0x21, 0x9d, 0x62, 0xb3, + 0xeb, 0xd2, 0xa4, 0x2b, 0x48, 0x50, 0xa7, 0xc1, 0xdf, 0x9e, 0xa5, 0xf8, 0xed, 0xef, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x9b, 0x8c, 0xe4, 0x6a, 0xba, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go new file mode 100644 index 0000000000..4d1fc4c4d8 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -0,0 +1,1105 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/gcp/handshaker.proto + +package grpc_gcp + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type HandshakeProtocol int32 + +const ( + // Default value. + HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED HandshakeProtocol = 0 + // TLS handshake protocol. + HandshakeProtocol_TLS HandshakeProtocol = 1 + // Application Layer Transport Security handshake protocol. 
+ HandshakeProtocol_ALTS HandshakeProtocol = 2 +) + +var HandshakeProtocol_name = map[int32]string{ + 0: "HANDSHAKE_PROTOCOL_UNSPECIFIED", + 1: "TLS", + 2: "ALTS", +} + +var HandshakeProtocol_value = map[string]int32{ + "HANDSHAKE_PROTOCOL_UNSPECIFIED": 0, + "TLS": 1, + "ALTS": 2, +} + +func (x HandshakeProtocol) String() string { + return proto.EnumName(HandshakeProtocol_name, int32(x)) +} + +func (HandshakeProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{0} +} + +type NetworkProtocol int32 + +const ( + NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED NetworkProtocol = 0 + NetworkProtocol_TCP NetworkProtocol = 1 + NetworkProtocol_UDP NetworkProtocol = 2 +) + +var NetworkProtocol_name = map[int32]string{ + 0: "NETWORK_PROTOCOL_UNSPECIFIED", + 1: "TCP", + 2: "UDP", +} + +var NetworkProtocol_value = map[string]int32{ + "NETWORK_PROTOCOL_UNSPECIFIED": 0, + "TCP": 1, + "UDP": 2, +} + +func (x NetworkProtocol) String() string { + return proto.EnumName(NetworkProtocol_name, int32(x)) +} + +func (NetworkProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{1} +} + +type Endpoint struct { + // IP address. It should contain an IPv4 or IPv6 string literal, e.g. + // "192.168.0.1" or "2001:db8::1". + IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // Port number. + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + // Network protocol (e.g., TCP, UDP) associated with this endpoint. + Protocol NetworkProtocol `protobuf:"varint,3,opt,name=protocol,proto3,enum=grpc.gcp.NetworkProtocol" json:"protocol,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (m *Endpoint) String() string { return proto.CompactTextString(m) } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{0} +} + +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Endpoint.Unmarshal(m, b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Endpoint.Marshal(b, m, deterministic) +} +func (m *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(m, src) +} +func (m *Endpoint) XXX_Size() int { + return xxx_messageInfo_Endpoint.Size(m) +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *Endpoint) GetIpAddress() string { + if m != nil { + return m.IpAddress + } + return "" +} + +func (m *Endpoint) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *Endpoint) GetProtocol() NetworkProtocol { + if m != nil { + return m.Protocol + } + return NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED +} + +type Identity struct { + // Types that are valid to be assigned to IdentityOneof: + // *Identity_ServiceAccount + // *Identity_Hostname + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional attributes of the identity. 
+ Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Identity) Reset() { *m = Identity{} } +func (m *Identity) String() string { return proto.CompactTextString(m) } +func (*Identity) ProtoMessage() {} +func (*Identity) Descriptor() ([]byte, []int) { + return fileDescriptor_54c074f40c7c7e99, []int{1} +} + +func (m *Identity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Identity.Unmarshal(m, b) +} +func (m *Identity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Identity.Marshal(b, m, deterministic) +} +func (m *Identity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Identity.Merge(m, src) +} +func (m *Identity) XXX_Size() int { + return xxx_messageInfo_Identity.Size(m) +} +func (m *Identity) XXX_DiscardUnknown() { + xxx_messageInfo_Identity.DiscardUnknown(m) +} + +var xxx_messageInfo_Identity proto.InternalMessageInfo + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_ServiceAccount struct { + ServiceAccount string `protobuf:"bytes,1,opt,name=service_account,json=serviceAccount,proto3,oneof"` +} + +type Identity_Hostname struct { + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +func (*Identity_ServiceAccount) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (m *Identity) GetServiceAccount() string { + if x, ok := m.GetIdentityOneof().(*Identity_ServiceAccount); ok { + return x.ServiceAccount + } + return "" +} + +func (m *Identity) GetHostname() string { + if x, ok := m.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (m *Identity) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Identity) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Identity_ServiceAccount)(nil), + (*Identity_Hostname)(nil), + } +} + +type StartClientHandshakeReq struct { + // Handshake security protocol requested by the client. + HandshakeSecurityProtocol HandshakeProtocol `protobuf:"varint,1,opt,name=handshake_security_protocol,json=handshakeSecurityProtocol,proto3,enum=grpc.gcp.HandshakeProtocol" json:"handshake_security_protocol,omitempty"` + // The application protocols supported by the client, e.g., "h2" (for http2), + // "grpc". + ApplicationProtocols []string `protobuf:"bytes,2,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // The record protocols supported by the client, e.g., + // "ALTSRP_GCM_AES128". + RecordProtocols []string `protobuf:"bytes,3,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` + // (Optional) Describes which server identities are acceptable by the client. + // If target identities are provided and none of them matches the peer + // identity of the server, handshake will fail. 
+	TargetIdentities []*Identity `protobuf:"bytes,4,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"`
+	// (Optional) Application may specify a local identity. Otherwise, the
+	// handshaker chooses a default local identity.
+	LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"`
+	// (Optional) Local endpoint information of the connection to the server,
+	// such as local IP address, port number, and network protocol.
+	LocalEndpoint *Endpoint `protobuf:"bytes,6,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"`
+	// (Optional) Endpoint information of the remote server, such as IP address,
+	// port number, and network protocol.
+	RemoteEndpoint *Endpoint `protobuf:"bytes,7,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"`
+	// (Optional) If target name is provided, a secure naming check is performed
+	// to verify that the peer authenticated identity is indeed authorized to run
+	// the target name.
+	TargetName string `protobuf:"bytes,8,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"`
+	// (Optional) RPC protocol versions supported by the client.
+	RpcVersions *RpcProtocolVersions `protobuf:"bytes,9,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"`
+	// (Optional) Maximum frame size supported by the client.
+	MaxFrameSize         uint32   `protobuf:"varint,10,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StartClientHandshakeReq) Reset()         { *m = StartClientHandshakeReq{} }
+func (m *StartClientHandshakeReq) String() string { return proto.CompactTextString(m) }
+func (*StartClientHandshakeReq) ProtoMessage()    {}
+func (*StartClientHandshakeReq) Descriptor() ([]byte, []int) {
+	return fileDescriptor_54c074f40c7c7e99, []int{2}
+}
+
+func (m *StartClientHandshakeReq) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StartClientHandshakeReq.Unmarshal(m, b)
+}
+func (m *StartClientHandshakeReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StartClientHandshakeReq.Marshal(b, m, deterministic)
+}
+func (m *StartClientHandshakeReq) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StartClientHandshakeReq.Merge(m, src)
+}
+func (m *StartClientHandshakeReq) XXX_Size() int {
+	return xxx_messageInfo_StartClientHandshakeReq.Size(m)
+}
+func (m *StartClientHandshakeReq) XXX_DiscardUnknown() {
+	xxx_messageInfo_StartClientHandshakeReq.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StartClientHandshakeReq proto.InternalMessageInfo
+
+func (m *StartClientHandshakeReq) GetHandshakeSecurityProtocol() HandshakeProtocol {
+	if m != nil {
+		return m.HandshakeSecurityProtocol
+	}
+	return HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED
+}
+
+func (m *StartClientHandshakeReq) GetApplicationProtocols() []string {
+	if m != nil {
+		return m.ApplicationProtocols
+	}
+	return nil
+}
+
+func (m *StartClientHandshakeReq) GetRecordProtocols() []string {
+	if m != nil {
+		return m.RecordProtocols
+	}
+	return nil
+}
+
+func (m *StartClientHandshakeReq) GetTargetIdentities() []*Identity {
+	if m != nil {
+		return m.TargetIdentities
+	}
+	return nil
+}
+
+func (m *StartClientHandshakeReq) GetLocalIdentity() *Identity {
+	if m != nil {
+		return m.LocalIdentity
+	}
+	return nil
+}
+
+func (m *StartClientHandshakeReq) GetLocalEndpoint() *Endpoint {
+	if m != nil {
+		return m.LocalEndpoint
+	}
+	return nil
+}
+
+func (m *StartClientHandshakeReq) GetRemoteEndpoint() *Endpoint {
+	if m != nil {
+		return m.RemoteEndpoint
+	}
+	return nil
+}
+
+func (m *StartClientHandshakeReq) GetTargetName() string {
+	if m != nil {
+		return m.TargetName
+	}
+	return ""
+}
+
+func (m *StartClientHandshakeReq) GetRpcVersions() *RpcProtocolVersions {
+	if m != nil {
+		return m.RpcVersions
+	}
+	return nil
+}
+
+func (m *StartClientHandshakeReq) GetMaxFrameSize() uint32 {
+	if m != nil {
+		return m.MaxFrameSize
+	}
+	return 0
+}
+
+type ServerHandshakeParameters struct {
+	// The record protocols supported by the server, e.g.,
+	// "ALTSRP_GCM_AES128".
+	RecordProtocols []string `protobuf:"bytes,1,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"`
+	// (Optional) A list of local identities supported by the server, if
+	// specified. Otherwise, the handshaker chooses a default local identity.
+	LocalIdentities      []*Identity `protobuf:"bytes,2,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *ServerHandshakeParameters) Reset()         { *m = ServerHandshakeParameters{} }
+func (m *ServerHandshakeParameters) String() string { return proto.CompactTextString(m) }
+func (*ServerHandshakeParameters) ProtoMessage()    {}
+func (*ServerHandshakeParameters) Descriptor() ([]byte, []int) {
+	return fileDescriptor_54c074f40c7c7e99, []int{3}
+}
+
+func (m *ServerHandshakeParameters) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServerHandshakeParameters.Unmarshal(m, b)
+}
+func (m *ServerHandshakeParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServerHandshakeParameters.Marshal(b, m, deterministic)
+}
+func (m *ServerHandshakeParameters) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServerHandshakeParameters.Merge(m, src)
+}
+func (m *ServerHandshakeParameters) XXX_Size() int {
+	return xxx_messageInfo_ServerHandshakeParameters.Size(m)
+}
+func (m *ServerHandshakeParameters) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServerHandshakeParameters.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServerHandshakeParameters proto.InternalMessageInfo
+
+func (m *ServerHandshakeParameters) GetRecordProtocols() []string {
+	if m != nil {
+		return m.RecordProtocols
+	}
+	return nil
+}
+
+func (m *ServerHandshakeParameters) GetLocalIdentities() []*Identity {
+	if m != nil {
+		return m.LocalIdentities
+	}
+	return nil
+}
+
+type StartServerHandshakeReq struct {
+	// The application protocols supported by the server, e.g., "h2" (for http2),
+	// "grpc".
+	ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"`
+	// Handshake parameters (record protocols and local identities supported by
+	// the server) mapped by the handshake protocol. Each handshake security
+	// protocol (e.g., TLS or ALTS) has its own set of record protocols and local
+	// identities. Since protobuf does not support enum as key to the map, the key
+	// to handshake_parameters is the integer value of HandshakeProtocol enum.
+	HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Bytes in out_frames returned from the peer's HandshakerResp. It is possible
+	// that the peer's out_frames are split into multiple HandshakerReq messages.
+	InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"`
+	// (Optional) Local endpoint information of the connection to the client,
+	// such as local IP address, port number, and network protocol.
+	LocalEndpoint *Endpoint `protobuf:"bytes,4,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"`
+	// (Optional) Endpoint information of the remote client, such as IP address,
+	// port number, and network protocol.
+	RemoteEndpoint *Endpoint `protobuf:"bytes,5,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"`
+	// (Optional) RPC protocol versions supported by the server.
+	RpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"`
+	// (Optional) Maximum frame size supported by the server.
+	MaxFrameSize         uint32   `protobuf:"varint,7,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StartServerHandshakeReq) Reset()         { *m = StartServerHandshakeReq{} }
+func (m *StartServerHandshakeReq) String() string { return proto.CompactTextString(m) }
+func (*StartServerHandshakeReq) ProtoMessage()    {}
+func (*StartServerHandshakeReq) Descriptor() ([]byte, []int) {
+	return fileDescriptor_54c074f40c7c7e99, []int{4}
+}
+
+func (m *StartServerHandshakeReq) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StartServerHandshakeReq.Unmarshal(m, b)
+}
+func (m *StartServerHandshakeReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StartServerHandshakeReq.Marshal(b, m, deterministic)
+}
+func (m *StartServerHandshakeReq) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StartServerHandshakeReq.Merge(m, src)
+}
+func (m *StartServerHandshakeReq) XXX_Size() int {
+	return xxx_messageInfo_StartServerHandshakeReq.Size(m)
+}
+func (m *StartServerHandshakeReq) XXX_DiscardUnknown() {
+	xxx_messageInfo_StartServerHandshakeReq.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StartServerHandshakeReq proto.InternalMessageInfo
+
+func (m *StartServerHandshakeReq) GetApplicationProtocols() []string {
+	if m != nil {
+		return m.ApplicationProtocols
+	}
+	return nil
+}
+
+func (m *StartServerHandshakeReq) GetHandshakeParameters() map[int32]*ServerHandshakeParameters {
+	if m != nil {
+		return m.HandshakeParameters
+	}
+	return nil
+}
+
+func (m *StartServerHandshakeReq) GetInBytes() []byte {
+	if m != nil {
+		return m.InBytes
+	}
+	return nil
+}
+
+func (m *StartServerHandshakeReq) GetLocalEndpoint() *Endpoint {
+	if m != nil {
+		return m.LocalEndpoint
+	}
+	return nil
+}
+
+func (m *StartServerHandshakeReq) GetRemoteEndpoint() *Endpoint {
+	if m != nil {
+		return m.RemoteEndpoint
+	}
+	return nil
+}
+
+func (m *StartServerHandshakeReq) GetRpcVersions() *RpcProtocolVersions {
+	if m != nil {
+		return m.RpcVersions
+	}
+	return nil
+}
+
+func (m *StartServerHandshakeReq) GetMaxFrameSize() uint32 {
+	if m != nil {
+		return m.MaxFrameSize
+	}
+	return 0
+}
+
+type NextHandshakeMessageReq struct {
+	// Bytes in out_frames returned from the peer's HandshakerResp. It is possible
+	// that the peer's out_frames are split into multiple NextHandshakeMessageReq
+	// messages.
+	InBytes              []byte   `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NextHandshakeMessageReq) Reset()         { *m = NextHandshakeMessageReq{} }
+func (m *NextHandshakeMessageReq) String() string { return proto.CompactTextString(m) }
+func (*NextHandshakeMessageReq) ProtoMessage()    {}
+func (*NextHandshakeMessageReq) Descriptor() ([]byte, []int) {
+	return fileDescriptor_54c074f40c7c7e99, []int{5}
+}
+
+func (m *NextHandshakeMessageReq) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NextHandshakeMessageReq.Unmarshal(m, b)
+}
+func (m *NextHandshakeMessageReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NextHandshakeMessageReq.Marshal(b, m, deterministic)
+}
+func (m *NextHandshakeMessageReq) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NextHandshakeMessageReq.Merge(m, src)
+}
+func (m *NextHandshakeMessageReq) XXX_Size() int {
+	return xxx_messageInfo_NextHandshakeMessageReq.Size(m)
+}
+func (m *NextHandshakeMessageReq) XXX_DiscardUnknown() {
+	xxx_messageInfo_NextHandshakeMessageReq.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NextHandshakeMessageReq proto.InternalMessageInfo
+
+func (m *NextHandshakeMessageReq) GetInBytes() []byte {
+	if m != nil {
+		return m.InBytes
+	}
+	return nil
+}
+
+type HandshakerReq struct {
+	// Types that are valid to be assigned to ReqOneof:
+	//	*HandshakerReq_ClientStart
+	//	*HandshakerReq_ServerStart
+	//	*HandshakerReq_Next
+	ReqOneof             isHandshakerReq_ReqOneof `protobuf_oneof:"req_oneof"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *HandshakerReq) Reset()         { *m = HandshakerReq{} }
+func (m *HandshakerReq) String() string { return proto.CompactTextString(m) }
+func (*HandshakerReq) ProtoMessage()    {}
+func (*HandshakerReq) Descriptor() ([]byte, []int) {
+	return fileDescriptor_54c074f40c7c7e99, []int{6}
+}
+
+func (m *HandshakerReq) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HandshakerReq.Unmarshal(m, b)
+}
+func (m *HandshakerReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HandshakerReq.Marshal(b, m, deterministic)
+}
+func (m *HandshakerReq) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HandshakerReq.Merge(m, src)
+}
+func (m *HandshakerReq) XXX_Size() int {
+	return xxx_messageInfo_HandshakerReq.Size(m)
+}
+func (m *HandshakerReq) XXX_DiscardUnknown() {
+	xxx_messageInfo_HandshakerReq.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HandshakerReq proto.InternalMessageInfo
+
+type isHandshakerReq_ReqOneof interface {
+	isHandshakerReq_ReqOneof()
+}
+
+type HandshakerReq_ClientStart struct {
+	ClientStart *StartClientHandshakeReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"`
+}
+
+type HandshakerReq_ServerStart struct {
+	ServerStart *StartServerHandshakeReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"`
+}
+
+type HandshakerReq_Next struct {
+	Next *NextHandshakeMessageReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"`
+}
+
+func (*HandshakerReq_ClientStart) isHandshakerReq_ReqOneof() {}
+
+func (*HandshakerReq_ServerStart) isHandshakerReq_ReqOneof() {}
+
+func (*HandshakerReq_Next) isHandshakerReq_ReqOneof() {}
+
+func (m *HandshakerReq) GetReqOneof() isHandshakerReq_ReqOneof {
+	if m != nil {
+		return m.ReqOneof
+	}
+	return nil
+}
+
+func (m *HandshakerReq) GetClientStart() *StartClientHandshakeReq {
+	if x, ok := m.GetReqOneof().(*HandshakerReq_ClientStart); ok {
+		return x.ClientStart
+	}
+	return nil
+}
+
+func (m *HandshakerReq) GetServerStart() *StartServerHandshakeReq {
+	if x, ok := m.GetReqOneof().(*HandshakerReq_ServerStart); ok {
+		return x.ServerStart
+	}
+	return nil
+}
+
+func (m *HandshakerReq) GetNext() *NextHandshakeMessageReq {
+	if x, ok := m.GetReqOneof().(*HandshakerReq_Next); ok {
+		return x.Next
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*HandshakerReq) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*HandshakerReq_ClientStart)(nil),
+		(*HandshakerReq_ServerStart)(nil),
+		(*HandshakerReq_Next)(nil),
+	}
+}
+
+type HandshakerResult struct {
+	// The application protocol negotiated for this connection.
+	ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"`
+	// The record protocol negotiated for this connection.
+	RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"`
+	// Cryptographic key data. The key data may be longer than the key length
+	// required by the record protocol, so the client of the handshaker service
+	// needs to truncate the key data to the required key length.
+	KeyData []byte `protobuf:"bytes,3,opt,name=key_data,json=keyData,proto3" json:"key_data,omitempty"`
+	// The authenticated identity of the peer.
+	PeerIdentity *Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"`
+	// The local identity used in the handshake.
+	LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"`
+	// Indicates whether the handshaker service client should keep the channel
+	// to the handshaker service open, e.g., in order to handle post-handshake
+	// messages in the future.
+	KeepChannelOpen bool `protobuf:"varint,6,opt,name=keep_channel_open,json=keepChannelOpen,proto3" json:"keep_channel_open,omitempty"`
+	// The RPC protocol versions supported by the peer.
+	PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,7,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"`
+	// The maximum frame size of the peer.
+	MaxFrameSize         uint32   `protobuf:"varint,8,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HandshakerResult) Reset()         { *m = HandshakerResult{} }
+func (m *HandshakerResult) String() string { return proto.CompactTextString(m) }
+func (*HandshakerResult) ProtoMessage()    {}
+func (*HandshakerResult) Descriptor() ([]byte, []int) {
+	return fileDescriptor_54c074f40c7c7e99, []int{7}
+}
+
+func (m *HandshakerResult) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HandshakerResult.Unmarshal(m, b)
+}
+func (m *HandshakerResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HandshakerResult.Marshal(b, m, deterministic)
+}
+func (m *HandshakerResult) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HandshakerResult.Merge(m, src)
+}
+func (m *HandshakerResult) XXX_Size() int {
+	return xxx_messageInfo_HandshakerResult.Size(m)
+}
+func (m *HandshakerResult) XXX_DiscardUnknown() {
+	xxx_messageInfo_HandshakerResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HandshakerResult proto.InternalMessageInfo
+
+func (m *HandshakerResult) GetApplicationProtocol() string {
+	if m != nil {
+		return m.ApplicationProtocol
+	}
+	return ""
+}
+
+func (m *HandshakerResult) GetRecordProtocol() string {
+	if m != nil {
+		return m.RecordProtocol
+	}
+	return ""
+}
+
+func (m *HandshakerResult) GetKeyData() []byte {
+	if m != nil {
+		return m.KeyData
+	}
+	return nil
+}
+
+func (m *HandshakerResult) GetPeerIdentity() *Identity {
+	if m != nil {
+		return m.PeerIdentity
+	}
+	return nil
+}
+
+func (m *HandshakerResult) GetLocalIdentity() *Identity {
+	if m != nil {
+		return m.LocalIdentity
+	}
+	return nil
+}
+
+func (m *HandshakerResult) GetKeepChannelOpen() bool {
+	if m != nil {
+		return m.KeepChannelOpen
+	}
+	return false
+}
+
+func (m *HandshakerResult) GetPeerRpcVersions() *RpcProtocolVersions {
+	if m != nil {
+		return m.PeerRpcVersions
+	}
+	return nil
+}
+
+func (m *HandshakerResult) GetMaxFrameSize() uint32 {
+	if m != nil {
+		return m.MaxFrameSize
+	}
+	return 0
+}
+
+type HandshakerStatus struct {
+	// The status code. This could be the gRPC status code.
+	Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+	// The status details.
+	Details              string   `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HandshakerStatus) Reset()         { *m = HandshakerStatus{} }
+func (m *HandshakerStatus) String() string { return proto.CompactTextString(m) }
+func (*HandshakerStatus) ProtoMessage()    {}
+func (*HandshakerStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_54c074f40c7c7e99, []int{8}
+}
+
+func (m *HandshakerStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HandshakerStatus.Unmarshal(m, b)
+}
+func (m *HandshakerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HandshakerStatus.Marshal(b, m, deterministic)
+}
+func (m *HandshakerStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HandshakerStatus.Merge(m, src)
+}
+func (m *HandshakerStatus) XXX_Size() int {
+	return xxx_messageInfo_HandshakerStatus.Size(m)
+}
+func (m *HandshakerStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_HandshakerStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HandshakerStatus proto.InternalMessageInfo
+
+func (m *HandshakerStatus) GetCode() uint32 {
+	if m != nil {
+		return m.Code
+	}
+	return 0
+}
+
+func (m *HandshakerStatus) GetDetails() string {
+	if m != nil {
+		return m.Details
+	}
+	return ""
+}
+
+type HandshakerResp struct {
+	// Frames to be given to the peer for the NextHandshakeMessageReq. May be
+	// empty if no out_frames have to be sent to the peer or if in_bytes in the
+	// HandshakerReq are incomplete. All the non-empty out frames must be sent to
+	// the peer even if the handshaker status is not OK, as these frames may
+	// contain alert frames.
+	OutFrames []byte `protobuf:"bytes,1,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"`
+	// Number of bytes in the in_bytes consumed by the handshaker. It is possible
+	// that part of in_bytes in HandshakerReq was unrelated to the handshake
+	// process.
+	BytesConsumed uint32 `protobuf:"varint,2,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"`
+	// This is set iff the handshake was successful. out_frames may still be set
+	// to frames that need to be forwarded to the peer.
+	Result *HandshakerResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"`
+	// Status of the handshaker.
+	Status               *HandshakerStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *HandshakerResp) Reset()         { *m = HandshakerResp{} }
+func (m *HandshakerResp) String() string { return proto.CompactTextString(m) }
+func (*HandshakerResp) ProtoMessage()    {}
+func (*HandshakerResp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_54c074f40c7c7e99, []int{9}
+}
+
+func (m *HandshakerResp) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HandshakerResp.Unmarshal(m, b)
+}
+func (m *HandshakerResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HandshakerResp.Marshal(b, m, deterministic)
+}
+func (m *HandshakerResp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HandshakerResp.Merge(m, src)
+}
+func (m *HandshakerResp) XXX_Size() int {
+	return xxx_messageInfo_HandshakerResp.Size(m)
+}
+func (m *HandshakerResp) XXX_DiscardUnknown() {
+	xxx_messageInfo_HandshakerResp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HandshakerResp proto.InternalMessageInfo
+
+func (m *HandshakerResp) GetOutFrames() []byte {
+	if m != nil {
+		return m.OutFrames
+	}
+	return nil
+}
+
+func (m *HandshakerResp) GetBytesConsumed() uint32 {
+	if m != nil {
+		return m.BytesConsumed
+	}
+	return 0
+}
+
+func (m *HandshakerResp) GetResult() *HandshakerResult {
+	if m != nil {
+		return m.Result
+	}
+	return nil
+}
+
+func (m *HandshakerResp) GetStatus() *HandshakerStatus {
+	if m != nil {
+		return m.Status
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterEnum("grpc.gcp.HandshakeProtocol", HandshakeProtocol_name, HandshakeProtocol_value)
+	proto.RegisterEnum("grpc.gcp.NetworkProtocol", NetworkProtocol_name, NetworkProtocol_value)
+	proto.RegisterType((*Endpoint)(nil), "grpc.gcp.Endpoint")
+	proto.RegisterType((*Identity)(nil), "grpc.gcp.Identity")
+	proto.RegisterMapType((map[string]string)(nil), "grpc.gcp.Identity.AttributesEntry")
+	proto.RegisterType((*StartClientHandshakeReq)(nil), "grpc.gcp.StartClientHandshakeReq")
+	proto.RegisterType((*ServerHandshakeParameters)(nil), "grpc.gcp.ServerHandshakeParameters")
+	proto.RegisterType((*StartServerHandshakeReq)(nil), "grpc.gcp.StartServerHandshakeReq")
+	proto.RegisterMapType((map[int32]*ServerHandshakeParameters)(nil), "grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry")
+	proto.RegisterType((*NextHandshakeMessageReq)(nil), "grpc.gcp.NextHandshakeMessageReq")
+	proto.RegisterType((*HandshakerReq)(nil), "grpc.gcp.HandshakerReq")
+	proto.RegisterType((*HandshakerResult)(nil), "grpc.gcp.HandshakerResult")
+	proto.RegisterType((*HandshakerStatus)(nil), "grpc.gcp.HandshakerStatus")
+	proto.RegisterType((*HandshakerResp)(nil), "grpc.gcp.HandshakerResp")
+}
+
+func init() { proto.RegisterFile("grpc/gcp/handshaker.proto", fileDescriptor_54c074f40c7c7e99) }
+
+var fileDescriptor_54c074f40c7c7e99 = []byte{
+	// 1203 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x6e, 0x1b, 0x45,
+	0x14, 0xce, 0xda, 0x4e, 0xe2, 0x1c, 0xc7, 0x3f, 0x99, 0xa6, 0xea, 0x26, 0x6d, 0xc1, 0x18, 0x10,
+	0x6e, 0x2f, 0x6c, 0x70, 0x41, 0xa5, 0x45, 0x55, 0x6b, 0x3b, 0x8e, 0x1c, 0x5a, 0x1c, 0x6b, 0x9d,
+	0x82, 0x44, 0x2f, 0x56, 0xd3, 0xf5, 0xd4, 0x59, 0x79, 0x3d, 0xb3, 0x9d, 0x19, 0x87, 0xb8, 0xf7,
+	0xbc, 0x04, 0xf7, 0xbc, 0x06, 0x2f, 0xc1, 0x33, 0x20, 0xf1, 0x18, 0x68, 0x67, 0x7f, 0x6d, 0xaf,
+	0xab, 0x22, 0xb8, 0xdb, 0x39, 0xf3, 0x7d, 0x67, 0xce, 0x9c, 0xf3, 0x9d, 0xb3, 0x03, 0x47, 0x13,
+	0xee, 0x5a, 0xcd, 0x89, 0xe5, 0x36, 0x2f, 0x31, 0x1d, 0x8b, 0x4b, 0x3c, 0x25, 0xbc, 0xe1, 0x72,
+	0x26, 0x19, 0xca, 0x7b, 0x5b, 0x8d, 0x89, 0xe5, 0x1e, 0xd7, 0x23, 0x90, 0xe4, 0x98, 0x0a, 0x97,
+	0x71, 0x69, 0x0a, 0x62, 0xcd, 0xb9, 0x2d, 0x17, 0xa6, 0xc5, 0x66, 0x33, 0x46, 0x7d, 0x4e, 0x4d,
+	0x42, 0xbe, 0x47, 0xc7, 0x2e, 0xb3, 0xa9, 0x44, 0x77, 0x01, 0x6c, 0xd7, 0xc4, 0xe3, 0x31, 0x27,
+	0x42, 0xe8, 0x5a, 0x55, 0xab, 0xef, 0x19, 0x7b, 0xb6, 0xdb, 0xf6, 0x0d, 0x08, 0x41, 0xce, 0x73,
+	0xa4, 0x67, 0xaa, 0x5a, 0x7d, 0xdb, 0x50, 0xdf, 0xe8, 0x1b, 0xc8, 0x2b, 0x3f, 0x16, 0x73, 0xf4,
+	0x6c, 0x55, 0xab, 0x97, 0x5a, 0x47, 0x8d, 0x30, 0x8a, 0xc6, 0x80, 0xc8, 0x5f, 0x18, 0x9f, 0x0e,
+	0x03, 0x80, 0x11, 0x41, 0x6b, 0x7f, 0x6b, 0x90, 0x3f, 0x1b, 0x13, 0x2a, 0x6d, 0xb9, 0x40, 0xf7,
+	0xa0, 0x2c, 0x08, 0xbf, 0xb2, 0x2d, 0x62, 0x62, 0xcb, 0x62, 0x73, 0x2a, 0xfd, 0xb3, 0xfb, 0x5b,
+	0x46, 0x29, 0xd8, 0x68, 0xfb, 0x76, 0x74, 0x07, 0xf2, 0x97, 0x4c, 0x48, 0x8a, 0x67, 0x44, 0x85,
+	0xe1, 0x61, 0x22, 0x0b, 0xea, 0x00, 0x60, 0x29, 0xb9, 0xfd, 0x7a, 0x2e, 0x89, 0xd0, 0xb3, 0xd5,
+	0x6c, 0xbd, 0xd0, 0xaa, 0xc5, 0xe1, 0x84, 0x07, 0x36, 0xda, 0x11, 0xa8, 0x47, 0x25, 0x5f, 0x18,
+	0x09, 0xd6, 0xf1, 0x13, 0x28, 0xaf, 0x6c, 0xa3, 0x0a, 0x64, 0xa7, 0x64, 0x11, 0xe4, 0xc3, 0xfb,
+	0x44, 0x87, 0xb0, 0x7d, 0x85, 0x9d, 0x79, 0x10, 0x83, 0xe1, 0x2f, 0x1e, 0x67, 0xbe, 0xd5, 0x3a,
+	0x15, 0x28, 0xd9, 0xc1, 0x31, 0x26, 0xa3, 0x84, 0xbd, 0xa9, 0xfd, 0x99, 0x83, 0x5b, 0x23, 0x89,
+	0xb9, 0xec, 0x3a, 0x36, 0xa1, 0xb2, 0x1f, 0x16, 0xcd, 0x20, 0x6f, 0xd1, 0x2b, 0xb8, 0x1d, 0x15,
+	0x31, 0xae, 0x4f, 0x94, 0x50, 0x4d, 0x25, 0xf4, 0x76, 0x7c, 0x83, 0x88, 0x1c, 0xa5, 0xf4, 0x28,
+	0xe2, 0x8f, 0x02, 0x7a, 0xb8, 0x85, 0x1e, 0xc0, 0x4d, 0xec, 0xba, 0x8e, 0x6d, 0x61, 0x69, 0x33,
+	0x1a, 0x79, 0x15, 0x7a, 0xa6, 0x9a, 0xad, 0xef, 0x19, 0x87, 0x89, 0xcd, 0x90, 0x23, 0xd0, 0x3d,
+	0xa8, 0x70, 0x62, 0x31, 0x3e, 0x4e, 0xe0, 0xb3, 0x0a, 0x5f, 0xf6, 0xed, 0x31, 0xf4, 0x29, 0x1c,
+	0x48, 0xcc, 0x27, 0x44, 0x9a, 0xc1, 0x8d, 0x6d, 0x22, 0xf4, 0x9c, 0x4a, 0x3a, 0x5a, 0x4f, 0xba,
+	0x51, 0xf1, 0xc1, 0x67, 0x11, 0x16, 0x3d, 0x82, 0x92, 0xc3, 0x2c, 0xec, 0x84, 0xfc, 0x85, 0xbe,
+	0x5d, 0xd5, 0x36, 0xb0, 0x8b, 0x0a, 0x19, 0x49, 0x26, 0xa2, 0x92, 0x40, 0xbb, 0xfa, 0xce, 0x2a,
+	0x35, 0x54, 0x75, 0x40, 0x8d, 0x44, 0xfe, 0x1d, 0x94, 0x39, 0x99, 0x31, 0x49, 0x62, 0xee, 0xee,
+	0x46, 0x6e, 0xc9, 0x87, 0x46, 0xe4, 0x8f, 0xa1, 0x10, 0xdc, 0x59, 0x49, 0x30, 0xaf, 0xca, 0x0f,
+	0xbe, 0x69, 0xe0, 0x49, 0xf0, 0x19, 0xec, 0x73, 0xd7, 0x32, 0xaf, 0x08, 0x17, 0x36, 0xa3, 0x42,
+	0xdf, 0x53, 0xae, 0xef, 0xc6, 0xae, 0x0d, 0xd7, 0x0a, 0x53, 0xf8, 0x63, 0x00, 0x32, 0x0a, 0xdc,
+	0xb5, 0xc2, 0x05, 0xfa, 0x0c, 0x4a, 0x33, 0x7c, 0x6d, 0xbe, 0xe1, 0x78, 0x46, 0x4c, 0x61, 0xbf,
+	0x23, 0x3a, 0x54, 0xb5, 0x7a, 0xd1, 0xd8, 0x9f, 0xe1, 0xeb, 0x53, 0xcf, 0x38, 0xb2, 0xdf, 0x91,
+	0xda, 0xaf, 0x1a, 0x1c, 0x8d, 0x08, 0xbf, 0x22, 0x3c, 0xd6, 0x04, 0xf6, 0x76, 0x25, 0xe1, 0xe9,
+	0x55, 0xd4, 0xd2, 0xab, 0xf8, 0x04, 0x2a, 0x4b, 0x45, 0xf0, 0x8a, 0x98, 0xd9, 0x58, 0xc4, 0x72,
+	0xb2, 0x0c, 0x36, 0x11, 0xb5, 0xdf, 0x43, 0x75, 0xaf, 0x04, 0xe3, 0xa9, 0x7b, 0xa3, 0x00, 0xb5,
+	0xf7, 0x08, 0x70, 0x06, 0x87, 0x71, 0x4b, 0xb8, 0xd1, 0x95, 0x82, 0x98, 0x1e, 0xc7, 0x31, 0x6d,
+	0x38, 0xb5, 0x91, 0x92, 0x0f, 0xbf, 0xcb, 0x6f, 0x5c, 0xa6, 0x64, 0xea, 0x08, 0xf2, 0x36, 0x35,
+	0x5f, 0x2f, 0xfc, 0x81, 0xa1, 0xd5, 0xf7, 0x8d, 0x5d, 0x9b, 0x76, 0xbc, 0x65, 0x8a, 0xc6, 0x72,
+	0xff, 0x41, 0x63, 0xdb, 0x1f, 0xac, 0xb1, 0x55, 0x09, 0xed, 0xfc, 0x0f, 0x12, 0xda, 0x5d, 0x97,
+	0xd0, 0xf1, 0x14, 0xf4, 0x4d, 0xb9, 0x4a, 0x8e, 0xbc, 0x6d, 0x7f, 0xe4, 0x3d, 0x4a, 0x8e, 0xbc,
+	0x42, 0xeb, 0xd3, 0x44, 0x21, 0x36, 0xc9, 0x30, 0x31, 0x17, 0x6b, 0x5f, 0xc3, 0xad, 0x01, 0xb9,
+	0x8e, 0xa7, 0xdf, 0x0f, 0x44, 0x08, 0x3c, 0x51, 0x32, 0x49, 0x96, 0x40, 0x5b, 0x2a, 0x41, 0xed,
+	0x2f, 0x0d, 0x8a, 0x11, 0x85, 0x7b, 0xe0, 0x53, 0xd8, 0xb7, 0xd4, 0x1c, 0x35, 0x85, 0x57, 0x7f,
+	0x45, 0x28, 0xb4, 0x3e, 0x59, 0x91, 0xc5, 0xfa, 0xa8, 0xed, 0x6f, 0x19, 0x05, 0x9f, 0xa8, 0x00,
+	0x9e, 0x1f, 0xa1, 0xe2, 0x0e, 0xfc, 0x64, 0x52, 0xfd, 0xac, 0xcb, 0xcb, 0xf3, 0xe3, 0x13, 0x7d,
+	0x3f, 0x0f, 0x21, 0x47, 0xc9, 0xb5, 0x54, 0xda, 0x59, 0xe2, 0x6f, 0xb8, 0x6d, 0x7f, 0xcb, 0x50,
+	0x84, 0x4e, 0x01, 0xf6, 0x38, 0x79, 0x1b, 0xfc, 0x23, 0x7e, 0xcb, 0x42, 0x25, 0x79, 0x4f, 0x31,
+	0x77, 0x24, 0xfa, 0x0a, 0x0e, 0xd3, 0xda, 0x27, 0xf8, 0x0f, 0xdd, 0x48, 0xe9, 0x1e, 0xf4, 0x05,
+	0x94, 0x57, 0xfa, 0x3e, 0xf8, 0x43, 0x95, 0x96, 0xdb, 0xde, 0xcb, 0xf9, 0x94, 0x2c, 0xcc, 0x31,
+	0x96, 0x38, 0x94, 0xfd, 0x94, 0x2c, 0x4e, 0xb0, 0xc4, 0xe8, 0x21, 0x14, 0x5d, 0x42, 0x78, 0x3c,
+	0x94, 0x73, 0x1b, 0x87, 0xf2, 0xbe, 0x07, 0x5c, 0x9f, 0xc9, 0xff, 0x7e, 0x9c, 0xdf, 0x87, 0x83,
+	0x29, 0x21, 0xae, 0x69, 0x5d, 0x62, 0x4a, 0x89, 0x63, 0x32, 0x97, 0x50, 0xa5, 0xfb, 0xbc, 0x51,
+	0xf6, 0x36, 0xba, 0xbe, 0xfd, 0xdc, 0x25, 0x14, 0x9d, 0xc1, 0x81, 0x8a, 0x6f, 0xa9, 0x47, 0x76,
+	0x3f, 0xa4, 0x47, 0xca, 0x1e, 0xcf, 0x78, 0x6f, 0x9f, 0xe4, 0x53, 0x46, 0xed, 0xb3, 0x64, 0x6d,
+	0x46, 0x12, 0xcb, 0xb9, 0x7a, 0x0a, 0x59, 0x6c, 0x4c, 0x54, 0x2d, 0x8a, 0x86, 0xfa, 0x46, 0x3a,
+	0xec, 0x8e, 0x89, 0xc4, 0xb6, 0xfa, 0xc3, 0x7a, 0x49, 0x0f, 0x97, 0xb5, 0x3f, 0x34, 0x28, 0x2d,
+	0x95, 0xd7, 0xf5, 0x9e, 0x5a, 0x6c, 0x2e, 0xfd, 0xa3, 0x43, 0xd9, 0xef, 0xb1, 0xb9, 0x54, 0xc7,
+	0x0a, 0xf4, 0x39, 0x94, 0x54, 0x43, 0x98, 0x16, 0xa3, 0x62, 0x3e, 0x23, 0x63, 0xe5, 0xb2, 0x68,
+	0x14, 0x95, 0xb5, 0x1b, 0x18, 0x51, 0x0b, 0x76, 0xb8, 0x12, 0x4b, 0xa0, 0xbf, 0xe3, 0x94, 0xa7,
+	0x42, 0x20, 0x27, 0x23, 0x40, 0x7a, 0x1c, 0xa1, 0x2e, 0x11, 0x14, 0x36, 0x95, 0xe3, 0x5f, 0xd3,
+	0x08, 0x90, 0xf7, 0xbf, 0x87, 0x83, 0xb5, 0xa7, 0x07, 0xaa, 0xc1, 0x47, 0xfd, 0xf6, 0xe0, 0x64,
+	0xd4, 0x6f, 0x3f, 0xef, 0x99, 0x43, 0xe3, 0xfc, 0xe2, 0xbc, 0x7b, 0xfe, 0xc2, 0x7c, 0x39, 0x18,
+	0x0d, 0x7b, 0xdd, 0xb3, 0xd3, 0xb3, 0xde, 0x49, 0x65, 0x0b, 0xed, 0x42, 0xf6, 0xe2, 0xc5, 0xa8,
+	0xa2, 0xa1, 0x3c, 0xe4, 0xda, 0x2f, 0x2e, 0x46, 0x95, 0xcc, 0xfd, 0x1e, 0x94, 0x57, 0xde, 0x85,
+	0xa8, 0x0a, 0x77, 0x06, 0xbd, 0x8b, 0x9f, 0xce, 0x8d, 0xe7, 0xef, 0xf3, 0xd3, 0x1d, 0x56, 0x34,
+	0xef, 0xe3, 0xe5, 0xc9, 0xb0, 0x92, 0x69, 0xbd, 0x4a, 0x84, 0xc4, 0x47, 0xfe, 0x2b, 0x11, 0x9d,
+	0x42, 0xe1, 0x84, 0x45, 0x66, 0x74, 0x2b, 0x3d, 0x1d, 0x6f, 0x8f, 0xf5, 0x0d, 0x79, 0x72, 0x6b,
+	0x5b, 0x75, 0xed, 0x4b, 0xad, 0x33, 0x85, 0x9b, 0x36, 0xf3, 0x31, 0xd8, 0x91, 0xa2, 0x61, 0x53,
+	0x49, 0x38, 0xc5, 0x4e, 0xa7, 0x1c, 0xc3, 0x55, 0xf4, 0x43, 0xed, 0xe7, 0xa7, 0x13, 0xc6, 0x26,
+	0x0e, 0x69, 0x4c, 0x98, 0x83, 0xe9, 0xa4, 0xc1, 0xf8, 0xa4, 0xa9, 0x1e, 0xdf, 0x16, 0x27, 0x4a,
+	0xde, 0xd8, 0x11, 0x4d, 0xcf, 0x49, 0x33, 0x74, 0xd2, 0x54, 0xbd, 0xa9, 0x40, 0xe6, 0xc4, 0x72,
+	0x5f, 0xef, 0xa8, 0xf5, 0x83, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc1, 0xf9, 0x9d, 0xf2, 0xd9,
+	0x0b, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// HandshakerServiceClient is the client API for HandshakerService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type HandshakerServiceClient interface {
+	// The handshaker service accepts a stream of handshaker requests and returns
+	// a stream of handshaker responses. The client is expected to send exactly
+	// one message with either client_start or server_start, followed by one or
+	// more messages with next. Each time the client sends a request, the
+	// handshaker service is expected to respond. The client does not have to
+	// wait for the service's response before sending the next request.
+	DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error)
+}
+
+type handshakerServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewHandshakerServiceClient(cc *grpc.ClientConn) HandshakerServiceClient {
+	return &handshakerServiceClient{cc}
+}
+
+func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_HandshakerService_serviceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &handshakerServiceDoHandshakeClient{stream}
+	return x, nil
+}
+
+type HandshakerService_DoHandshakeClient interface {
+	Send(*HandshakerReq) error
+	Recv() (*HandshakerResp, error)
+	grpc.ClientStream
+}
+
+type handshakerServiceDoHandshakeClient struct {
+	grpc.ClientStream
+}
+
+func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) {
+	m := new(HandshakerResp)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// HandshakerServiceServer is the server API for HandshakerService service.
+type HandshakerServiceServer interface {
+	// The handshaker service accepts a stream of handshaker requests and returns
+	// a stream of handshaker responses. The client is expected to send exactly
+	// one message with either client_start or server_start, followed by one or
+	// more messages with next. Each time the client sends a request, the
+	// handshaker service is expected to respond. The client does not have to
+	// wait for the service's response before sending the next request.
+	DoHandshake(HandshakerService_DoHandshakeServer) error
+}
+
+// UnimplementedHandshakerServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedHandshakerServiceServer struct {
+}
+
+func (*UnimplementedHandshakerServiceServer) DoHandshake(srv HandshakerService_DoHandshakeServer) error {
+	return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented")
+}
+
+func RegisterHandshakerServiceServer(s *grpc.Server, srv HandshakerServiceServer) {
+	s.RegisterService(&_HandshakerService_serviceDesc, srv)
+}
+
+func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream})
+}
+
+type HandshakerService_DoHandshakeServer interface {
+	Send(*HandshakerResp) error
+	Recv() (*HandshakerReq, error)
+	grpc.ServerStream
+}
+
+type handshakerServiceDoHandshakeServer struct {
+	grpc.ServerStream
+}
+
+func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) {
+	m := new(HandshakerReq)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+var _HandshakerService_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "grpc.gcp.HandshakerService",
+	HandlerType: (*HandshakerServiceServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "DoHandshake",
+			Handler:       _HandshakerService_DoHandshake_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "grpc/gcp/handshaker.proto",
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
new file mode 100644
index 0000000000..992805165d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
@@ -0,0 +1,184 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: grpc/gcp/transport_security_common.proto
+
+package grpc_gcp
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// The security level of the created channel. The list is sorted in increasing
+// level of security. This order must always be maintained.
+type SecurityLevel int32
+
+const (
+	SecurityLevel_SECURITY_NONE         SecurityLevel = 0
+	SecurityLevel_INTEGRITY_ONLY        SecurityLevel = 1
+	SecurityLevel_INTEGRITY_AND_PRIVACY SecurityLevel = 2
+)
+
+var SecurityLevel_name = map[int32]string{
+	0: "SECURITY_NONE",
+	1: "INTEGRITY_ONLY",
+	2: "INTEGRITY_AND_PRIVACY",
+}
+
+var SecurityLevel_value = map[string]int32{
+	"SECURITY_NONE":         0,
+	"INTEGRITY_ONLY":        1,
+	"INTEGRITY_AND_PRIVACY": 2,
+}
+
+func (x SecurityLevel) String() string {
+	return proto.EnumName(SecurityLevel_name, int32(x))
+}
+
+func (SecurityLevel) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_b97e31e3cc23582a, []int{0}
+}
+
+// Max and min supported RPC protocol versions.
+type RpcProtocolVersions struct {
+	// Maximum supported RPC version.
+	MaxRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,1,opt,name=max_rpc_version,json=maxRpcVersion,proto3" json:"max_rpc_version,omitempty"`
+	// Minimum supported RPC version.
+	MinRpcVersion        *RpcProtocolVersions_Version `protobuf:"bytes,2,opt,name=min_rpc_version,json=minRpcVersion,proto3" json:"min_rpc_version,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *RpcProtocolVersions) Reset()         { *m = RpcProtocolVersions{} }
+func (m *RpcProtocolVersions) String() string { return proto.CompactTextString(m) }
+func (*RpcProtocolVersions) ProtoMessage()    {}
+func (*RpcProtocolVersions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b97e31e3cc23582a, []int{0}
+}
+
+func (m *RpcProtocolVersions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_RpcProtocolVersions.Unmarshal(m, b)
+}
+func (m *RpcProtocolVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_RpcProtocolVersions.Marshal(b, m, deterministic)
+}
+func (m *RpcProtocolVersions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RpcProtocolVersions.Merge(m, src)
+}
+func (m *RpcProtocolVersions) XXX_Size() int {
+	return xxx_messageInfo_RpcProtocolVersions.Size(m)
+}
+func (m *RpcProtocolVersions) XXX_DiscardUnknown() {
+	xxx_messageInfo_RpcProtocolVersions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RpcProtocolVersions proto.InternalMessageInfo
+
+func (m *RpcProtocolVersions) GetMaxRpcVersion() *RpcProtocolVersions_Version {
+	if m != nil {
+		return m.MaxRpcVersion
+	}
+	return nil
+}
+
+func (m *RpcProtocolVersions) GetMinRpcVersion() *RpcProtocolVersions_Version {
+	if m != nil {
+		return m.MinRpcVersion
+	}
+	return nil
+}
+
+// RPC version contains a major version and a minor version.
+type RpcProtocolVersions_Version struct {
+	Major                uint32   `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+	Minor                uint32   `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *RpcProtocolVersions_Version) Reset()         { *m = RpcProtocolVersions_Version{} }
+func (m *RpcProtocolVersions_Version) String() string { return proto.CompactTextString(m) }
+func (*RpcProtocolVersions_Version) ProtoMessage()    {}
+func (*RpcProtocolVersions_Version) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b97e31e3cc23582a, []int{0, 0}
+}
+
+func (m *RpcProtocolVersions_Version) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_RpcProtocolVersions_Version.Unmarshal(m, b)
+}
+func (m *RpcProtocolVersions_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_RpcProtocolVersions_Version.Marshal(b, m, deterministic)
+}
+func (m *RpcProtocolVersions_Version) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RpcProtocolVersions_Version.Merge(m, src)
+}
+func (m *RpcProtocolVersions_Version) XXX_Size() int {
+	return xxx_messageInfo_RpcProtocolVersions_Version.Size(m)
+}
+func (m *RpcProtocolVersions_Version) XXX_DiscardUnknown() {
+	xxx_messageInfo_RpcProtocolVersions_Version.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RpcProtocolVersions_Version proto.InternalMessageInfo
+
+func (m *RpcProtocolVersions_Version) GetMajor() uint32 {
+	if m != nil {
+		return m.Major
+	}
+	return 0
+}
+
+func (m *RpcProtocolVersions_Version) GetMinor() uint32 {
+	if m != nil {
+		return m.Minor
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterEnum("grpc.gcp.SecurityLevel", SecurityLevel_name, SecurityLevel_value)
+	proto.RegisterType((*RpcProtocolVersions)(nil), "grpc.gcp.RpcProtocolVersions")
+	proto.RegisterType((*RpcProtocolVersions_Version)(nil), "grpc.gcp.RpcProtocolVersions.Version")
+}
+
+func init() {
+	proto.RegisterFile("grpc/gcp/transport_security_common.proto", fileDescriptor_b97e31e3cc23582a)
+}
+
+var fileDescriptor_b97e31e3cc23582a = []byte{
+	// 323 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x41, 0x4b, 0x3b, 0x31,
+	0x10, 0xc5, 0xff, 0x5b, 0xf8, 0xab, 0x44, 0x56, 0xeb, 0x6a, 0x41, 0xc5, 0x83, 0x08, 0x42, 0xf1,
+	0x90, 0x05, 0xc5, 0xb3, 0xb4, 0xb5, 0x48, 0xa1, 0x6e, 0xeb, 0xb6, 0x16, 0xea, 0x25, 0xc4, 0x18,
+	0x42, 0x24, 0x9b, 0x09, 0xb3, 0xb1, 0xd4, 0xaf, 0xec, 0xa7, 0x90, 0x4d, 0xbb, 0x14, 0xc1, 0x8b,
+	0xb7, 0xbc, 0xc7, 0xcc, 0x6f, 0x32, 0xf3, 0x48, 0x5b, 0xa1, 0x13, 0xa9, 0x12, 0x2e, 0xf5, 0xc8,
+	0x6d, 0xe9, 0x00, 0x3d, 0x2b, 0xa5, 0xf8, 0x40, 0xed, 0x3f, 0x99, 0x80, 0xa2, 0x00, 0x4b, 0x1d,
+	0x82, 0x87, 0x64, 0xa7, 0xaa, 0xa4, 0x4a, 0xb8, 0x8b, 0xaf, 0x88, 0x1c, 0xe6, 0x4e, 0x8c, 0x2b,
+	0x5b, 0x80, 0x99, 0x49, 0x2c, 0x35, 0xd8, 0x32, 0x79, 0x24, 0xfb, 0x05, 0x5f, 0x32, 0x74, 0x82,
+	0x2d, 0x56, 0xde, 0x71, 0x74, 0x1e, 0xb5, 0x77, 0xaf, 0x2f, 0x69, 0xdd, 0x4b, 0x7f, 0xe9, 0xa3,
+	0xeb, 0x47, 0x1e, 0x17, 0x7c, 0x99, 0x3b, 0xb1, 0x96, 0x01, 0xa7, 0xed, 0x0f, 0x5c, 0xe3, 0x6f,
+	0x38, 0x6d, 0x37, 0xb8, 0xd3, 0x5b, 0xb2, 0x5d, 0x93, 0x8f, 0xc8, 0xff, 0x82, 0xbf, 0x03, 0x86,
+	0xef, 0xc5, 0xf9, 0x4a, 0x04, 0x57, 0x5b, 0xc0, 0x30, 0xa5, 0x72, 0x2b, 0x71, 0xf5, 0x44, 0xe2,
+	0xc9, 0xfa, 0x1e, 0x43, 0xb9, 0x90, 0x26, 0x39, 0x20, 0xf1, 0xa4, 0xdf, 0x7b, 0xce, 0x07, 0xd3,
+	0x39, 0xcb, 0x46, 0x59, 0xbf, 0xf9, 0x2f, 0x49, 0xc8, 0xde, 0x20, 0x9b, 0xf6, 0x1f, 0x82, 0x37,
+	0xca, 0x86, 0xf3, 0x66, 0x94, 0x9c, 0x90, 0xd6, 0xc6, 0xeb, 0x64, 0xf7, 0x6c, 0x9c, 0x0f, 0x66,
+	0x9d, 0xde, 0xbc, 0xd9, 0xe8, 0x2e, 0x49, 0x4b, 0xc3, 0x6a, 0x07, 0x6e, 0x7c, 0x49, 0xb5, 0xf5,
+	0x12, 0x2d, 0x37, 0xdd, 0xb3, 0x69, 0x9d, 0x41, 0x3d, 0xb2, 0x17, 0x12, 0x08, 0x2b, 0x8e, 0xa3,
+	0x97, 0x3b, 0x05, 0xa0, 0x8c, 0xa4, 0x0a, 0x0c, 0xb7, 0x8a, 0x02, 0xaa, 0x34, 0xc4, 0x27, 0x50,
+	0xbe, 0x49, 0xeb, 0x35, 0x37, 0x65, 0x5a, 0x11, 0xd3, 0x9a, 0x98, 0x86, 0xe8, 0x42, 0x11, 0x53,
+	0xc2, 0xbd, 0x6e, 0x05, 0x7d, 0xf3, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x31, 0x14, 0xb4, 0x11, 0xf6,
+	0x01, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/utils.go b/vendor/google.golang.org/grpc/credentials/alts/utils.go
new file mode 100644
index 0000000000..e46280ad5f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/alts/utils.go
@@ -0,0 +1,163 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package alts
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"regexp"
+	"runtime"
+	"strings"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	linuxProductNameFile     = "/sys/class/dmi/id/product_name"
+	windowsCheckCommand      = "powershell.exe"
+	windowsCheckCommandArgs  = "Get-WmiObject -Class Win32_BIOS"
+	powershellOutputFilter   = "Manufacturer"
+	windowsManufacturerRegex = ":(.*)"
+)
+
+type platformError string
+
+func (k platformError) Error() string {
+	return fmt.Sprintf("%s is not supported", string(k))
+}
+
+var (
+	// The following two variables will be reassigned in tests.
+	runningOS          = runtime.GOOS
+	manufacturerReader = func() (io.Reader, error) {
+		switch runningOS {
+		case "linux":
+			return os.Open(linuxProductNameFile)
+		case "windows":
+			cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs)
+			out, err := cmd.Output()
+			if err != nil {
+				return nil, err
+			}
+
+			for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") {
+				if strings.HasPrefix(line, powershellOutputFilter) {
+					re := regexp.MustCompile(windowsManufacturerRegex)
+					name := re.FindString(line)
+					name = strings.TrimLeft(name, ":")
+					return strings.NewReader(name), nil
+				}
+			}
+
+			return nil, errors.New("cannot determine the machine's manufacturer")
+		default:
+			return nil, platformError(runningOS)
+		}
+	}
+	vmOnGCP bool
+)
+
+// isRunningOnGCP checks, without doing a network request, whether the local
+// system is running on GCP.
+func isRunningOnGCP() bool {
+	manufacturer, err := readManufacturer()
+	if os.IsNotExist(err) {
+		return false
+	}
+	if err != nil {
+		log.Fatalf("failure to read manufacturer information: %v", err)
+	}
+	name := string(manufacturer)
+	switch runningOS {
+	case "linux":
+		name = strings.TrimSpace(name)
+		return name == "Google" || name == "Google Compute Engine"
+	case "windows":
+		name = strings.Replace(name, " ", "", -1)
+		name = strings.Replace(name, "\n", "", -1)
+		name = strings.Replace(name, "\r", "", -1)
+		return name == "Google"
+	default:
+		log.Fatal(platformError(runningOS))
+	}
+	return false
+}
+
+func readManufacturer() ([]byte, error) {
+	reader, err := manufacturerReader()
+	if err != nil {
+		return nil, err
+	}
+	if reader == nil {
+		return nil, errors.New("got nil reader")
+	}
+	manufacturer, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err)
+	}
+	return manufacturer, nil
+}
+
+// AuthInfoFromContext extracts the alts.AuthInfo object from the given context,
+// if it exists. This API should be used by gRPC server RPC handlers to get
+// information about the communicating peer. For client-side, use grpc.Peer()
+// CallOption.
+func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) {
+	p, ok := peer.FromContext(ctx)
+	if !ok {
+		return nil, errors.New("no Peer found in Context")
+	}
+	return AuthInfoFromPeer(p)
+}
+
+// AuthInfoFromPeer extracts the alts.AuthInfo object from the given peer, if it
+// exists. This API should be used by gRPC clients after obtaining a peer object
+// using the grpc.Peer() CallOption.
+func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) {
+	altsAuthInfo, ok := p.AuthInfo.(AuthInfo)
+	if !ok {
+		return nil, errors.New("no alts.AuthInfo found in Peer")
+	}
+	return altsAuthInfo, nil
+}
+
+// ClientAuthorizationCheck checks whether the client is authorized to access
+// the requested resources based on the given expected client service accounts.
+// This API should be used by gRPC server RPC handlers. This API should not be
+// used by clients.
+func ClientAuthorizationCheck(ctx context.Context, expectedServiceAccounts []string) error {
+	authInfo, err := AuthInfoFromContext(ctx)
+	if err != nil {
+		return status.Newf(codes.PermissionDenied, "The context is not an ALTS-compatible context: %v", err).Err()
+	}
+	for _, sa := range expectedServiceAccounts {
+		if authInfo.PeerServiceAccount() == sa {
+			return nil
+		}
+	}
+	return status.Newf(codes.PermissionDenied, "Client %v is not authorized", authInfo.PeerServiceAccount()).Err()
+}
diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go
new file mode 100644
index 0000000000..04b349abcf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/google/google.go
@@ -0,0 +1,125 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package google defines credentials for google cloud services.
+package google
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/alts"
+	"google.golang.org/grpc/credentials/oauth"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+)
+
+const tokenRequestTimeout = 30 * time.Second
+
+// NewDefaultCredentials returns a credentials bundle that is configured to work
+// with google services.
+//
+// This API is experimental.
+func NewDefaultCredentials() credentials.Bundle {
+	c := &creds{
+		newPerRPCCreds: func() credentials.PerRPCCredentials {
+			ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout)
+			defer cancel()
+			perRPCCreds, err := oauth.NewApplicationDefault(ctx)
+			if err != nil {
+				grpclog.Warningf("google default creds: failed to create application oauth: %v", err)
+			}
+			return perRPCCreds
+		},
+	}
+	bundle, err := c.NewWithMode(internal.CredsBundleModeFallback)
+	if err != nil {
+		grpclog.Warningf("google default creds: failed to create new creds: %v", err)
+	}
+	return bundle
+}
+
+// NewComputeEngineCredentials returns a credentials bundle that is configured to work
+// with google services. This API must only be used when running on GCE. Authentication configured
+// by this API represents the GCE VM's default service account.
+//
+// This API is experimental.
+func NewComputeEngineCredentials() credentials.Bundle {
+	c := &creds{
+		newPerRPCCreds: func() credentials.PerRPCCredentials {
+			return oauth.NewComputeEngine()
+		},
+	}
+	bundle, err := c.NewWithMode(internal.CredsBundleModeFallback)
+	if err != nil {
+		grpclog.Warningf("compute engine creds: failed to create new creds: %v", err)
+	}
+	return bundle
+}
+
+// creds implements credentials.Bundle.
+type creds struct {
+	// Supported modes are defined in internal/internal.go.
+	mode string
+	// The transport credentials associated with this bundle.
+	transportCreds credentials.TransportCredentials
+	// The per-RPC credentials associated with this bundle.
+	perRPCCreds credentials.PerRPCCredentials
+	// Creates new per-RPC credentials.
+	newPerRPCCreds func() credentials.PerRPCCredentials
+}
+
+func (c *creds) TransportCredentials() credentials.TransportCredentials {
+	return c.transportCreds
+}
+
+func (c *creds) PerRPCCredentials() credentials.PerRPCCredentials {
+	if c == nil {
+		return nil
+	}
+	return c.perRPCCreds
+}
+
+// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
+// existing Bundle may cause races.
+func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) {
+	newCreds := &creds{
+		mode:           mode,
+		newPerRPCCreds: c.newPerRPCCreds,
+	}
+
+	// Create transport credentials.
+	switch mode {
+	case internal.CredsBundleModeFallback:
+		newCreds.transportCreds = credentials.NewTLS(nil)
+	case internal.CredsBundleModeBackendFromBalancer, internal.CredsBundleModeBalancer:
+		// Only the clients can use google default credentials, so we only need
+		// to create new ALTS client creds here.
+		newCreds.transportCreds = alts.NewClientCreds(alts.DefaultClientOptions())
+	default:
+		return nil, fmt.Errorf("unsupported mode: %v", mode)
+	}
+
+	if mode == internal.CredsBundleModeFallback || mode == internal.CredsBundleModeBackendFromBalancer {
+		newCreds.perRPCCreds = newCreds.newPerRPCCreds()
+	}
+
+	return newCreds, nil
+}
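
Reviewer note (not part of the patch): the vendored packages above are consumed together on a server. The sketch below shows one plausible wiring of the new pieces, ALTS transport credentials plus the `ClientAuthorizationCheck` helper from the vendored `credentials/alts/utils.go`. It is a minimal sketch under stated assumptions: the listen address and the `expected-sa@my-project.iam.gserviceaccount.com` service account are illustrative placeholders, and the program only works on a GCP VM, since the vendored code refuses to use ALTS elsewhere.

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/alts"
)

func main() {
	// ALTS server-side transport credentials; NewServerCreds and
	// DefaultServerOptions are part of the vendored credentials/alts package.
	creds := alts.NewServerCreds(alts.DefaultServerOptions())

	// A unary interceptor that rejects any peer whose ALTS-authenticated
	// service account is not in the allow-list. The account below is a
	// placeholder, not something defined in this diff.
	authz := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,
		handler grpc.UnaryHandler) (interface{}, error) {
		if err := alts.ClientAuthorizationCheck(ctx,
			[]string{"expected-sa@my-project.iam.gserviceaccount.com"}); err != nil {
			return nil, err
		}
		return handler(ctx, req)
	}

	lis, err := net.Listen("tcp", "localhost:10001")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s := grpc.NewServer(grpc.Creds(creds), grpc.UnaryInterceptor(authz))
	// Register services on s before serving.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
```

On the client side, the new `credentials/google` package slots in via `grpc.Dial(addr, grpc.WithCredentialsBundle(google.NewDefaultCredentials()))`, which falls back to TLS plus application-default OAuth when ALTS is unavailable.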