소스 검색

bump go.opencensus.io v0.22.3

full diff: https://github.com/census-instrumentation/opencensus-go/compare/v0.11.0...v0.22.3

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn 5년 전
부모
커밋
2971204e43
69개의 변경된 파일에 3386줄의 추가와 546줄의 삭제
  1. 2 1
      vendor.conf
  2. 191 0
      vendor/github.com/golang/groupcache/LICENSE
  3. 73 0
      vendor/github.com/golang/groupcache/README.md
  4. 133 0
      vendor/github.com/golang/groupcache/lru/lru.go
  5. 102 25
      vendor/go.opencensus.io/README.md
  6. 15 0
      vendor/go.opencensus.io/go.mod
  7. 8 3
      vendor/go.opencensus.io/internal/internal.go
  8. 4 1
      vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
  9. 1 0
      vendor/go.opencensus.io/internal/traceinternals.go
  10. 5 14
      vendor/go.opencensus.io/metric/metricdata/doc.go
  11. 38 0
      vendor/go.opencensus.io/metric/metricdata/exemplar.go
  12. 35 0
      vendor/go.opencensus.io/metric/metricdata/label.go
  13. 46 0
      vendor/go.opencensus.io/metric/metricdata/metric.go
  14. 193 0
      vendor/go.opencensus.io/metric/metricdata/point.go
  15. 16 0
      vendor/go.opencensus.io/metric/metricdata/type_string.go
  16. 27 0
      vendor/go.opencensus.io/metric/metricdata/unit.go
  17. 78 0
      vendor/go.opencensus.io/metric/metricproducer/manager.go
  18. 28 0
      vendor/go.opencensus.io/metric/metricproducer/producer.go
  19. 21 0
      vendor/go.opencensus.io/opencensus.go
  20. 2 1
      vendor/go.opencensus.io/plugin/ocgrpc/client.go
  21. 2 11
      vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go
  22. 2 2
      vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go
  23. 1 1
      vendor/go.opencensus.io/plugin/ocgrpc/server.go
  24. 2 2
      vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go
  25. 1 1
      vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
  26. 51 23
      vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
  27. 1 1
      vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go
  28. 36 3
      vendor/go.opencensus.io/plugin/ochttp/client.go
  29. 31 13
      vendor/go.opencensus.io/plugin/ochttp/client_stats.go
  30. 1 1
      vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
  31. 61 0
      vendor/go.opencensus.io/plugin/ochttp/route.go
  32. 290 54
      vendor/go.opencensus.io/plugin/ochttp/server.go
  33. 169 0
      vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go
  34. 140 21
      vendor/go.opencensus.io/plugin/ochttp/stats.go
  35. 76 31
      vendor/go.opencensus.io/plugin/ochttp/trace.go
  36. 44 0
      vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go
  37. 164 0
      vendor/go.opencensus.io/resource/resource.go
  38. 26 12
      vendor/go.opencensus.io/stats/doc.go
  39. 1 1
      vendor/go.opencensus.io/stats/internal/record.go
  40. 23 10
      vendor/go.opencensus.io/stats/measure.go
  41. 24 21
      vendor/go.opencensus.io/stats/measure_float64.go
  42. 24 21
      vendor/go.opencensus.io/stats/measure_int64.go
  43. 74 9
      vendor/go.opencensus.io/stats/record.go
  44. 1 0
      vendor/go.opencensus.io/stats/units.go
  45. 6 5
      vendor/go.opencensus.io/stats/view/aggregation.go
  46. 125 39
      vendor/go.opencensus.io/stats/view/aggregation_data.go
  47. 6 4
      vendor/go.opencensus.io/stats/view/collector.go
  48. 28 27
      vendor/go.opencensus.io/stats/view/doc.go
  49. 3 0
      vendor/go.opencensus.io/stats/view/export.go
  50. 54 16
      vendor/go.opencensus.io/stats/view/view.go
  51. 149 0
      vendor/go.opencensus.io/stats/view/view_to_metric.go
  52. 94 44
      vendor/go.opencensus.io/stats/view/worker.go
  53. 22 7
      vendor/go.opencensus.io/stats/view/worker_commands.go
  54. 3 1
      vendor/go.opencensus.io/tag/context.go
  55. 10 1
      vendor/go.opencensus.io/tag/key.go
  56. 49 17
      vendor/go.opencensus.io/tag/map.go
  57. 31 13
      vendor/go.opencensus.io/tag/map_codec.go
  58. 52 0
      vendor/go.opencensus.io/tag/metadata.go
  59. 1 1
      vendor/go.opencensus.io/tag/profile_19.go
  60. 7 2
      vendor/go.opencensus.io/trace/basetypes.go
  61. 47 1
      vendor/go.opencensus.io/trace/config.go
  62. 3 1
      vendor/go.opencensus.io/trace/doc.go
  63. 38 0
      vendor/go.opencensus.io/trace/evictedqueue.go
  64. 33 12
      vendor/go.opencensus.io/trace/export.go
  65. 1 0
      vendor/go.opencensus.io/trace/internal/internal.go
  66. 61 0
      vendor/go.opencensus.io/trace/lrumap.go
  67. 3 4
      vendor/go.opencensus.io/trace/sampling.go
  68. 150 68
      vendor/go.opencensus.io/trace/trace.go
  69. 147 0
      vendor/go.opencensus.io/trace/tracestate/tracestate.go

+ 2 - 1
vendor.conf

@@ -117,7 +117,8 @@ github.com/bsphere/le_go                            7a984a84b5492ae539b79b62fb4a
 # gcplogs deps
 # gcplogs deps
 golang.org/x/oauth2                                 bf48bf16ab8d622ce64ec6ce98d2c98f916b6303
 golang.org/x/oauth2                                 bf48bf16ab8d622ce64ec6ce98d2c98f916b6303
 google.golang.org/api                               de943baf05a022a8f921b544b7827bacaba1aed5
 google.golang.org/api                               de943baf05a022a8f921b544b7827bacaba1aed5
-go.opencensus.io                                    c3ed530f775d85e577ca652cb052a52c078aad26 # v0.11.0
+github.com/golang/groupcache                        869f871628b6baa9cfbc11732cdf6546b17c1298
+go.opencensus.io                                    d835ff86be02193d324330acdb7d65546b05f814 # v0.22.3
 cloud.google.com/go                                 ceeb313ad77b789a7fa5287b36a1d127b69b7093 # v0.44.3
 cloud.google.com/go                                 ceeb313ad77b789a7fa5287b36a1d127b69b7093 # v0.44.3
 github.com/googleapis/gax-go                        bd5b16380fd03dc758d11cef74ba2e3bc8b0e8c2 # v2.0.5
 github.com/googleapis/gax-go                        bd5b16380fd03dc758d11cef74ba2e3bc8b0e8c2 # v2.0.5
 google.golang.org/genproto                          3f1135a288c9a07e340ae8ba4cc6c7065a3160e8
 google.golang.org/genproto                          3f1135a288c9a07e340ae8ba4cc6c7065a3160e8

+ 191 - 0
vendor/github.com/golang/groupcache/LICENSE

@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 73 - 0
vendor/github.com/golang/groupcache/README.md

@@ -0,0 +1,73 @@
+# groupcache
+
+## Summary
+
+groupcache is a caching and cache-filling library, intended as a
+replacement for memcached in many cases.
+
+For API docs and examples, see http://godoc.org/github.com/golang/groupcache
+
+## Comparison to memcached
+
+### **Like memcached**, groupcache:
+
+ * shards by key to select which peer is responsible for that key
+
+### **Unlike memcached**, groupcache:
+
+ * does not require running a separate set of servers, thus massively
+   reducing deployment/configuration pain.  groupcache is a client
+   library as well as a server.  It connects to its own peers.
+
+ * comes with a cache filling mechanism.  Whereas memcached just says
+   "Sorry, cache miss", often resulting in a thundering herd of
+   database (or whatever) loads from an unbounded number of clients
+   (which has resulted in several fun outages), groupcache coordinates
+   cache fills such that only one load in one process of an entire
+   replicated set of processes populates the cache, then multiplexes
+   the loaded value to all callers.
+
+ * does not support versioned values.  If key "foo" is value "bar",
+   key "foo" must always be "bar".  There are neither cache expiration
+   times, nor explicit cache evictions.  Thus there is also no CAS,
+   nor Increment/Decrement.  This also means that groupcache....
+
+ * ... supports automatic mirroring of super-hot items to multiple
+   processes.  This prevents memcached hot spotting where a machine's
+   CPU and/or NIC are overloaded by very popular keys/values.
+
+ * is currently only available for Go.  It's very unlikely that I
+   (bradfitz@) will port the code to any other language.
+
+## Loading process
+
+In a nutshell, a groupcache lookup of **Get("foo")** looks like:
+
+(On machine #5 of a set of N machines running the same code)
+
+ 1. Is the value of "foo" in local memory because it's super hot?  If so, use it.
+
+ 2. Is the value of "foo" in local memory because peer #5 (the current
+    peer) is the owner of it?  If so, use it.
+
+ 3. Amongst all the peers in my set of N, am I the owner of the key
+    "foo"?  (e.g. does it consistent hash to 5?)  If so, load it.  If
+    other callers come in, via the same process or via RPC requests
+    from peers, they block waiting for the load to finish and get the
+    same answer.  If not, RPC to the peer that's the owner and get
+    the answer.  If the RPC fails, just load it locally (still with
+    local dup suppression).
+
+## Users
+
+groupcache is in production use by dl.google.com (its original user),
+parts of Blogger, parts of Google Code, parts of Google Fiber, parts
+of Google production monitoring systems, etc.
+
+## Presentations
+
+See http://talks.golang.org/2013/oscon-dl.slide
+
+## Help
+
+Use the golang-nuts mailing list for any discussion or questions.

+ 133 - 0
vendor/github.com/golang/groupcache/lru/lru.go

@@ -0,0 +1,133 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package lru implements an LRU cache.
+package lru
+
+import "container/list"
+
+// Cache is an LRU cache. It is not safe for concurrent access.
+type Cache struct {
+	// MaxEntries is the maximum number of cache entries before
+	// an item is evicted. Zero means no limit.
+	MaxEntries int
+
+	// OnEvicted optionally specifies a callback function to be
+	// executed when an entry is purged from the cache.
+	OnEvicted func(key Key, value interface{})
+
+	ll    *list.List
+	cache map[interface{}]*list.Element
+}
+
+// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
+type Key interface{}
+
+type entry struct {
+	key   Key
+	value interface{}
+}
+
+// New creates a new Cache.
+// If maxEntries is zero, the cache has no limit and it's assumed
+// that eviction is done by the caller.
+func New(maxEntries int) *Cache {
+	return &Cache{
+		MaxEntries: maxEntries,
+		ll:         list.New(),
+		cache:      make(map[interface{}]*list.Element),
+	}
+}
+
+// Add adds a value to the cache.
+func (c *Cache) Add(key Key, value interface{}) {
+	if c.cache == nil {
+		c.cache = make(map[interface{}]*list.Element)
+		c.ll = list.New()
+	}
+	if ee, ok := c.cache[key]; ok {
+		c.ll.MoveToFront(ee)
+		ee.Value.(*entry).value = value
+		return
+	}
+	ele := c.ll.PushFront(&entry{key, value})
+	c.cache[key] = ele
+	if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
+		c.RemoveOldest()
+	}
+}
+
+// Get looks up a key's value from the cache.
+func (c *Cache) Get(key Key) (value interface{}, ok bool) {
+	if c.cache == nil {
+		return
+	}
+	if ele, hit := c.cache[key]; hit {
+		c.ll.MoveToFront(ele)
+		return ele.Value.(*entry).value, true
+	}
+	return
+}
+
+// Remove removes the provided key from the cache.
+func (c *Cache) Remove(key Key) {
+	if c.cache == nil {
+		return
+	}
+	if ele, hit := c.cache[key]; hit {
+		c.removeElement(ele)
+	}
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *Cache) RemoveOldest() {
+	if c.cache == nil {
+		return
+	}
+	ele := c.ll.Back()
+	if ele != nil {
+		c.removeElement(ele)
+	}
+}
+
+func (c *Cache) removeElement(e *list.Element) {
+	c.ll.Remove(e)
+	kv := e.Value.(*entry)
+	delete(c.cache, kv.key)
+	if c.OnEvicted != nil {
+		c.OnEvicted(kv.key, kv.value)
+	}
+}
+
+// Len returns the number of items in the cache.
+func (c *Cache) Len() int {
+	if c.cache == nil {
+		return 0
+	}
+	return c.ll.Len()
+}
+
+// Clear purges all stored items from the cache.
+func (c *Cache) Clear() {
+	if c.OnEvicted != nil {
+		for _, e := range c.cache {
+			kv := e.Value.(*entry)
+			c.OnEvicted(kv.key, kv.value)
+		}
+	}
+	c.ll = nil
+	c.cache = nil
+}

+ 102 - 25
vendor/go.opencensus.io/README.md

@@ -7,7 +7,9 @@
 
 
 OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
 OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
 collecting application performance and behavior monitoring data.
 collecting application performance and behavior monitoring data.
-Currently it consists of three major components: tags, stats, and tracing.
+Currently it consists of three major components: tags, stats and tracing.
+
+#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289).
 
 
 ## Installation
 ## Installation
 
 
@@ -22,17 +24,42 @@ The use of vendoring or a dependency management tool is recommended.
 
 
 OpenCensus Go libraries require Go 1.8 or later.
 OpenCensus Go libraries require Go 1.8 or later.
 
 
+## Getting Started
+
+The easiest way to get started using OpenCensus in your application is to use an existing
+integration with your RPC framework:
+
+* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp)
+* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc)
+* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql)
+* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus)
+* [Groupcache](https://godoc.org/github.com/orijtech/groupcache)
+* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy)
+* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver)
+* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo)
+* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis)
+* [Memcache](https://godoc.org/github.com/orijtech/gomemcache)
+
+If you're using a framework not listed here, you could either implement your own middleware for your
+framework or use [custom stats](#stats) and [spans](#spans) directly in your application.
+
 ## Exporters
 ## Exporters
 
 
-OpenCensus can export instrumentation data to various backends. 
-Currently, OpenCensus supports:
+OpenCensus can export instrumentation data to various backends.
+OpenCensus has exporter implementations for the following, users
+can implement their own exporters by implementing the exporter interfaces
+([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter),
+[trace](https://godoc.org/go.opencensus.io/trace#Exporter)):
 
 
 * [Prometheus][exporter-prom] for stats
 * [Prometheus][exporter-prom] for stats
 * [OpenZipkin][exporter-zipkin] for traces
 * [OpenZipkin][exporter-zipkin] for traces
-* Stackdriver [Monitoring][exporter-stackdriver] and [Trace][exporter-stackdriver]
+* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces
 * [Jaeger][exporter-jaeger] for traces
 * [Jaeger][exporter-jaeger] for traces
 * [AWS X-Ray][exporter-xray] for traces
 * [AWS X-Ray][exporter-xray] for traces
-
+* [Datadog][exporter-datadog] for stats and traces
+* [Graphite][exporter-graphite] for stats
+* [Honeycomb][exporter-honeycomb] for traces
+* [New Relic][exporter-newrelic] for stats and traces
 
 
 ## Overview
 ## Overview
 
 
@@ -43,13 +70,6 @@ multiple services until there is a response. OpenCensus allows
 you to instrument your services and collect diagnostics data all
 you to instrument your services and collect diagnostics data all
 through your services end-to-end.
 through your services end-to-end.
 
 
-Start with instrumenting HTTP and gRPC clients and servers,
-then add additional custom instrumentation if needed.
-
-* [HTTP guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/http)
-* [gRPC guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/grpc)
-
-
 ## Tags
 ## Tags
 
 
 Tags represent propagated key-value pairs. They are propagated using `context.Context`
 Tags represent propagated key-value pairs. They are propagated using `context.Context`
@@ -57,11 +77,11 @@ in the same process or can be encoded to be transmitted on the wire. Usually, th
 be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler`
 be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler`
 for gRPC.
 for gRPC.
 
 
-Package tag allows adding or modifying tags in the current context.
+Package `tag` allows adding or modifying tags in the current context.
 
 
 [embedmd]:# (internal/readme/tags.go new)
 [embedmd]:# (internal/readme/tags.go new)
 ```go
 ```go
-ctx, err = tag.New(ctx,
+ctx, err := tag.New(ctx,
 	tag.Insert(osKey, "macOS-10.12.5"),
 	tag.Insert(osKey, "macOS-10.12.5"),
 	tag.Upsert(userIDKey, "cde36753ed"),
 	tag.Upsert(userIDKey, "cde36753ed"),
 )
 )
@@ -106,7 +126,7 @@ Currently three types of aggregations are supported:
 
 
 [embedmd]:# (internal/readme/stats.go aggs)
 [embedmd]:# (internal/readme/stats.go aggs)
 ```go
 ```go
-distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32)
+distAgg := view.Distribution(1<<32, 2<<32, 3<<32)
 countAgg := view.Count()
 countAgg := view.Count()
 sumAgg := view.Sum()
 sumAgg := view.Sum()
 ```
 ```
@@ -116,26 +136,79 @@ Here we create a view with the DistributionAggregation over our measure.
 [embedmd]:# (internal/readme/stats.go view)
 [embedmd]:# (internal/readme/stats.go view)
 ```go
 ```go
 if err := view.Register(&view.View{
 if err := view.Register(&view.View{
-	Name:        "my.org/video_size_distribution",
+	Name:        "example.com/video_size_distribution",
 	Description: "distribution of processed video size over time",
 	Description: "distribution of processed video size over time",
 	Measure:     videoSize,
 	Measure:     videoSize,
-	Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32),
+	Aggregation: view.Distribution(1<<32, 2<<32, 3<<32),
 }); err != nil {
 }); err != nil {
-	log.Fatalf("Failed to subscribe to view: %v", err)
+	log.Fatalf("Failed to register view: %v", err)
 }
 }
 ```
 ```
 
 
-Subscribe begins collecting data for the view. Subscribed views' data will be
+Register begins collecting data for the view. Registered views' data will be
 exported via the registered exporters.
 exported via the registered exporters.
 
 
 ## Traces
 ## Traces
 
 
+A distributed trace tracks the progression of a single user request as
+it is handled by the services and processes that make up an application.
+Each step is called a span in the trace. Spans include metadata about the step,
+including especially the time spent in the step, called the span’s latency.
+
+Below you see a trace and several spans underneath it.
+
+![Traces and spans](https://i.imgur.com/7hZwRVj.png)
+
+### Spans
+
+Span is the unit step in a trace. Each span has a name, latency, status and
+additional metadata.
+
+Below we are starting a span for a cache read and ending it
+when we are done:
+
 [embedmd]:# (internal/readme/trace.go startend)
 [embedmd]:# (internal/readme/trace.go startend)
 ```go
 ```go
-ctx, span := trace.StartSpan(ctx, "your choice of name")
+ctx, span := trace.StartSpan(ctx, "cache.Get")
 defer span.End()
 defer span.End()
+
+// Do work to get from cache.
 ```
 ```
 
 
+### Propagation
+
+Spans can have parents or can be root spans if they don't have any parents.
+The current span is propagated in-process and across the network to allow associating
+new child spans with the parent.
+
+In the same process, `context.Context` is used to propagate spans.
+`trace.StartSpan` creates a new span as a root if the current context
+doesn't contain a span. Or, it creates a child of the span that is
+already in current context. The returned context can be used to keep
+propagating the newly created span in the current context.
+
+[embedmd]:# (internal/readme/trace.go startend)
+```go
+ctx, span := trace.StartSpan(ctx, "cache.Get")
+defer span.End()
+
+// Do work to get from cache.
+```
+
+Across the network, OpenCensus provides different propagation
+methods for different protocols.
+
+* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation).
+* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation)
+  by default but can be configured to use a custom propagation method by setting another
+  [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat).
+
+## Execution Tracer
+
+With Go 1.11, OpenCensus Go will support integration with the Go execution tracer.
+See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68)
+for an example of their mutual use.
+
 ## Profiles
 ## Profiles
 
 
 OpenCensus tags can be applied as profiler labels
 OpenCensus tags can be applied as profiler labels
@@ -167,7 +240,7 @@ Before version 1.0.0, the following deprecation policy will be observed:
 
 
 No backwards-incompatible changes will be made except for the removal of symbols that have
 No backwards-incompatible changes will be made except for the removal of symbols that have
 been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
 been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
-removing the *Deprecated* functionality will be made no sooner than 28 days after the first 
+removing the *Deprecated* functionality will be made no sooner than 28 days after the first
 release in which the functionality was marked *Deprecated*.
 release in which the functionality was marked *Deprecated*.
 
 
 [travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
 [travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
@@ -183,8 +256,12 @@ release in which the functionality was marked *Deprecated*.
 [new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap
 [new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap
 [new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
 [new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
 
 
-[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus
+[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus
 [exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
 [exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
-[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin
-[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger
-[exporter-xray]: https://github.com/census-instrumentation/opencensus-go-exporter-aws
+[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin
+[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger
+[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws
+[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog
+[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite
+[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter
+[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go

+ 15 - 0
vendor/go.opencensus.io/go.mod

@@ -0,0 +1,15 @@
+module go.opencensus.io
+
+require (
+	github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6
+	github.com/golang/protobuf v1.3.1
+	github.com/google/go-cmp v0.3.0
+	github.com/stretchr/testify v1.4.0
+	golang.org/x/net v0.0.0-20190620200207-3b0461eec859
+	golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect
+	golang.org/x/text v0.3.2 // indirect
+	google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect
+	google.golang.org/grpc v1.20.1
+)
+
+go 1.13

+ 8 - 3
vendor/go.opencensus.io/internal/internal.go

@@ -14,11 +14,16 @@
 
 
 package internal // import "go.opencensus.io/internal"
 package internal // import "go.opencensus.io/internal"
 
 
-import "time"
+import (
+	"fmt"
+	"time"
+
+	opencensus "go.opencensus.io"
+)
 
 
 // UserAgent is the user agent to be added to the outgoing
 // UserAgent is the user agent to be added to the outgoing
 // requests from the exporters.
 // requests from the exporters.
-const UserAgent = "opencensus-go [0.11.0]"
+var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version())
 
 
 // MonotonicEndTime returns the end time at present
 // MonotonicEndTime returns the end time at present
 // but offset from start, monotonically.
 // but offset from start, monotonically.
@@ -28,5 +33,5 @@ const UserAgent = "opencensus-go [0.11.0]"
 // end as a monotonic time.
 // end as a monotonic time.
 // See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
 // See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
 func MonotonicEndTime(start time.Time) time.Time {
 func MonotonicEndTime(start time.Time) time.Time {
-	return start.Add(time.Now().Sub(start))
+	return start.Add(time.Since(start))
 }
 }

+ 4 - 1
vendor/go.opencensus.io/internal/tagencoding/tagencoding.go

@@ -17,6 +17,7 @@
 // used internally by the stats collector.
 // used internally by the stats collector.
 package tagencoding // import "go.opencensus.io/internal/tagencoding"
 package tagencoding // import "go.opencensus.io/internal/tagencoding"
 
 
+// Values represent the encoded buffer for the values.
 type Values struct {
 type Values struct {
 	Buffer     []byte
 	Buffer     []byte
 	WriteIndex int
 	WriteIndex int
@@ -31,6 +32,7 @@ func (vb *Values) growIfRequired(expected int) {
 	}
 	}
 }
 }
 
 
+// WriteValue is the helper method to encode Values from map[Key][]byte.
 func (vb *Values) WriteValue(v []byte) {
 func (vb *Values) WriteValue(v []byte) {
 	length := len(v) & 0xff
 	length := len(v) & 0xff
 	vb.growIfRequired(1 + length)
 	vb.growIfRequired(1 + length)
@@ -49,7 +51,7 @@ func (vb *Values) WriteValue(v []byte) {
 	vb.WriteIndex += length
 	vb.WriteIndex += length
 }
 }
 
 
-// ReadValue is the helper method to read the values when decoding valuesBytes to a map[Key][]byte.
+// ReadValue is the helper method to decode Values to a map[Key][]byte.
 func (vb *Values) ReadValue() []byte {
 func (vb *Values) ReadValue() []byte {
 	// read length of v
 	// read length of v
 	length := int(vb.Buffer[vb.ReadIndex])
 	length := int(vb.Buffer[vb.ReadIndex])
@@ -67,6 +69,7 @@ func (vb *Values) ReadValue() []byte {
 	return v
 	return v
 }
 }
 
 
+// Bytes returns a reference to already written bytes in the Buffer.
 func (vb *Values) Bytes() []byte {
 func (vb *Values) Bytes() []byte {
 	return vb.Buffer[:vb.WriteIndex]
 	return vb.Buffer[:vb.WriteIndex]
 }
 }

+ 1 - 0
vendor/go.opencensus.io/internal/traceinternals.go

@@ -22,6 +22,7 @@ import (
 // TODO(#412): remove this
 // TODO(#412): remove this
 var Trace interface{}
 var Trace interface{}
 
 
+// LocalSpanStoreEnabled true if the local span store is enabled.
 var LocalSpanStoreEnabled bool
 var LocalSpanStoreEnabled bool
 
 
 // BucketConfiguration stores the number of samples to store for span buckets
 // BucketConfiguration stores the number of samples to store for span buckets

+ 5 - 14
vendor/go.opencensus.io/stats/internal/validation.go → vendor/go.opencensus.io/metric/metricdata/doc.go

@@ -12,17 +12,8 @@
 // See the License for the specific language governing permissions and
 // See the License for the specific language governing permissions and
 // limitations under the License.
 // limitations under the License.
 
 
-package internal // import "go.opencensus.io/stats/internal"
-
-const (
-	MaxNameLength = 255
-)
-
-func IsPrintable(str string) bool {
-	for _, r := range str {
-		if !(r >= ' ' && r <= '~') {
-			return false
-		}
-	}
-	return true
-}
+// Package metricdata contains the metrics data model.
+//
+// This is an EXPERIMENTAL package, and may change in arbitrary ways without
+// notice.
+package metricdata // import "go.opencensus.io/metric/metricdata"

+ 38 - 0
vendor/go.opencensus.io/metric/metricdata/exemplar.go

@@ -0,0 +1,38 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+import (
+	"time"
+)
+
+// Exemplars keys.
+const (
+	AttachmentKeySpanContext = "SpanContext"
+)
+
+// Exemplar is an example data point associated with each bucket of a
+// distribution type aggregation.
+//
+// Their purpose is to provide an example of the kind of thing
+// (request, RPC, trace span, etc.) that resulted in that measurement.
+type Exemplar struct {
+	Value       float64     // the value that was recorded
+	Timestamp   time.Time   // the time the value was recorded
+	Attachments Attachments // attachments (if any)
+}
+
+// Attachments is a map of extra values associated with a recorded data point.
+type Attachments map[string]interface{}

+ 35 - 0
vendor/go.opencensus.io/metric/metricdata/label.go

@@ -0,0 +1,35 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+// LabelKey represents key of a label. It has optional
+// description attribute.
+type LabelKey struct {
+	Key         string
+	Description string
+}
+
+// LabelValue represents the value of a label.
+// The zero value represents a missing label value, which may be treated
+// differently to an empty string value by some back ends.
+type LabelValue struct {
+	Value   string // string value of the label
+	Present bool   // flag that indicates whether a value is present or not
+}
+
+// NewLabelValue creates a new non-nil LabelValue that represents the given string.
+func NewLabelValue(val string) LabelValue {
+	return LabelValue{Value: val, Present: true}
+}

+ 46 - 0
vendor/go.opencensus.io/metric/metricdata/metric.go

@@ -0,0 +1,46 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+import (
+	"time"
+
+	"go.opencensus.io/resource"
+)
+
+// Descriptor holds metadata about a metric.
+type Descriptor struct {
+	Name        string     // full name of the metric
+	Description string     // human-readable description
+	Unit        Unit       // units for the measure
+	Type        Type       // type of measure
+	LabelKeys   []LabelKey // label keys
+}
+
+// Metric represents a quantity measured against a resource with different
+// label value combinations.
+type Metric struct {
+	Descriptor Descriptor         // metric descriptor
+	Resource   *resource.Resource // resource against which this was measured
+	TimeSeries []*TimeSeries      // one time series for each combination of label values
+}
+
+// TimeSeries is a sequence of points associated with a combination of label
+// values.
+type TimeSeries struct {
+	LabelValues []LabelValue // label values, same order as keys in the metric descriptor
+	Points      []Point      // points sequence
+	StartTime   time.Time    // time we started recording this time series
+}

+ 193 - 0
vendor/go.opencensus.io/metric/metricdata/point.go

@@ -0,0 +1,193 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+import (
+	"time"
+)
+
+// Point is a single data point of a time series.
+type Point struct {
+	// Time is the point in time that this point represents in a time series.
+	Time time.Time
+	// Value is the value of this point. Prefer using ReadValue to switching on
+	// the value type, since new value types might be added.
+	Value interface{}
+}
+
+//go:generate stringer -type ValueType
+
+// NewFloat64Point creates a new Point holding a float64 value.
+func NewFloat64Point(t time.Time, val float64) Point {
+	return Point{
+		Value: val,
+		Time:  t,
+	}
+}
+
+// NewInt64Point creates a new Point holding an int64 value.
+func NewInt64Point(t time.Time, val int64) Point {
+	return Point{
+		Value: val,
+		Time:  t,
+	}
+}
+
+// NewDistributionPoint creates a new Point holding a Distribution value.
+func NewDistributionPoint(t time.Time, val *Distribution) Point {
+	return Point{
+		Value: val,
+		Time:  t,
+	}
+}
+
+// NewSummaryPoint creates a new Point holding a Summary value.
+func NewSummaryPoint(t time.Time, val *Summary) Point {
+	return Point{
+		Value: val,
+		Time:  t,
+	}
+}
+
+// ValueVisitor allows reading the value of a point.
+type ValueVisitor interface {
+	VisitFloat64Value(float64)
+	VisitInt64Value(int64)
+	VisitDistributionValue(*Distribution)
+	VisitSummaryValue(*Summary)
+}
+
+// ReadValue accepts a ValueVisitor and calls the appropriate method with the
+// value of this point.
+// Consumers of Point should use this in preference to switching on the type
+// of the value directly, since new value types may be added.
+func (p Point) ReadValue(vv ValueVisitor) {
+	switch v := p.Value.(type) {
+	case int64:
+		vv.VisitInt64Value(v)
+	case float64:
+		vv.VisitFloat64Value(v)
+	case *Distribution:
+		vv.VisitDistributionValue(v)
+	case *Summary:
+		vv.VisitSummaryValue(v)
+	default:
+		panic("unexpected value type")
+	}
+}
+
+// Distribution contains summary statistics for a population of values. It
+// optionally contains a histogram representing the distribution of those
+// values across a set of buckets.
+type Distribution struct {
+	// Count is the number of values in the population. Must be non-negative. This value
+	// must equal the sum of the values in bucket_counts if a histogram is
+	// provided.
+	Count int64
+	// Sum is the sum of the values in the population. If count is zero then this field
+	// must be zero.
+	Sum float64
+	// SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the
+	// population. For values x_i this is:
+	//
+	//     Sum[i=1..n]((x_i - mean)^2)
+	//
+	// Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
+	// describes Welford's method for accumulating this sum in one pass.
+	//
+	// If count is zero then this field must be zero.
+	SumOfSquaredDeviation float64
+	// BucketOptions describes the bounds of the histogram buckets in this
+	// distribution.
+	//
+	// A Distribution may optionally contain a histogram of the values in the
+	// population.
+	//
+	// If nil, there is no associated histogram.
+	BucketOptions *BucketOptions
+	// Buckets: if the distribution does not have a histogram, then omit this field.
+	// If there is a histogram, then the sum of the values in the Bucket counts
+	// must equal the value in the count field of the distribution.
+	Buckets []Bucket
+}
+
+// BucketOptions describes the bounds of the histogram buckets in this
+// distribution.
+type BucketOptions struct {
+	// Bounds specifies a set of bucket upper bounds.
+	// This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket
+	// index i are:
+	//
+	// [0, Bounds[i]) for i == 0
+	// [Bounds[i-1], Bounds[i]) for 0 < i < N-1
+	// [Bounds[i-1], +infinity) for i == N-1
+	Bounds []float64
+}
+
+// Bucket represents a single bucket (value range) in a distribution.
+type Bucket struct {
+	// Count is the number of values in each bucket of the histogram, as described in
+	// bucket_bounds.
+	Count int64
+	// Exemplar associated with this bucket (if any).
+	Exemplar *Exemplar
+}
+
+// Summary is a representation of percentiles.
+type Summary struct {
+	// Count is the cumulative count (if available).
+	Count int64
+	// Sum is the cumulative sum of values  (if available).
+	Sum float64
+	// HasCountAndSum is true if Count and Sum are available.
+	HasCountAndSum bool
+	// Snapshot represents percentiles calculated over an arbitrary time window.
+	// The values in this struct can be reset at arbitrary unknown times, with
+	// the requirement that all of them are reset at the same time.
+	Snapshot Snapshot
+}
+
+// Snapshot represents percentiles over an arbitrary time.
+// The values in this struct can be reset at arbitrary unknown times, with
+// the requirement that all of them are reset at the same time.
+type Snapshot struct {
+	// Count is the number of values in the snapshot. Optional since some systems don't
+	// expose this. Set to 0 if not available.
+	Count int64
+	// Sum is the sum of values in the snapshot. Optional since some systems don't
+	// expose this. If count is 0 then this field must be zero.
+	Sum float64
+	// Percentiles is a map from percentile (range (0-100.0]) to the value of
+	// the percentile.
+	Percentiles map[float64]float64
+}
+
+//go:generate stringer -type Type
+
+// Type is the overall type of metric, including its value type and whether it
+// represents a cumulative total (since the start time) or if it represents a
+// gauge value.
+type Type int
+
+// Metric types.
+const (
+	TypeGaugeInt64 Type = iota
+	TypeGaugeFloat64
+	TypeGaugeDistribution
+	TypeCumulativeInt64
+	TypeCumulativeFloat64
+	TypeCumulativeDistribution
+	TypeSummary
+)

+ 16 - 0
vendor/go.opencensus.io/metric/metricdata/type_string.go

@@ -0,0 +1,16 @@
+// Code generated by "stringer -type Type"; DO NOT EDIT.
+
+package metricdata
+
+import "strconv"
+
+const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary"
+
+var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128}
+
+func (i Type) String() string {
+	if i < 0 || i >= Type(len(_Type_index)-1) {
+		return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Type_name[_Type_index[i]:_Type_index[i+1]]
+}

+ 27 - 0
vendor/go.opencensus.io/metric/metricdata/unit.go

@@ -0,0 +1,27 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+// Unit is a string encoded according to the case-sensitive abbreviations from the
+// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
+type Unit string
+
+// Predefined units. To record against a unit not represented here, create your
+// own Unit type constant from a string.
+const (
+	UnitDimensionless Unit = "1"
+	UnitBytes         Unit = "By"
+	UnitMilliseconds  Unit = "ms"
+)

+ 78 - 0
vendor/go.opencensus.io/metric/metricproducer/manager.go

@@ -0,0 +1,78 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricproducer
+
+import (
+	"sync"
+)
+
+// Manager maintains a list of active producers. Producers can register
+// with the manager to allow readers to read all metrics provided by them.
+// Readers can retrieve all producers registered with the manager,
+// read metrics from the producers and export them.
+type Manager struct {
+	mu        sync.RWMutex
+	producers map[Producer]struct{}
+}
+
+var prodMgr *Manager
+var once sync.Once
+
+// GlobalManager is a single instance of producer manager
+// that is used by all producers and all readers.
+func GlobalManager() *Manager {
+	once.Do(func() {
+		prodMgr = &Manager{}
+		prodMgr.producers = make(map[Producer]struct{})
+	})
+	return prodMgr
+}
+
+// AddProducer adds the producer to the Manager if it is not already present.
+func (pm *Manager) AddProducer(producer Producer) {
+	if producer == nil {
+		return
+	}
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	pm.producers[producer] = struct{}{}
+}
+
+// DeleteProducer deletes the producer from the Manager if it is present.
+func (pm *Manager) DeleteProducer(producer Producer) {
+	if producer == nil {
+		return
+	}
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	delete(pm.producers, producer)
+}
+
+// GetAll returns a slice of all producers currently registered with
+// the Manager. For each call it generates a new slice. The slice
+// should not be cached as registration may change at any time. It is
+// typically called periodically by exporter to read metrics from
+// the producers.
+func (pm *Manager) GetAll() []Producer {
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	producers := make([]Producer, len(pm.producers))
+	i := 0
+	for producer := range pm.producers {
+		producers[i] = producer
+		i++
+	}
+	return producers
+}

+ 28 - 0
vendor/go.opencensus.io/metric/metricproducer/producer.go

@@ -0,0 +1,28 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricproducer
+
+import (
+	"go.opencensus.io/metric/metricdata"
+)
+
+// Producer is a source of metrics.
+type Producer interface {
+	// Read should return the current values of all metrics supported by this
+	// metric provider.
+	// The returned metrics should be unique for each combination of name and
+	// resource.
+	Read() []*metricdata.Metric
+}

+ 21 - 0
vendor/go.opencensus.io/opencensus.go

@@ -0,0 +1,21 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package opencensus contains Go support for OpenCensus.
+package opencensus // import "go.opencensus.io"
+
+// Version is the current release version of OpenCensus in use.
+func Version() string {
+	return "0.23.0"
+}

+ 2 - 1
vendor/go.opencensus.io/plugin/ocgrpc/client.go

@@ -15,8 +15,8 @@
 package ocgrpc
 package ocgrpc
 
 
 import (
 import (
+	"context"
 	"go.opencensus.io/trace"
 	"go.opencensus.io/trace"
-	"golang.org/x/net/context"
 
 
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/stats"
 )
 )
@@ -31,6 +31,7 @@ type ClientHandler struct {
 	StartOptions trace.StartOptions
 	StartOptions trace.StartOptions
 }
 }
 
 
+// HandleConn exists to satisfy gRPC stats.Handler.
 func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
 func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
 	// no-op
 	// no-op
 }
 }

+ 2 - 11
vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go

@@ -31,9 +31,9 @@ var (
 	ClientServerLatency          = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds)
 	ClientServerLatency          = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds)
 )
 )
 
 
-// Predefined views may be subscribed to collect data for the above measures.
+// Predefined views may be registered to collect data for the above measures.
 // As always, you may also define your own custom views over measures collected by this
 // As always, you may also define your own custom views over measures collected by this
-// package. These are declared as a convenience only; none are subscribed by
+// package. These are declared as a convenience only; none are registered by
 // default.
 // default.
 var (
 var (
 	ClientSentBytesPerRPCView = &view.View{
 	ClientSentBytesPerRPCView = &view.View{
@@ -91,15 +91,6 @@ var (
 		TagKeys:     []tag.Key{KeyClientMethod},
 		TagKeys:     []tag.Key{KeyClientMethod},
 		Aggregation: DefaultMillisecondsDistribution,
 		Aggregation: DefaultMillisecondsDistribution,
 	}
 	}
-
-	// Deprecated: This view is going to be removed, if you need it please define it
-	// yourself.
-	ClientRequestCountView = &view.View{
-		Name:        "Count of request messages per client RPC",
-		TagKeys:     []tag.Key{KeyClientMethod},
-		Measure:     ClientRoundtripLatency,
-		Aggregation: view.Count(),
-	}
 )
 )
 
 
 // DefaultClientViews are the default client views provided by this package.
 // DefaultClientViews are the default client views provided by this package.

+ 2 - 2
vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go

@@ -16,10 +16,10 @@
 package ocgrpc
 package ocgrpc
 
 
 import (
 import (
+	"context"
 	"time"
 	"time"
 
 
 	"go.opencensus.io/tag"
 	"go.opencensus.io/tag"
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/stats"
 )
 )
@@ -30,7 +30,7 @@ func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo)
 	startTime := time.Now()
 	startTime := time.Now()
 	if info == nil {
 	if info == nil {
 		if grpclog.V(2) {
 		if grpclog.V(2) {
-			grpclog.Infof("clientHandler.TagRPC called with nil info.", info.FullMethodName)
+			grpclog.Info("clientHandler.TagRPC called with nil info.")
 		}
 		}
 		return ctx
 		return ctx
 	}
 	}

+ 1 - 1
vendor/go.opencensus.io/plugin/ocgrpc/server.go

@@ -15,8 +15,8 @@
 package ocgrpc
 package ocgrpc
 
 
 import (
 import (
+	"context"
 	"go.opencensus.io/trace"
 	"go.opencensus.io/trace"
-	"golang.org/x/net/context"
 
 
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/stats"
 )
 )

+ 2 - 2
vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go

@@ -34,9 +34,9 @@ var (
 // mechanism to load these defaults from a common repository/config shared by
 // mechanism to load these defaults from a common repository/config shared by
 // all supported languages. Likely a serialized protobuf of these defaults.
 // all supported languages. Likely a serialized protobuf of these defaults.
 
 
-// Predefined views may be subscribed to collect data for the above measures.
+// Predefined views may be registered to collect data for the above measures.
 // As always, you may also define your own custom views over measures collected by this
 // As always, you may also define your own custom views over measures collected by this
-// package. These are declared as a convenience only; none are subscribed by
+// package. These are declared as a convenience only; none are registered by
 // default.
 // default.
 var (
 var (
 	ServerReceivedBytesPerRPCView = &view.View{
 	ServerReceivedBytesPerRPCView = &view.View{

+ 1 - 1
vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go

@@ -18,7 +18,7 @@ package ocgrpc
 import (
 import (
 	"time"
 	"time"
 
 
-	"golang.org/x/net/context"
+	"context"
 
 
 	"go.opencensus.io/tag"
 	"go.opencensus.io/tag"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/grpclog"

+ 51 - 23
vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go

@@ -22,9 +22,11 @@ import (
 	"sync/atomic"
 	"sync/atomic"
 	"time"
 	"time"
 
 
+	"go.opencensus.io/metric/metricdata"
 	ocstats "go.opencensus.io/stats"
 	ocstats "go.opencensus.io/stats"
 	"go.opencensus.io/stats/view"
 	"go.opencensus.io/stats/view"
 	"go.opencensus.io/tag"
 	"go.opencensus.io/tag"
+	"go.opencensus.io/trace"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/stats"
@@ -51,16 +53,22 @@ type rpcData struct {
 // The following variables define the default hard-coded auxiliary data used by
 // The following variables define the default hard-coded auxiliary data used by
 // both the default GRPC client and GRPC server metrics.
 // both the default GRPC client and GRPC server metrics.
 var (
 var (
-	DefaultBytesDistribution        = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
-	DefaultMillisecondsDistribution = view.Distribution(0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
-	DefaultMessageCountDistribution = view.Distribution(0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
+	DefaultBytesDistribution        = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
+	DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
+	DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
 )
 )
 
 
+// Server tags are applied to the context used to process each RPC, as well as
+// the measures at the end of each RPC.
 var (
 var (
-	KeyServerMethod, _ = tag.NewKey("grpc_server_method")
-	KeyClientMethod, _ = tag.NewKey("grpc_client_method")
-	KeyServerStatus, _ = tag.NewKey("grpc_server_status")
-	KeyClientStatus, _ = tag.NewKey("grpc_client_status")
+	KeyServerMethod = tag.MustNewKey("grpc_server_method")
+	KeyServerStatus = tag.MustNewKey("grpc_server_status")
+)
+
+// Client tags are applied to measures at the end of each RPC.
+var (
+	KeyClientMethod = tag.MustNewKey("grpc_client_method")
+	KeyClientStatus = tag.MustNewKey("grpc_client_status")
 )
 )
 
 
 var (
 var (
@@ -135,24 +143,31 @@ func handleRPCEnd(ctx context.Context, s *stats.End) {
 	}
 	}
 
 
 	latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
 	latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
+	attachments := getSpanCtxAttachment(ctx)
 	if s.Client {
 	if s.Client {
-		ctx, _ = tag.New(ctx,
-			tag.Upsert(KeyClientMethod, methodName(d.method)),
-			tag.Upsert(KeyClientStatus, st))
-		ocstats.Record(ctx,
-			ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
-			ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
-			ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
-			ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
-			ClientRoundtripLatency.M(latencyMillis))
+		ocstats.RecordWithOptions(ctx,
+			ocstats.WithTags(
+				tag.Upsert(KeyClientMethod, methodName(d.method)),
+				tag.Upsert(KeyClientStatus, st)),
+			ocstats.WithAttachments(attachments),
+			ocstats.WithMeasurements(
+				ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
+				ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
+				ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
+				ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
+				ClientRoundtripLatency.M(latencyMillis)))
 	} else {
 	} else {
-		ctx, _ = tag.New(ctx, tag.Upsert(KeyServerStatus, st))
-		ocstats.Record(ctx,
-			ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
-			ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
-			ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
-			ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
-			ServerLatency.M(latencyMillis))
+		ocstats.RecordWithOptions(ctx,
+			ocstats.WithTags(
+				tag.Upsert(KeyServerStatus, st),
+			),
+			ocstats.WithAttachments(attachments),
+			ocstats.WithMeasurements(
+				ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
+				ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
+				ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
+				ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
+				ServerLatency.M(latencyMillis)))
 	}
 	}
 }
 }
 
 
@@ -197,3 +212,16 @@ func statusCodeToString(s *status.Status) string {
 		return "CODE_" + strconv.FormatInt(int64(c), 10)
 		return "CODE_" + strconv.FormatInt(int64(c), 10)
 	}
 	}
 }
 }
+
+func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments {
+	attachments := map[string]interface{}{}
+	span := trace.FromContext(ctx)
+	if span == nil {
+		return attachments
+	}
+	spanCtx := span.SpanContext()
+	if spanCtx.IsSampled() {
+		attachments[metricdata.AttachmentKeySpanContext] = spanCtx
+	}
+	return attachments
+}

+ 1 - 1
vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go

@@ -19,9 +19,9 @@ import (
 
 
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/codes"
 
 
+	"context"
 	"go.opencensus.io/trace"
 	"go.opencensus.io/trace"
 	"go.opencensus.io/trace/propagation"
 	"go.opencensus.io/trace/propagation"
-	"golang.org/x/net/context"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/status"

+ 36 - 3
vendor/go.opencensus.io/plugin/ochttp/client.go

@@ -16,14 +16,18 @@ package ochttp
 
 
 import (
 import (
 	"net/http"
 	"net/http"
+	"net/http/httptrace"
 
 
 	"go.opencensus.io/trace"
 	"go.opencensus.io/trace"
 	"go.opencensus.io/trace/propagation"
 	"go.opencensus.io/trace/propagation"
 )
 )
 
 
 // Transport is an http.RoundTripper that instruments all outgoing requests with
 // Transport is an http.RoundTripper that instruments all outgoing requests with
-// stats and tracing. The zero value is intended to be a useful default, but for
-// now it's recommended that you explicitly set Propagation.
+// OpenCensus stats and tracing.
+//
+// The zero value is intended to be a useful default, but for
+// now it's recommended that you explicitly set Propagation, since the default
+// for this may change.
 type Transport struct {
 type Transport struct {
 	// Base may be set to wrap another http.RoundTripper that does the actual
 	// Base may be set to wrap another http.RoundTripper that does the actual
 	// requests. By default http.DefaultTransport is used.
 	// requests. By default http.DefaultTransport is used.
@@ -43,24 +47,53 @@ type Transport struct {
 	// for spans started by this transport.
 	// for spans started by this transport.
 	StartOptions trace.StartOptions
 	StartOptions trace.StartOptions
 
 
+	// GetStartOptions allows to set start options per request. If set,
+	// StartOptions is going to be ignored.
+	GetStartOptions func(*http.Request) trace.StartOptions
+
+	// NameFromRequest holds the function to use for generating the span name
+	// from the information found in the outgoing HTTP Request. By default the
+	// name equals the URL Path.
+	FormatSpanName func(*http.Request) string
+
+	// NewClientTrace may be set to a function allowing the current *trace.Span
+	// to be annotated with HTTP request event information emitted by the
+	// httptrace package.
+	NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
+
 	// TODO: Implement tag propagation for HTTP.
 	// TODO: Implement tag propagation for HTTP.
 }
 }
 
 
 // RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request.
 // RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request.
 func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
 func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
 	rt := t.base()
 	rt := t.base()
+	if isHealthEndpoint(req.URL.Path) {
+		return rt.RoundTrip(req)
+	}
 	// TODO: remove excessive nesting of http.RoundTrippers here.
 	// TODO: remove excessive nesting of http.RoundTrippers here.
 	format := t.Propagation
 	format := t.Propagation
 	if format == nil {
 	if format == nil {
 		format = defaultFormat
 		format = defaultFormat
 	}
 	}
+	spanNameFormatter := t.FormatSpanName
+	if spanNameFormatter == nil {
+		spanNameFormatter = spanNameFromURL
+	}
+
+	startOpts := t.StartOptions
+	if t.GetStartOptions != nil {
+		startOpts = t.GetStartOptions(req)
+	}
+
 	rt = &traceTransport{
 	rt = &traceTransport{
 		base:   rt,
 		base:   rt,
 		format: format,
 		format: format,
 		startOptions: trace.StartOptions{
 		startOptions: trace.StartOptions{
-			Sampler:  t.StartOptions.Sampler,
+			Sampler:  startOpts.Sampler,
 			SpanKind: trace.SpanKindClient,
 			SpanKind: trace.SpanKindClient,
 		},
 		},
+		formatSpanName: spanNameFormatter,
+		newClientTrace: t.NewClientTrace,
 	}
 	}
 	rt = statsTransport{base: rt}
 	rt = statsTransport{base: rt}
 	return rt.RoundTrip(req)
 	return rt.RoundTrip(req)

+ 31 - 13
vendor/go.opencensus.io/plugin/ochttp/client_stats.go

@@ -34,8 +34,11 @@ type statsTransport struct {
 // RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request.
 // RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request.
 func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
 func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
 	ctx, _ := tag.New(req.Context(),
 	ctx, _ := tag.New(req.Context(),
-		tag.Upsert(Host, req.URL.Host),
+		tag.Upsert(KeyClientHost, req.Host),
+		tag.Upsert(Host, req.Host),
+		tag.Upsert(KeyClientPath, req.URL.Path),
 		tag.Upsert(Path, req.URL.Path),
 		tag.Upsert(Path, req.URL.Path),
+		tag.Upsert(KeyClientMethod, req.Method),
 		tag.Upsert(Method, req.Method))
 		tag.Upsert(Method, req.Method))
 	req = req.WithContext(ctx)
 	req = req.WithContext(ctx)
 	track := &tracker{
 	track := &tracker{
@@ -58,11 +61,14 @@ func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
 		track.end()
 		track.end()
 	} else {
 	} else {
 		track.statusCode = resp.StatusCode
 		track.statusCode = resp.StatusCode
+		if req.Method != "HEAD" {
+			track.respContentLength = resp.ContentLength
+		}
 		if resp.Body == nil {
 		if resp.Body == nil {
 			track.end()
 			track.end()
 		} else {
 		} else {
 			track.body = resp.Body
 			track.body = resp.Body
-			resp.Body = track
+			resp.Body = wrappedBody(track, resp.Body)
 		}
 		}
 	}
 	}
 	return resp, err
 	return resp, err
@@ -79,36 +85,48 @@ func (t statsTransport) CancelRequest(req *http.Request) {
 }
 }
 
 
 type tracker struct {
 type tracker struct {
-	ctx        context.Context
-	respSize   int64
-	reqSize    int64
-	start      time.Time
-	body       io.ReadCloser
-	statusCode int
-	endOnce    sync.Once
+	ctx               context.Context
+	respSize          int64
+	respContentLength int64
+	reqSize           int64
+	start             time.Time
+	body              io.ReadCloser
+	statusCode        int
+	endOnce           sync.Once
 }
 }
 
 
 var _ io.ReadCloser = (*tracker)(nil)
 var _ io.ReadCloser = (*tracker)(nil)
 
 
 func (t *tracker) end() {
 func (t *tracker) end() {
 	t.endOnce.Do(func() {
 	t.endOnce.Do(func() {
+		latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond)
+		respSize := t.respSize
+		if t.respSize == 0 && t.respContentLength > 0 {
+			respSize = t.respContentLength
+		}
 		m := []stats.Measurement{
 		m := []stats.Measurement{
-			ClientLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
+			ClientSentBytes.M(t.reqSize),
+			ClientReceivedBytes.M(respSize),
+			ClientRoundtripLatency.M(latencyMs),
+			ClientLatency.M(latencyMs),
 			ClientResponseBytes.M(t.respSize),
 			ClientResponseBytes.M(t.respSize),
 		}
 		}
 		if t.reqSize >= 0 {
 		if t.reqSize >= 0 {
 			m = append(m, ClientRequestBytes.M(t.reqSize))
 			m = append(m, ClientRequestBytes.M(t.reqSize))
 		}
 		}
-		ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)))
-		stats.Record(ctx, m...)
+
+		stats.RecordWithTags(t.ctx, []tag.Mutator{
+			tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)),
+			tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)),
+		}, m...)
 	})
 	})
 }
 }
 
 
 func (t *tracker) Read(b []byte) (int, error) {
 func (t *tracker) Read(b []byte) (int, error) {
 	n, err := t.body.Read(b)
 	n, err := t.body.Read(b)
+	t.respSize += int64(n)
 	switch err {
 	switch err {
 	case nil:
 	case nil:
-		t.respSize += int64(n)
 		return n, nil
 		return n, nil
 	case io.EOF:
 	case io.EOF:
 		t.end()
 		t.end()

+ 1 - 1
vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go

@@ -38,7 +38,7 @@ const (
 // because there are additional fields not represented in the
 // because there are additional fields not represented in the
 // OpenCensus span context. Spans created from the incoming
 // OpenCensus span context. Spans created from the incoming
 // header will be the direct children of the client-side span.
 // header will be the direct children of the client-side span.
-// Similarly, reciever of the outgoing spans should use client-side
+// Similarly, receiver of the outgoing spans should use client-side
 // span created by OpenCensus as the parent.
 // span created by OpenCensus as the parent.
 type HTTPFormat struct{}
 type HTTPFormat struct{}
 
 

+ 61 - 0
vendor/go.opencensus.io/plugin/ochttp/route.go

@@ -0,0 +1,61 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ochttp
+
+import (
+	"context"
+	"net/http"
+
+	"go.opencensus.io/tag"
+)
+
+// SetRoute sets the http_server_route tag to the given value.
+// It's useful when an HTTP framework does not support the http.Handler interface
+// and using WithRouteTag is not an option, but provides a way to hook into the request flow.
+func SetRoute(ctx context.Context, route string) {
+	if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok {
+		a.t = append(a.t, tag.Upsert(KeyServerRoute, route))
+	}
+}
+
+// WithRouteTag returns an http.Handler that records stats with the
+// http_server_route tag set to the given value.
+func WithRouteTag(handler http.Handler, route string) http.Handler {
+	return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator {
+		addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)}
+		ctx, _ := tag.New(r.Context(), addRoute...)
+		r = r.WithContext(ctx)
+		handler.ServeHTTP(w, r)
+		return addRoute
+	})
+}
+
+// taggedHandlerFunc is a http.Handler that returns tags describing the
+// processing of the request. These tags will be recorded along with the
+// measures in this package at the end of the request.
+type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator
+
+func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	tags := h(w, r)
+	if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok {
+		a.t = append(a.t, tags...)
+	}
+}
+
+type addedTagsKey struct{}
+
+type addedTags struct {
+	t []tag.Mutator
+}

+ 290 - 54
vendor/go.opencensus.io/plugin/ochttp/server.go

@@ -15,10 +15,8 @@
 package ochttp
 package ochttp
 
 
 import (
 import (
-	"bufio"
 	"context"
 	"context"
-	"errors"
-	"net"
+	"io"
 	"net/http"
 	"net/http"
 	"strconv"
 	"strconv"
 	"sync"
 	"sync"
@@ -30,16 +28,19 @@ import (
 	"go.opencensus.io/trace/propagation"
 	"go.opencensus.io/trace/propagation"
 )
 )
 
 
-// Handler is a http.Handler that is aware of the incoming request's span.
+// Handler is an http.Handler wrapper to instrument your HTTP server with
+// OpenCensus. It supports both stats and tracing.
 //
 //
+// Tracing
+//
+// This handler is aware of the incoming request's span, reading it from request
+// headers as configured using the Propagation field.
 // The extracted span can be accessed from the incoming request's
 // The extracted span can be accessed from the incoming request's
 // context.
 // context.
 //
 //
 //    span := trace.FromContext(r.Context())
 //    span := trace.FromContext(r.Context())
 //
 //
 // The server span will be automatically ended at the end of ServeHTTP.
 // The server span will be automatically ended at the end of ServeHTTP.
-//
-// Incoming propagation mechanism is determined by the given HTTP propagators.
 type Handler struct {
 type Handler struct {
 	// Propagation defines how traces are propagated. If unspecified,
 	// Propagation defines how traces are propagated. If unspecified,
 	// B3 propagation will be used.
 	// B3 propagation will be used.
@@ -55,50 +56,86 @@ type Handler struct {
 	// for spans started by this transport.
 	// for spans started by this transport.
 	StartOptions trace.StartOptions
 	StartOptions trace.StartOptions
 
 
+	// GetStartOptions allows to set start options per request. If set,
+	// StartOptions is going to be ignored.
+	GetStartOptions func(*http.Request) trace.StartOptions
+
 	// IsPublicEndpoint should be set to true for publicly accessible HTTP(S)
 	// IsPublicEndpoint should be set to true for publicly accessible HTTP(S)
 	// servers. If true, any trace metadata set on the incoming request will
 	// servers. If true, any trace metadata set on the incoming request will
 	// be added as a linked trace instead of being added as a parent of the
 	// be added as a linked trace instead of being added as a parent of the
 	// current trace.
 	// current trace.
 	IsPublicEndpoint bool
 	IsPublicEndpoint bool
+
+	// FormatSpanName holds the function to use for generating the span name
+	// from the information found in the incoming HTTP Request. By default the
+	// name equals the URL Path.
+	FormatSpanName func(*http.Request) string
+
+	// IsHealthEndpoint holds the function to use for determining if the
+	// incoming HTTP request should be considered a health check. This is in
+	// addition to the private isHealthEndpoint func which may also indicate
+	// tracing should be skipped.
+	IsHealthEndpoint func(*http.Request) bool
 }
 }
 
 
 func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	var traceEnd, statsEnd func()
-	r, traceEnd = h.startTrace(w, r)
+	var tags addedTags
+	r, traceEnd := h.startTrace(w, r)
 	defer traceEnd()
 	defer traceEnd()
-	w, statsEnd = h.startStats(w, r)
-	defer statsEnd()
+	w, statsEnd := h.startStats(w, r)
+	defer statsEnd(&tags)
 	handler := h.Handler
 	handler := h.Handler
 	if handler == nil {
 	if handler == nil {
 		handler = http.DefaultServeMux
 		handler = http.DefaultServeMux
 	}
 	}
+	r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags))
 	handler.ServeHTTP(w, r)
 	handler.ServeHTTP(w, r)
 }
 }
 
 
 func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) {
 func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) {
-	name := spanNameFromURL(r.URL)
+	if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) {
+		return r, func() {}
+	}
+	var name string
+	if h.FormatSpanName == nil {
+		name = spanNameFromURL(r)
+	} else {
+		name = h.FormatSpanName(r)
+	}
 	ctx := r.Context()
 	ctx := r.Context()
+
+	startOpts := h.StartOptions
+	if h.GetStartOptions != nil {
+		startOpts = h.GetStartOptions(r)
+	}
+
 	var span *trace.Span
 	var span *trace.Span
 	sc, ok := h.extractSpanContext(r)
 	sc, ok := h.extractSpanContext(r)
 	if ok && !h.IsPublicEndpoint {
 	if ok && !h.IsPublicEndpoint {
 		ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc,
 		ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc,
-			trace.WithSampler(h.StartOptions.Sampler),
+			trace.WithSampler(startOpts.Sampler),
 			trace.WithSpanKind(trace.SpanKindServer))
 			trace.WithSpanKind(trace.SpanKindServer))
 	} else {
 	} else {
 		ctx, span = trace.StartSpan(ctx, name,
 		ctx, span = trace.StartSpan(ctx, name,
-			trace.WithSampler(h.StartOptions.Sampler),
+			trace.WithSampler(startOpts.Sampler),
 			trace.WithSpanKind(trace.SpanKindServer),
 			trace.WithSpanKind(trace.SpanKindServer),
 		)
 		)
 		if ok {
 		if ok {
 			span.AddLink(trace.Link{
 			span.AddLink(trace.Link{
 				TraceID:    sc.TraceID,
 				TraceID:    sc.TraceID,
 				SpanID:     sc.SpanID,
 				SpanID:     sc.SpanID,
-				Type:       trace.LinkTypeChild,
+				Type:       trace.LinkTypeParent,
 				Attributes: nil,
 				Attributes: nil,
 			})
 			})
 		}
 		}
 	}
 	}
 	span.AddAttributes(requestAttrs(r)...)
 	span.AddAttributes(requestAttrs(r)...)
+	if r.Body == nil {
+		// TODO: Handle cases where ContentLength is not set.
+	} else if r.ContentLength > 0 {
+		span.AddMessageReceiveEvent(0, /* TODO: messageID */
+			r.ContentLength, -1)
+	}
 	return r.WithContext(ctx), span.End
 	return r.WithContext(ctx), span.End
 }
 }
 
 
@@ -109,9 +146,9 @@ func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool)
 	return h.Propagation.SpanContextFromRequest(r)
 	return h.Propagation.SpanContextFromRequest(r)
 }
 }
 
 
-func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func()) {
+func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) {
 	ctx, _ := tag.New(r.Context(),
 	ctx, _ := tag.New(r.Context(),
-		tag.Upsert(Host, r.URL.Host),
+		tag.Upsert(Host, r.Host),
 		tag.Upsert(Path, r.URL.Path),
 		tag.Upsert(Path, r.URL.Path),
 		tag.Upsert(Method, r.Method))
 		tag.Upsert(Method, r.Method))
 	track := &trackingResponseWriter{
 	track := &trackingResponseWriter{
@@ -126,7 +163,7 @@ func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.Respo
 		track.reqSize = r.ContentLength
 		track.reqSize = r.ContentLength
 	}
 	}
 	stats.Record(ctx, ServerRequestCount.M(1))
 	stats.Record(ctx, ServerRequestCount.M(1))
-	return track, track.end
+	return track.wrappedResponseWriter(), track.end
 }
 }
 
 
 type trackingResponseWriter struct {
 type trackingResponseWriter struct {
@@ -140,40 +177,10 @@ type trackingResponseWriter struct {
 	writer     http.ResponseWriter
 	writer     http.ResponseWriter
 }
 }
 
 
-// Compile time assertions for widely used net/http interfaces
-var _ http.CloseNotifier = (*trackingResponseWriter)(nil)
-var _ http.Flusher = (*trackingResponseWriter)(nil)
-var _ http.Hijacker = (*trackingResponseWriter)(nil)
-var _ http.Pusher = (*trackingResponseWriter)(nil)
+// Compile time assertion for ResponseWriter interface
 var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
 var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
 
 
-var errHijackerUnimplemented = errors.New("ResponseWriter does not implement http.Hijacker")
-
-func (t *trackingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
-	hj, ok := t.writer.(http.Hijacker)
-	if !ok {
-		return nil, nil, errHijackerUnimplemented
-	}
-	return hj.Hijack()
-}
-
-func (t *trackingResponseWriter) CloseNotify() <-chan bool {
-	cn, ok := t.writer.(http.CloseNotifier)
-	if !ok {
-		return nil
-	}
-	return cn.CloseNotify()
-}
-
-func (t *trackingResponseWriter) Push(target string, opts *http.PushOptions) error {
-	pusher, ok := t.writer.(http.Pusher)
-	if !ok {
-		return http.ErrNotSupported
-	}
-	return pusher.Push(target, opts)
-}
-
-func (t *trackingResponseWriter) end() {
+func (t *trackingResponseWriter) end(tags *addedTags) {
 	t.endOnce.Do(func() {
 	t.endOnce.Do(func() {
 		if t.statusCode == 0 {
 		if t.statusCode == 0 {
 			t.statusCode = 200
 			t.statusCode = 200
@@ -181,6 +188,7 @@ func (t *trackingResponseWriter) end() {
 
 
 		span := trace.FromContext(t.ctx)
 		span := trace.FromContext(t.ctx)
 		span.SetStatus(TraceStatus(t.statusCode, t.statusLine))
 		span.SetStatus(TraceStatus(t.statusCode, t.statusLine))
+		span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode)))
 
 
 		m := []stats.Measurement{
 		m := []stats.Measurement{
 			ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
 			ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
@@ -189,8 +197,10 @@ func (t *trackingResponseWriter) end() {
 		if t.reqSize >= 0 {
 		if t.reqSize >= 0 {
 			m = append(m, ServerRequestBytes.M(t.reqSize))
 			m = append(m, ServerRequestBytes.M(t.reqSize))
 		}
 		}
-		ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)))
-		stats.Record(ctx, m...)
+		allTags := make([]tag.Mutator, len(tags.t)+1)
+		allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))
+		copy(allTags[1:], tags.t)
+		stats.RecordWithTags(t.ctx, allTags, m...)
 	})
 	})
 }
 }
 
 
@@ -201,6 +211,9 @@ func (t *trackingResponseWriter) Header() http.Header {
 func (t *trackingResponseWriter) Write(data []byte) (int, error) {
 func (t *trackingResponseWriter) Write(data []byte) (int, error) {
 	n, err := t.writer.Write(data)
 	n, err := t.writer.Write(data)
 	t.respSize += int64(n)
 	t.respSize += int64(n)
+	// Add message event for request bytes sent.
+	span := trace.FromContext(t.ctx)
+	span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1)
 	return n, err
 	return n, err
 }
 }
 
 
@@ -210,8 +223,231 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) {
 	t.statusLine = http.StatusText(t.statusCode)
 	t.statusLine = http.StatusText(t.statusCode)
 }
 }
 
 
-func (t *trackingResponseWriter) Flush() {
-	if flusher, ok := t.writer.(http.Flusher); ok {
-		flusher.Flush()
+// wrappedResponseWriter returns a wrapped version of the original
+//  ResponseWriter and only implements the same combination of additional
+// interfaces as the original.
+// This implementation is based on https://github.com/felixge/httpsnoop.
+func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {
+	var (
+		hj, i0 = t.writer.(http.Hijacker)
+		cn, i1 = t.writer.(http.CloseNotifier)
+		pu, i2 = t.writer.(http.Pusher)
+		fl, i3 = t.writer.(http.Flusher)
+		rf, i4 = t.writer.(io.ReaderFrom)
+	)
+
+	switch {
+	case !i0 && !i1 && !i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+		}{t}
+	case !i0 && !i1 && !i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			io.ReaderFrom
+		}{t, rf}
+	case !i0 && !i1 && !i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Flusher
+		}{t, fl}
+	case !i0 && !i1 && !i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Flusher
+			io.ReaderFrom
+		}{t, fl, rf}
+	case !i0 && !i1 && i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Pusher
+		}{t, pu}
+	case !i0 && !i1 && i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Pusher
+			io.ReaderFrom
+		}{t, pu, rf}
+	case !i0 && !i1 && i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Pusher
+			http.Flusher
+		}{t, pu, fl}
+	case !i0 && !i1 && i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Pusher
+			http.Flusher
+			io.ReaderFrom
+		}{t, pu, fl, rf}
+	case !i0 && i1 && !i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+		}{t, cn}
+	case !i0 && i1 && !i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			io.ReaderFrom
+		}{t, cn, rf}
+	case !i0 && i1 && !i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			http.Flusher
+		}{t, cn, fl}
+	case !i0 && i1 && !i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			http.Flusher
+			io.ReaderFrom
+		}{t, cn, fl, rf}
+	case !i0 && i1 && i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			http.Pusher
+		}{t, cn, pu}
+	case !i0 && i1 && i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			http.Pusher
+			io.ReaderFrom
+		}{t, cn, pu, rf}
+	case !i0 && i1 && i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			http.Pusher
+			http.Flusher
+		}{t, cn, pu, fl}
+	case !i0 && i1 && i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			http.Pusher
+			http.Flusher
+			io.ReaderFrom
+		}{t, cn, pu, fl, rf}
+	case i0 && !i1 && !i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+		}{t, hj}
+	case i0 && !i1 && !i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			io.ReaderFrom
+		}{t, hj, rf}
+	case i0 && !i1 && !i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.Flusher
+		}{t, hj, fl}
+	case i0 && !i1 && !i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.Flusher
+			io.ReaderFrom
+		}{t, hj, fl, rf}
+	case i0 && !i1 && i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.Pusher
+		}{t, hj, pu}
+	case i0 && !i1 && i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.Pusher
+			io.ReaderFrom
+		}{t, hj, pu, rf}
+	case i0 && !i1 && i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.Pusher
+			http.Flusher
+		}{t, hj, pu, fl}
+	case i0 && !i1 && i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.Pusher
+			http.Flusher
+			io.ReaderFrom
+		}{t, hj, pu, fl, rf}
+	case i0 && i1 && !i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.CloseNotifier
+		}{t, hj, cn}
+	case i0 && i1 && !i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.CloseNotifier
+			io.ReaderFrom
+		}{t, hj, cn, rf}
+	case i0 && i1 && !i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.CloseNotifier
+			http.Flusher
+		}{t, hj, cn, fl}
+	case i0 && i1 && !i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.CloseNotifier
+			http.Flusher
+			io.ReaderFrom
+		}{t, hj, cn, fl, rf}
+	case i0 && i1 && i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.CloseNotifier
+			http.Pusher
+		}{t, hj, cn, pu}
+	case i0 && i1 && i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.CloseNotifier
+			http.Pusher
+			io.ReaderFrom
+		}{t, hj, cn, pu, rf}
+	case i0 && i1 && i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.CloseNotifier
+			http.Pusher
+			http.Flusher
+		}{t, hj, cn, pu, fl}
+	case i0 && i1 && i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.CloseNotifier
+			http.Pusher
+			http.Flusher
+			io.ReaderFrom
+		}{t, hj, cn, pu, fl, rf}
+	default:
+		return struct {
+			http.ResponseWriter
+		}{t}
 	}
 	}
 }
 }

+ 169 - 0
vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go

@@ -0,0 +1,169 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ochttp
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/http/httptrace"
+	"strings"
+
+	"go.opencensus.io/trace"
+)
+
+type spanAnnotator struct {
+	sp *trace.Span
+}
+
+// TODO: Remove NewSpanAnnotator at the next release.
+
+// NewSpanAnnotator returns a httptrace.ClientTrace which annotates
+// all emitted httptrace events on the provided Span.
+// Deprecated: Use NewSpanAnnotatingClientTrace instead
+func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace {
+	return NewSpanAnnotatingClientTrace(r, s)
+}
+
+// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates
+// all emitted httptrace events on the provided Span.
+func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace {
+	sa := spanAnnotator{sp: s}
+
+	return &httptrace.ClientTrace{
+		GetConn:              sa.getConn,
+		GotConn:              sa.gotConn,
+		PutIdleConn:          sa.putIdleConn,
+		GotFirstResponseByte: sa.gotFirstResponseByte,
+		Got100Continue:       sa.got100Continue,
+		DNSStart:             sa.dnsStart,
+		DNSDone:              sa.dnsDone,
+		ConnectStart:         sa.connectStart,
+		ConnectDone:          sa.connectDone,
+		TLSHandshakeStart:    sa.tlsHandshakeStart,
+		TLSHandshakeDone:     sa.tlsHandshakeDone,
+		WroteHeaders:         sa.wroteHeaders,
+		Wait100Continue:      sa.wait100Continue,
+		WroteRequest:         sa.wroteRequest,
+	}
+}
+
+func (s spanAnnotator) getConn(hostPort string) {
+	attrs := []trace.Attribute{
+		trace.StringAttribute("httptrace.get_connection.host_port", hostPort),
+	}
+	s.sp.Annotate(attrs, "GetConn")
+}
+
+func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) {
+	attrs := []trace.Attribute{
+		trace.BoolAttribute("httptrace.got_connection.reused", info.Reused),
+		trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle),
+	}
+	if info.WasIdle {
+		attrs = append(attrs,
+			trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String()))
+	}
+	s.sp.Annotate(attrs, "GotConn")
+}
+
+// PutIdleConn implements a httptrace.ClientTrace hook
+func (s spanAnnotator) putIdleConn(err error) {
+	var attrs []trace.Attribute
+	if err != nil {
+		attrs = append(attrs,
+			trace.StringAttribute("httptrace.put_idle_connection.error", err.Error()))
+	}
+	s.sp.Annotate(attrs, "PutIdleConn")
+}
+
+func (s spanAnnotator) gotFirstResponseByte() {
+	s.sp.Annotate(nil, "GotFirstResponseByte")
+}
+
+func (s spanAnnotator) got100Continue() {
+	s.sp.Annotate(nil, "Got100Continue")
+}
+
+func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) {
+	attrs := []trace.Attribute{
+		trace.StringAttribute("httptrace.dns_start.host", info.Host),
+	}
+	s.sp.Annotate(attrs, "DNSStart")
+}
+
+func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) {
+	var addrs []string
+	for _, addr := range info.Addrs {
+		addrs = append(addrs, addr.String())
+	}
+	attrs := []trace.Attribute{
+		trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")),
+	}
+	if info.Err != nil {
+		attrs = append(attrs,
+			trace.StringAttribute("httptrace.dns_done.error", info.Err.Error()))
+	}
+	s.sp.Annotate(attrs, "DNSDone")
+}
+
+func (s spanAnnotator) connectStart(network, addr string) {
+	attrs := []trace.Attribute{
+		trace.StringAttribute("httptrace.connect_start.network", network),
+		trace.StringAttribute("httptrace.connect_start.addr", addr),
+	}
+	s.sp.Annotate(attrs, "ConnectStart")
+}
+
+func (s spanAnnotator) connectDone(network, addr string, err error) {
+	attrs := []trace.Attribute{
+		trace.StringAttribute("httptrace.connect_done.network", network),
+		trace.StringAttribute("httptrace.connect_done.addr", addr),
+	}
+	if err != nil {
+		attrs = append(attrs,
+			trace.StringAttribute("httptrace.connect_done.error", err.Error()))
+	}
+	s.sp.Annotate(attrs, "ConnectDone")
+}
+
+func (s spanAnnotator) tlsHandshakeStart() {
+	s.sp.Annotate(nil, "TLSHandshakeStart")
+}
+
+func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) {
+	var attrs []trace.Attribute
+	if err != nil {
+		attrs = append(attrs,
+			trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error()))
+	}
+	s.sp.Annotate(attrs, "TLSHandshakeDone")
+}
+
+func (s spanAnnotator) wroteHeaders() {
+	s.sp.Annotate(nil, "WroteHeaders")
+}
+
+func (s spanAnnotator) wait100Continue() {
+	s.sp.Annotate(nil, "Wait100Continue")
+}
+
+func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) {
+	var attrs []trace.Attribute
+	if info.Err != nil {
+		attrs = append(attrs,
+			trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error()))
+	}
+	s.sp.Annotate(attrs, "WroteRequest")
+}

+ 140 - 21
vendor/go.opencensus.io/plugin/ochttp/stats.go

@@ -20,20 +20,67 @@ import (
 	"go.opencensus.io/tag"
 	"go.opencensus.io/tag"
 )
 )
 
 
+// Deprecated: client HTTP measures.
+var (
+	// Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect.
+	ClientRequestCount = stats.Int64(
+		"opencensus.io/http/client/request_count",
+		"Number of HTTP requests started",
+		stats.UnitDimensionless)
+	// Deprecated: Use ClientSentBytes.
+	ClientRequestBytes = stats.Int64(
+		"opencensus.io/http/client/request_bytes",
+		"HTTP request body size if set as ContentLength (uncompressed)",
+		stats.UnitBytes)
+	// Deprecated: Use ClientReceivedBytes.
+	ClientResponseBytes = stats.Int64(
+		"opencensus.io/http/client/response_bytes",
+		"HTTP response body size (uncompressed)",
+		stats.UnitBytes)
+	// Deprecated: Use ClientRoundtripLatency.
+	ClientLatency = stats.Float64(
+		"opencensus.io/http/client/latency",
+		"End-to-end latency",
+		stats.UnitMilliseconds)
+)
+
 // The following client HTTP measures are supported for use in custom views.
 // The following client HTTP measures are supported for use in custom views.
 var (
 var (
-	ClientRequestCount  = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless)
-	ClientRequestBytes  = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
-	ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
-	ClientLatency       = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds)
+	ClientSentBytes = stats.Int64(
+		"opencensus.io/http/client/sent_bytes",
+		"Total bytes sent in request body (not including headers)",
+		stats.UnitBytes,
+	)
+	ClientReceivedBytes = stats.Int64(
+		"opencensus.io/http/client/received_bytes",
+		"Total bytes received in response bodies (not including headers but including error responses with bodies)",
+		stats.UnitBytes,
+	)
+	ClientRoundtripLatency = stats.Float64(
+		"opencensus.io/http/client/roundtrip_latency",
+		"Time between first byte of request headers sent to last byte of response received, or terminal error",
+		stats.UnitMilliseconds,
+	)
 )
 )
 
 
 // The following server HTTP measures are supported for use in custom views:
 // The following server HTTP measures are supported for use in custom views:
 var (
 var (
-	ServerRequestCount  = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless)
-	ServerRequestBytes  = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
-	ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
-	ServerLatency       = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds)
+	ServerRequestCount = stats.Int64(
+		"opencensus.io/http/server/request_count",
+		"Number of HTTP requests started",
+		stats.UnitDimensionless)
+	ServerRequestBytes = stats.Int64(
+		"opencensus.io/http/server/request_bytes",
+		"HTTP request body size if set as ContentLength (uncompressed)",
+		stats.UnitBytes)
+	ServerResponseBytes = stats.Int64(
+		"opencensus.io/http/server/response_bytes",
+		"HTTP response body size (uncompressed)",
+		stats.UnitBytes)
+	ServerLatency = stats.Float64(
+		"opencensus.io/http/server/latency",
+		"End-to-end latency",
+		stats.UnitMilliseconds)
 )
 )
 
 
 // The following tags are applied to stats recorded by this package. Host, Path
 // The following tags are applied to stats recorded by this package. Host, Path
@@ -41,28 +88,89 @@ var (
 // ClientRequestCount or ServerRequestCount, since it is recorded before the status is known.
 // ClientRequestCount or ServerRequestCount, since it is recorded before the status is known.
 var (
 var (
 	// Host is the value of the HTTP Host header.
 	// Host is the value of the HTTP Host header.
-	Host, _ = tag.NewKey("http.host")
+	//
+	// The value of this tag can be controlled by the HTTP client, so you need
+	// to watch out for potentially generating high-cardinality labels in your
+	// metrics backend if you use this tag in views.
+	Host = tag.MustNewKey("http.host")
 
 
 	// StatusCode is the numeric HTTP response status code,
 	// StatusCode is the numeric HTTP response status code,
 	// or "error" if a transport error occurred and no status code was read.
 	// or "error" if a transport error occurred and no status code was read.
-	StatusCode, _ = tag.NewKey("http.status")
+	StatusCode = tag.MustNewKey("http.status")
 
 
 	// Path is the URL path (not including query string) in the request.
 	// Path is the URL path (not including query string) in the request.
-	Path, _ = tag.NewKey("http.path")
+	//
+	// The value of this tag can be controlled by the HTTP client, so you need
+	// to watch out for potentially generating high-cardinality labels in your
+	// metrics backend if you use this tag in views.
+	Path = tag.MustNewKey("http.path")
 
 
 	// Method is the HTTP method of the request, capitalized (GET, POST, etc.).
 	// Method is the HTTP method of the request, capitalized (GET, POST, etc.).
-	Method, _ = tag.NewKey("http.method")
+	Method = tag.MustNewKey("http.method")
+
+	// KeyServerRoute is a low cardinality string representing the logical
+	// handler of the request. This is usually the pattern registered on the a
+	// ServeMux (or similar string).
+	KeyServerRoute = tag.MustNewKey("http_server_route")
+)
+
+// Client tag keys.
+var (
+	// KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.).
+	KeyClientMethod = tag.MustNewKey("http_client_method")
+	// KeyClientPath is the URL path (not including query string).
+	KeyClientPath = tag.MustNewKey("http_client_path")
+	// KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received.
+	KeyClientStatus = tag.MustNewKey("http_client_status")
+	// KeyClientHost is the value of the request Host header.
+	KeyClientHost = tag.MustNewKey("http_client_host")
 )
 )
 
 
 // Default distributions used by views in this package.
 // Default distributions used by views in this package.
 var (
 var (
-	DefaultSizeDistribution    = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
-	DefaultLatencyDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
+	DefaultSizeDistribution    = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
+	DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
+)
+
+// Package ochttp provides some convenience views for client measures.
+// You still need to register these views for data to actually be collected.
+var (
+	ClientSentBytesDistribution = &view.View{
+		Name:        "opencensus.io/http/client/sent_bytes",
+		Measure:     ClientSentBytes,
+		Aggregation: DefaultSizeDistribution,
+		Description: "Total bytes sent in request body (not including headers), by HTTP method and response status",
+		TagKeys:     []tag.Key{KeyClientMethod, KeyClientStatus},
+	}
+
+	ClientReceivedBytesDistribution = &view.View{
+		Name:        "opencensus.io/http/client/received_bytes",
+		Measure:     ClientReceivedBytes,
+		Aggregation: DefaultSizeDistribution,
+		Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status",
+		TagKeys:     []tag.Key{KeyClientMethod, KeyClientStatus},
+	}
+
+	ClientRoundtripLatencyDistribution = &view.View{
+		Name:        "opencensus.io/http/client/roundtrip_latency",
+		Measure:     ClientRoundtripLatency,
+		Aggregation: DefaultLatencyDistribution,
+		Description: "End-to-end latency, by HTTP method and response status",
+		TagKeys:     []tag.Key{KeyClientMethod, KeyClientStatus},
+	}
+
+	ClientCompletedCount = &view.View{
+		Name:        "opencensus.io/http/client/completed_count",
+		Measure:     ClientRoundtripLatency,
+		Aggregation: view.Count(),
+		Description: "Count of completed requests, by HTTP method and response status",
+		TagKeys:     []tag.Key{KeyClientMethod, KeyClientStatus},
+	}
 )
 )
 
 
-// Package ochttp provides some convenience views.
-// You need to subscribe to the views for data to actually be collected.
+// Deprecated: Old client Views.
 var (
 var (
+	// Deprecated: No direct replacement, but see ClientCompletedCount.
 	ClientRequestCountView = &view.View{
 	ClientRequestCountView = &view.View{
 		Name:        "opencensus.io/http/client/request_count",
 		Name:        "opencensus.io/http/client/request_count",
 		Description: "Count of HTTP requests started",
 		Description: "Count of HTTP requests started",
@@ -70,43 +178,52 @@ var (
 		Aggregation: view.Count(),
 		Aggregation: view.Count(),
 	}
 	}
 
 
+	// Deprecated: Use ClientSentBytesDistribution.
 	ClientRequestBytesView = &view.View{
 	ClientRequestBytesView = &view.View{
 		Name:        "opencensus.io/http/client/request_bytes",
 		Name:        "opencensus.io/http/client/request_bytes",
 		Description: "Size distribution of HTTP request body",
 		Description: "Size distribution of HTTP request body",
-		Measure:     ClientRequestBytes,
+		Measure:     ClientSentBytes,
 		Aggregation: DefaultSizeDistribution,
 		Aggregation: DefaultSizeDistribution,
 	}
 	}
 
 
+	// Deprecated: Use ClientReceivedBytesDistribution instead.
 	ClientResponseBytesView = &view.View{
 	ClientResponseBytesView = &view.View{
 		Name:        "opencensus.io/http/client/response_bytes",
 		Name:        "opencensus.io/http/client/response_bytes",
 		Description: "Size distribution of HTTP response body",
 		Description: "Size distribution of HTTP response body",
-		Measure:     ClientResponseBytes,
+		Measure:     ClientReceivedBytes,
 		Aggregation: DefaultSizeDistribution,
 		Aggregation: DefaultSizeDistribution,
 	}
 	}
 
 
+	// Deprecated: Use ClientRoundtripLatencyDistribution instead.
 	ClientLatencyView = &view.View{
 	ClientLatencyView = &view.View{
 		Name:        "opencensus.io/http/client/latency",
 		Name:        "opencensus.io/http/client/latency",
 		Description: "Latency distribution of HTTP requests",
 		Description: "Latency distribution of HTTP requests",
-		Measure:     ClientLatency,
+		Measure:     ClientRoundtripLatency,
 		Aggregation: DefaultLatencyDistribution,
 		Aggregation: DefaultLatencyDistribution,
 	}
 	}
 
 
+	// Deprecated: Use ClientCompletedCount instead.
 	ClientRequestCountByMethod = &view.View{
 	ClientRequestCountByMethod = &view.View{
 		Name:        "opencensus.io/http/client/request_count_by_method",
 		Name:        "opencensus.io/http/client/request_count_by_method",
 		Description: "Client request count by HTTP method",
 		Description: "Client request count by HTTP method",
 		TagKeys:     []tag.Key{Method},
 		TagKeys:     []tag.Key{Method},
-		Measure:     ClientRequestCount,
+		Measure:     ClientSentBytes,
 		Aggregation: view.Count(),
 		Aggregation: view.Count(),
 	}
 	}
 
 
+	// Deprecated: Use ClientCompletedCount instead.
 	ClientResponseCountByStatusCode = &view.View{
 	ClientResponseCountByStatusCode = &view.View{
 		Name:        "opencensus.io/http/client/response_count_by_status_code",
 		Name:        "opencensus.io/http/client/response_count_by_status_code",
 		Description: "Client response count by status code",
 		Description: "Client response count by status code",
 		TagKeys:     []tag.Key{StatusCode},
 		TagKeys:     []tag.Key{StatusCode},
-		Measure:     ClientLatency,
+		Measure:     ClientRoundtripLatency,
 		Aggregation: view.Count(),
 		Aggregation: view.Count(),
 	}
 	}
+)
 
 
+// Package ochttp provides some convenience views for server measures.
+// You still need to register these views for data to actually be collected.
+var (
 	ServerRequestCountView = &view.View{
 	ServerRequestCountView = &view.View{
 		Name:        "opencensus.io/http/server/request_count",
 		Name:        "opencensus.io/http/server/request_count",
 		Description: "Count of HTTP requests started",
 		Description: "Count of HTTP requests started",
@@ -153,6 +270,7 @@ var (
 )
 )
 
 
 // DefaultClientViews are the default client views provided by this package.
 // DefaultClientViews are the default client views provided by this package.
+// Deprecated: No replacement. Register the views you would like individually.
 var DefaultClientViews = []*view.View{
 var DefaultClientViews = []*view.View{
 	ClientRequestCountView,
 	ClientRequestCountView,
 	ClientRequestBytesView,
 	ClientRequestBytesView,
@@ -163,6 +281,7 @@ var DefaultClientViews = []*view.View{
 }
 }
 
 
 // DefaultServerViews are the default server views provided by this package.
 // DefaultServerViews are the default server views provided by this package.
+// Deprecated: No replacement. Register the views you would like individually.
 var DefaultServerViews = []*view.View{
 var DefaultServerViews = []*view.View{
 	ServerRequestCountView,
 	ServerRequestCountView,
 	ServerRequestBytesView,
 	ServerRequestBytesView,

+ 76 - 31
vendor/go.opencensus.io/plugin/ochttp/trace.go

@@ -17,7 +17,7 @@ package ochttp
 import (
 import (
 	"io"
 	"io"
 	"net/http"
 	"net/http"
-	"net/url"
+	"net/http/httptrace"
 
 
 	"go.opencensus.io/plugin/ochttp/propagation/b3"
 	"go.opencensus.io/plugin/ochttp/propagation/b3"
 	"go.opencensus.io/trace"
 	"go.opencensus.io/trace"
@@ -34,14 +34,17 @@ const (
 	HostAttribute       = "http.host"
 	HostAttribute       = "http.host"
 	MethodAttribute     = "http.method"
 	MethodAttribute     = "http.method"
 	PathAttribute       = "http.path"
 	PathAttribute       = "http.path"
+	URLAttribute        = "http.url"
 	UserAgentAttribute  = "http.user_agent"
 	UserAgentAttribute  = "http.user_agent"
 	StatusCodeAttribute = "http.status_code"
 	StatusCodeAttribute = "http.status_code"
 )
 )
 
 
 type traceTransport struct {
 type traceTransport struct {
-	base         http.RoundTripper
-	startOptions trace.StartOptions
-	format       propagation.HTTPFormat
+	base           http.RoundTripper
+	startOptions   trace.StartOptions
+	format         propagation.HTTPFormat
+	formatSpanName func(*http.Request) string
+	newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
 }
 }
 
 
 // TODO(jbd): Add message events for request and response size.
 // TODO(jbd): Add message events for request and response size.
@@ -50,15 +53,30 @@ type traceTransport struct {
 // The created span can follow a parent span, if a parent is presented in
 // The created span can follow a parent span, if a parent is presented in
 // the request's context.
 // the request's context.
 func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
 func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
-	name := spanNameFromURL(req.URL)
+	name := t.formatSpanName(req)
 	// TODO(jbd): Discuss whether we want to prefix
 	// TODO(jbd): Discuss whether we want to prefix
 	// outgoing requests with Sent.
 	// outgoing requests with Sent.
-	_, span := trace.StartSpan(req.Context(), name,
+	ctx, span := trace.StartSpan(req.Context(), name,
 		trace.WithSampler(t.startOptions.Sampler),
 		trace.WithSampler(t.startOptions.Sampler),
 		trace.WithSpanKind(trace.SpanKindClient))
 		trace.WithSpanKind(trace.SpanKindClient))
 
 
-	req = req.WithContext(trace.WithSpan(req.Context(), span))
+	if t.newClientTrace != nil {
+		req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span)))
+	} else {
+		req = req.WithContext(ctx)
+	}
+
 	if t.format != nil {
 	if t.format != nil {
+		// SpanContextToRequest will modify its Request argument, which is
+		// contrary to the contract for http.RoundTripper, so we need to
+		// pass it a copy of the Request.
+		// However, the Request struct itself was already copied by
+		// the WithContext calls above and so we just need to copy the header.
+		header := make(http.Header)
+		for k, v := range req.Header {
+			header[k] = v
+		}
+		req.Header = header
 		t.format.SpanContextToRequest(span.SpanContext(), req)
 		t.format.SpanContextToRequest(span.SpanContext(), req)
 	}
 	}
 
 
@@ -76,7 +94,8 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
 	// span.End() will be invoked after
 	// span.End() will be invoked after
 	// a read from resp.Body returns io.EOF or when
 	// a read from resp.Body returns io.EOF or when
 	// resp.Body.Close() is invoked.
 	// resp.Body.Close() is invoked.
-	resp.Body = &bodyTracker{rc: resp.Body, span: span}
+	bt := &bodyTracker{rc: resp.Body, span: span}
+	resp.Body = wrappedBody(bt, resp.Body)
 	return resp, err
 	return resp, err
 }
 }
 
 
@@ -127,17 +146,26 @@ func (t *traceTransport) CancelRequest(req *http.Request) {
 	}
 	}
 }
 }
 
 
-func spanNameFromURL(u *url.URL) string {
-	return u.Path
+func spanNameFromURL(req *http.Request) string {
+	return req.URL.Path
 }
 }
 
 
 func requestAttrs(r *http.Request) []trace.Attribute {
 func requestAttrs(r *http.Request) []trace.Attribute {
-	return []trace.Attribute{
+	userAgent := r.UserAgent()
+
+	attrs := make([]trace.Attribute, 0, 5)
+	attrs = append(attrs,
 		trace.StringAttribute(PathAttribute, r.URL.Path),
 		trace.StringAttribute(PathAttribute, r.URL.Path),
-		trace.StringAttribute(HostAttribute, r.URL.Host),
+		trace.StringAttribute(URLAttribute, r.URL.String()),
+		trace.StringAttribute(HostAttribute, r.Host),
 		trace.StringAttribute(MethodAttribute, r.Method),
 		trace.StringAttribute(MethodAttribute, r.Method),
-		trace.StringAttribute(UserAgentAttribute, r.UserAgent()),
+	)
+
+	if userAgent != "" {
+		attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent))
 	}
 	}
+
+	return attrs
 }
 }
 
 
 func responseAttrs(resp *http.Response) []trace.Attribute {
 func responseAttrs(resp *http.Response) []trace.Attribute {
@@ -146,7 +174,7 @@ func responseAttrs(resp *http.Response) []trace.Attribute {
 	}
 	}
 }
 }
 
 
-// HTTPStatusToTraceStatus converts the HTTP status code to a trace.Status that
+// TraceStatus is a utility to convert the HTTP status code to a trace.Status that
 // represents the outcome as closely as possible.
 // represents the outcome as closely as possible.
 func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
 func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
 	var code int32
 	var code int32
@@ -158,6 +186,8 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
 		code = trace.StatusCodeCancelled
 		code = trace.StatusCodeCancelled
 	case http.StatusBadRequest:
 	case http.StatusBadRequest:
 		code = trace.StatusCodeInvalidArgument
 		code = trace.StatusCodeInvalidArgument
+	case http.StatusUnprocessableEntity:
+		code = trace.StatusCodeInvalidArgument
 	case http.StatusGatewayTimeout:
 	case http.StatusGatewayTimeout:
 		code = trace.StatusCodeDeadlineExceeded
 		code = trace.StatusCodeDeadlineExceeded
 	case http.StatusNotFound:
 	case http.StatusNotFound:
@@ -174,26 +204,41 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
 		code = trace.StatusCodeUnavailable
 		code = trace.StatusCodeUnavailable
 	case http.StatusOK:
 	case http.StatusOK:
 		code = trace.StatusCodeOK
 		code = trace.StatusCodeOK
+	case http.StatusConflict:
+		code = trace.StatusCodeAlreadyExists
 	}
 	}
+
 	return trace.Status{Code: code, Message: codeToStr[code]}
 	return trace.Status{Code: code, Message: codeToStr[code]}
 }
 }
 
 
 var codeToStr = map[int32]string{
 var codeToStr = map[int32]string{
-	trace.StatusCodeOK:                 `"OK"`,
-	trace.StatusCodeCancelled:          `"CANCELLED"`,
-	trace.StatusCodeUnknown:            `"UNKNOWN"`,
-	trace.StatusCodeInvalidArgument:    `"INVALID_ARGUMENT"`,
-	trace.StatusCodeDeadlineExceeded:   `"DEADLINE_EXCEEDED"`,
-	trace.StatusCodeNotFound:           `"NOT_FOUND"`,
-	trace.StatusCodeAlreadyExists:      `"ALREADY_EXISTS"`,
-	trace.StatusCodePermissionDenied:   `"PERMISSION_DENIED"`,
-	trace.StatusCodeResourceExhausted:  `"RESOURCE_EXHAUSTED"`,
-	trace.StatusCodeFailedPrecondition: `"FAILED_PRECONDITION"`,
-	trace.StatusCodeAborted:            `"ABORTED"`,
-	trace.StatusCodeOutOfRange:         `"OUT_OF_RANGE"`,
-	trace.StatusCodeUnimplemented:      `"UNIMPLEMENTED"`,
-	trace.StatusCodeInternal:           `"INTERNAL"`,
-	trace.StatusCodeUnavailable:        `"UNAVAILABLE"`,
-	trace.StatusCodeDataLoss:           `"DATA_LOSS"`,
-	trace.StatusCodeUnauthenticated:    `"UNAUTHENTICATED"`,
+	trace.StatusCodeOK:                 `OK`,
+	trace.StatusCodeCancelled:          `CANCELLED`,
+	trace.StatusCodeUnknown:            `UNKNOWN`,
+	trace.StatusCodeInvalidArgument:    `INVALID_ARGUMENT`,
+	trace.StatusCodeDeadlineExceeded:   `DEADLINE_EXCEEDED`,
+	trace.StatusCodeNotFound:           `NOT_FOUND`,
+	trace.StatusCodeAlreadyExists:      `ALREADY_EXISTS`,
+	trace.StatusCodePermissionDenied:   `PERMISSION_DENIED`,
+	trace.StatusCodeResourceExhausted:  `RESOURCE_EXHAUSTED`,
+	trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`,
+	trace.StatusCodeAborted:            `ABORTED`,
+	trace.StatusCodeOutOfRange:         `OUT_OF_RANGE`,
+	trace.StatusCodeUnimplemented:      `UNIMPLEMENTED`,
+	trace.StatusCodeInternal:           `INTERNAL`,
+	trace.StatusCodeUnavailable:        `UNAVAILABLE`,
+	trace.StatusCodeDataLoss:           `DATA_LOSS`,
+	trace.StatusCodeUnauthenticated:    `UNAUTHENTICATED`,
+}
+
+func isHealthEndpoint(path string) bool {
+	// Health checking is pretty frequent and
+	// traces collected for health endpoints
+	// can be extremely noisy and expensive.
+	// Disable canonical health checking endpoints
+	// like /healthz and /_ah/health for now.
+	if path == "/healthz" || path == "/_ah/health" {
+		return true
+	}
+	return false
 }
 }

+ 44 - 0
vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go

@@ -0,0 +1,44 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ochttp
+
+import (
+	"io"
+)
+
+// wrappedBody returns a wrapped version of the original
+// Body and only implements the same combination of additional
+// interfaces as the original.
+func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser {
+	var (
+		wr, i0 = body.(io.Writer)
+	)
+	switch {
+	case !i0:
+		return struct {
+			io.ReadCloser
+		}{wrapper}
+
+	case i0:
+		return struct {
+			io.ReadCloser
+			io.Writer
+		}{wrapper, wr}
+	default:
+		return struct {
+			io.ReadCloser
+		}{wrapper}
+	}
+}

+ 164 - 0
vendor/go.opencensus.io/resource/resource.go

@@ -0,0 +1,164 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package resource provides functionality for resource, which capture
+// identifying information about the entities for which signals are exported.
+package resource
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// Environment variables used by FromEnv to decode a resource.
+const (
+	EnvVarType   = "OC_RESOURCE_TYPE"
+	EnvVarLabels = "OC_RESOURCE_LABELS"
+)
+
+// Resource describes an entity about which identifying information and metadata is exposed.
+// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace.
+type Resource struct {
+	Type   string
+	Labels map[string]string
+}
+
+// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable.
+func EncodeLabels(labels map[string]string) string {
+	sortedKeys := make([]string, 0, len(labels))
+	for k := range labels {
+		sortedKeys = append(sortedKeys, k)
+	}
+	sort.Strings(sortedKeys)
+
+	s := ""
+	for i, k := range sortedKeys {
+		if i > 0 {
+			s += ","
+		}
+		s += k + "=" + strconv.Quote(labels[k])
+	}
+	return s
+}
+
+var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`)
+
+// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable.
+// A list of labels of the form `<key1>="<value1>",<key2>="<value2>",...` is accepted.
+// Domain names and paths are accepted as label keys.
+// Most users will want to use FromEnv instead.
+func DecodeLabels(s string) (map[string]string, error) {
+	m := map[string]string{}
+	// Ensure a trailing comma, which allows us to keep the regex simpler
+	s = strings.TrimRight(strings.TrimSpace(s), ",") + ","
+
+	for len(s) > 0 {
+		match := labelRegex.FindStringSubmatch(s)
+		if len(match) == 0 {
+			return nil, fmt.Errorf("invalid label formatting, remainder: %s", s)
+		}
+		v := match[2]
+		if v == "" {
+			v = match[3]
+		} else {
+			var err error
+			if v, err = strconv.Unquote(v); err != nil {
+				return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err)
+			}
+		}
+		m[match[1]] = v
+
+		s = s[len(match[0]):]
+	}
+	return m, nil
+}
+
+// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE
+// and OC_RESOURCE_labelS environment variables.
+func FromEnv(context.Context) (*Resource, error) {
+	res := &Resource{
+		Type: strings.TrimSpace(os.Getenv(EnvVarType)),
+	}
+	labels := strings.TrimSpace(os.Getenv(EnvVarLabels))
+	if labels == "" {
+		return res, nil
+	}
+	var err error
+	if res.Labels, err = DecodeLabels(labels); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+var _ Detector = FromEnv
+
+// merge resource information from b into a. In case of a collision, a takes precedence.
+func merge(a, b *Resource) *Resource {
+	if a == nil {
+		return b
+	}
+	if b == nil {
+		return a
+	}
+	res := &Resource{
+		Type:   a.Type,
+		Labels: map[string]string{},
+	}
+	if res.Type == "" {
+		res.Type = b.Type
+	}
+	for k, v := range b.Labels {
+		res.Labels[k] = v
+	}
+	// Labels from resource a overwrite labels from resource b.
+	for k, v := range a.Labels {
+		res.Labels[k] = v
+	}
+	return res
+}
+
+// Detector attempts to detect resource information.
+// If the detector cannot find resource information, the returned resource is nil but no
+// error is returned.
+// An error is only returned on unexpected failures.
+type Detector func(context.Context) (*Resource, error)
+
+// MultiDetector returns a Detector that calls all input detectors in order and
+// merges each result with the previous one. In case a type of label key is already set,
+// the first set value is takes precedence.
+// It returns on the first error that a sub-detector encounters.
+func MultiDetector(detectors ...Detector) Detector {
+	return func(ctx context.Context) (*Resource, error) {
+		return detectAll(ctx, detectors...)
+	}
+}
+
+// detectall calls all input detectors sequentially an merges each result with the previous one.
+// It returns on the first error that a sub-detector encounters.
+func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) {
+	var res *Resource
+	for _, d := range detectors {
+		r, err := d(ctx)
+		if err != nil {
+			return nil, err
+		}
+		res = merge(res, r)
+	}
+	return res, nil
+}

+ 26 - 12
vendor/go.opencensus.io/stats/doc.go

@@ -21,35 +21,49 @@ aggregate the collected data, and export the aggregated data.
 
 
 Measures
 Measures
 
 
-A measure represents a type of metric to be tracked and recorded.
+A measure represents a type of data point to be tracked and recorded.
 For example, latency, request Mb/s, and response Mb/s are measures
 For example, latency, request Mb/s, and response Mb/s are measures
 to collect from a server.
 to collect from a server.
 
 
-Each measure needs to be registered before being used. Measure
-constructors such as Int64 and Float64 automatically
+Measure constructors such as Int64 and Float64 automatically
 register the measure by the given name. Each registered measure needs
 register the measure by the given name. Each registered measure needs
 to be unique by name. Measures also have a description and a unit.
 to be unique by name. Measures also have a description and a unit.
 
 
-Libraries can define and export measures for their end users to
-create views and collect instrumentation data.
+Libraries can define and export measures. Application authors can then
+create views and collect and break down measures by the tags they are
+interested in.
 
 
 Recording measurements
 Recording measurements
 
 
 Measurement is a data point to be collected for a measure. For example,
 Measurement is a data point to be collected for a measure. For example,
 for a latency (ms) measure, 100 is a measurement that represents a 100ms
 for a latency (ms) measure, 100 is a measurement that represents a 100ms
-latency event. Users collect data points on the existing measures with
+latency event. Measurements are created from measures with
 the current context. Tags from the current context are recorded with the
 the current context. Tags from the current context are recorded with the
 measurements if they are any.
 measurements if they are any.
 
 
-Recorded measurements are dropped immediately if user is not aggregating
-them via views. Users don't necessarily need to conditionally enable/disable
+Recorded measurements are dropped immediately if no views are registered for them.
+There is usually no need to conditionally enable and disable
 recording to reduce cost. Recording of measurements is cheap.
 recording to reduce cost. Recording of measurements is cheap.
 
 
-Libraries can always record measurements, and end-users can later decide
+Libraries can always record measurements, and applications can later decide
 on which measurements they want to collect by registering views. This allows
 on which measurements they want to collect by registering views. This allows
 libraries to turn on the instrumentation by default.
 libraries to turn on the instrumentation by default.
+
+Exemplars
+
+For a given recorded measurement, the associated exemplar is a diagnostic map
+that gives more information about the measurement.
+
+When aggregated using a Distribution aggregation, an exemplar is kept for each
+bucket in the Distribution. This allows you to easily find an example of a
+measurement that fell into each bucket.
+
+For example, if you also use the OpenCensus trace package and you
+record a measurement with a context that contains a sampled trace span,
+then the trace span will be added to the exemplar associated with the measurement.
+
+When exported to a supporting back end, you should be able to easily navigate
+to example traces that fell into each bucket in the Distribution.
+
 */
 */
 package stats // import "go.opencensus.io/stats"
 package stats // import "go.opencensus.io/stats"
-
-// TODO(acetechnologist): Add a link to the language independent OpenCensus
-// spec when it is available.

+ 1 - 1
vendor/go.opencensus.io/stats/internal/record.go

@@ -19,7 +19,7 @@ import (
 )
 )
 
 
 // DefaultRecorder will be called for each Record call.
 // DefaultRecorder will be called for each Record call.
-var DefaultRecorder func(*tag.Map, interface{})
+var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{})
 
 
 // SubscriptionReporter reports when a view subscribed with a measure.
 // SubscriptionReporter reports when a view subscribed with a measure.
 var SubscriptionReporter func(measure string)
 var SubscriptionReporter func(measure string)

+ 23 - 10
vendor/go.opencensus.io/stats/measure.go

@@ -20,19 +20,31 @@ import (
 	"sync/atomic"
 	"sync/atomic"
 )
 )
 
 
-// Measure represents a type of metric to be tracked and recorded.
-// For example, latency, request Mb/s, and response Mb/s are measures
+// Measure represents a single numeric value to be tracked and recorded.
+// For example, latency, request bytes, and response bytes could be measures
 // to collect from a server.
 // to collect from a server.
 //
 //
-// Each measure needs to be registered before being used.
-// Measure constructors such as Int64 and
-// Float64 automatically registers the measure
-// by the given name.
-// Each registered measure needs to be unique by name.
-// Measures also have a description and a unit.
+// Measures by themselves have no outside effects. In order to be exported,
+// the measure needs to be used in a View. If no Views are defined over a
+// measure, there is very little cost in recording it.
 type Measure interface {
 type Measure interface {
+	// Name returns the name of this measure.
+	//
+	// Measure names are globally unique (among all libraries linked into your program).
+	// We recommend prefixing the measure name with a domain name relevant to your
+	// project or application.
+	//
+	// Measure names are never sent over the wire or exported to backends.
+	// They are only used to create Views.
 	Name() string
 	Name() string
+
+	// Description returns the human-readable description of this measure.
 	Description() string
 	Description() string
+
+	// Unit returns the units for the values this measure takes on.
+	//
+	// Units are encoded according to the case-sensitive abbreviations from the
+	// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
 	Unit() string
 	Unit() string
 }
 }
 
 
@@ -81,8 +93,9 @@ func registerMeasureHandle(name, desc, unit string) *measureDescriptor {
 // provides methods to create measurements of their kind. For example, Int64Measure
 // provides methods to create measurements of their kind. For example, Int64Measure
 // provides M to convert an int64 into a measurement.
 // provides M to convert an int64 into a measurement.
 type Measurement struct {
 type Measurement struct {
-	v float64
-	m Measure
+	v    float64
+	m    Measure
+	desc *measureDescriptor
 }
 }
 
 
 // Value returns the value of the Measurement as a float64.
 // Value returns the value of the Measurement as a float64.

+ 24 - 21
vendor/go.opencensus.io/stats/measure_float64.go

@@ -15,38 +15,41 @@
 
 
 package stats
 package stats
 
 
-// Float64Measure is a measure of type float64.
+// Float64Measure is a measure for float64 values.
 type Float64Measure struct {
 type Float64Measure struct {
-	md *measureDescriptor
+	desc *measureDescriptor
+}
+
+// M creates a new float64 measurement.
+// Use Record to record measurements.
+func (m *Float64Measure) M(v float64) Measurement {
+	return Measurement{
+		m:    m,
+		desc: m.desc,
+		v:    v,
+	}
+}
+
+// Float64 creates a new measure for float64 values.
+//
+// See the documentation for interface Measure for more guidance on the
+// parameters of this function.
+func Float64(name, description, unit string) *Float64Measure {
+	mi := registerMeasureHandle(name, description, unit)
+	return &Float64Measure{mi}
 }
 }
 
 
 // Name returns the name of the measure.
 // Name returns the name of the measure.
 func (m *Float64Measure) Name() string {
 func (m *Float64Measure) Name() string {
-	return m.md.name
+	return m.desc.name
 }
 }
 
 
 // Description returns the description of the measure.
 // Description returns the description of the measure.
 func (m *Float64Measure) Description() string {
 func (m *Float64Measure) Description() string {
-	return m.md.description
+	return m.desc.description
 }
 }
 
 
 // Unit returns the unit of the measure.
 // Unit returns the unit of the measure.
 func (m *Float64Measure) Unit() string {
 func (m *Float64Measure) Unit() string {
-	return m.md.unit
-}
-
-// M creates a new float64 measurement.
-// Use Record to record measurements.
-func (m *Float64Measure) M(v float64) Measurement {
-	if !m.md.subscribed() {
-		return Measurement{}
-	}
-	return Measurement{m: m, v: v}
-}
-
-// Float64 creates a new measure of type Float64Measure.
-// It never returns an error.
-func Float64(name, description, unit string) *Float64Measure {
-	mi := registerMeasureHandle(name, description, unit)
-	return &Float64Measure{mi}
+	return m.desc.unit
 }
 }

+ 24 - 21
vendor/go.opencensus.io/stats/measure_int64.go

@@ -15,38 +15,41 @@
 
 
 package stats
 package stats
 
 
-// Int64Measure is a measure of type int64.
+// Int64Measure is a measure for int64 values.
 type Int64Measure struct {
 type Int64Measure struct {
-	md *measureDescriptor
+	desc *measureDescriptor
+}
+
+// M creates a new int64 measurement.
+// Use Record to record measurements.
+func (m *Int64Measure) M(v int64) Measurement {
+	return Measurement{
+		m:    m,
+		desc: m.desc,
+		v:    float64(v),
+	}
+}
+
+// Int64 creates a new measure for int64 values.
+//
+// See the documentation for interface Measure for more guidance on the
+// parameters of this function.
+func Int64(name, description, unit string) *Int64Measure {
+	mi := registerMeasureHandle(name, description, unit)
+	return &Int64Measure{mi}
 }
 }
 
 
 // Name returns the name of the measure.
 // Name returns the name of the measure.
 func (m *Int64Measure) Name() string {
 func (m *Int64Measure) Name() string {
-	return m.md.name
+	return m.desc.name
 }
 }
 
 
 // Description returns the description of the measure.
 // Description returns the description of the measure.
 func (m *Int64Measure) Description() string {
 func (m *Int64Measure) Description() string {
-	return m.md.description
+	return m.desc.description
 }
 }
 
 
 // Unit returns the unit of the measure.
 // Unit returns the unit of the measure.
 func (m *Int64Measure) Unit() string {
 func (m *Int64Measure) Unit() string {
-	return m.md.unit
-}
-
-// M creates a new int64 measurement.
-// Use Record to record measurements.
-func (m *Int64Measure) M(v int64) Measurement {
-	if !m.md.subscribed() {
-		return Measurement{}
-	}
-	return Measurement{m: m, v: float64(v)}
-}
-
-// Int64 creates a new measure of type Int64Measure.
-// It never returns an error.
-func Int64(name, description, unit string) *Int64Measure {
-	mi := registerMeasureHandle(name, description, unit)
-	return &Int64Measure{mi}
+	return m.desc.unit
 }
 }

+ 74 - 9
vendor/go.opencensus.io/stats/record.go

@@ -18,6 +18,7 @@ package stats
 import (
 import (
 	"context"
 	"context"
 
 
+	"go.opencensus.io/metric/metricdata"
 	"go.opencensus.io/stats/internal"
 	"go.opencensus.io/stats/internal"
 	"go.opencensus.io/tag"
 	"go.opencensus.io/tag"
 )
 )
@@ -30,23 +31,87 @@ func init() {
 	}
 	}
 }
 }
 
 
-// Record records one or multiple measurements with the same tags at once.
+type recordOptions struct {
+	attachments  metricdata.Attachments
+	mutators     []tag.Mutator
+	measurements []Measurement
+}
+
+// WithAttachments applies provided exemplar attachments.
+func WithAttachments(attachments metricdata.Attachments) Options {
+	return func(ro *recordOptions) {
+		ro.attachments = attachments
+	}
+}
+
+// WithTags applies provided tag mutators.
+func WithTags(mutators ...tag.Mutator) Options {
+	return func(ro *recordOptions) {
+		ro.mutators = mutators
+	}
+}
+
+// WithMeasurements applies provided measurements.
+func WithMeasurements(measurements ...Measurement) Options {
+	return func(ro *recordOptions) {
+		ro.measurements = measurements
+	}
+}
+
+// Options apply changes to recordOptions.
+type Options func(*recordOptions)
+
+func createRecordOption(ros ...Options) *recordOptions {
+	o := &recordOptions{}
+	for _, ro := range ros {
+		ro(o)
+	}
+	return o
+}
+
+// Record records one or multiple measurements with the same context at once.
 // If there are any tags in the context, measurements will be tagged with them.
 // If there are any tags in the context, measurements will be tagged with them.
 func Record(ctx context.Context, ms ...Measurement) {
 func Record(ctx context.Context, ms ...Measurement) {
-	if len(ms) == 0 {
-		return
+	RecordWithOptions(ctx, WithMeasurements(ms...))
+}
+
+// RecordWithTags records one or multiple measurements at once.
+//
+// Measurements will be tagged with the tags in the context mutated by the mutators.
+// RecordWithTags is useful if you want to record with tag mutations but don't want
+// to propagate the mutations in the context.
+func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error {
+	return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...))
+}
+
+// RecordWithOptions records measurements from the given options (if any) against context
+// and tags and attachments in the options (if any).
+// If there are any tags in the context, measurements will be tagged with them.
+func RecordWithOptions(ctx context.Context, ros ...Options) error {
+	o := createRecordOption(ros...)
+	if len(o.measurements) == 0 {
+		return nil
 	}
 	}
-	var record bool
-	for _, m := range ms {
-		if (m != Measurement{}) {
+	recorder := internal.DefaultRecorder
+	if recorder == nil {
+		return nil
+	}
+	record := false
+	for _, m := range o.measurements {
+		if m.desc.subscribed() {
 			record = true
 			record = true
 			break
 			break
 		}
 		}
 	}
 	}
 	if !record {
 	if !record {
-		return
+		return nil
 	}
 	}
-	if internal.DefaultRecorder != nil {
-		internal.DefaultRecorder(tag.FromContext(ctx), ms)
+	if len(o.mutators) > 0 {
+		var err error
+		if ctx, err = tag.New(ctx, o.mutators...); err != nil {
+			return err
+		}
 	}
 	}
+	recorder(tag.FromContext(ctx), o.measurements, o.attachments)
+	return nil
 }
 }

+ 1 - 0
vendor/go.opencensus.io/stats/units.go

@@ -22,4 +22,5 @@ const (
 	UnitDimensionless = "1"
 	UnitDimensionless = "1"
 	UnitBytes         = "By"
 	UnitBytes         = "By"
 	UnitMilliseconds  = "ms"
 	UnitMilliseconds  = "ms"
+	UnitSeconds       = "s"
 )
 )

+ 6 - 5
vendor/go.opencensus.io/stats/view/aggregation.go

@@ -82,7 +82,7 @@ func Sum() *Aggregation {
 // Distribution indicates that the desired aggregation is
 // Distribution indicates that the desired aggregation is
 // a histogram distribution.
 // a histogram distribution.
 //
 //
-// An distribution aggregation may contain a histogram of the values in the
+// A distribution aggregation may contain a histogram of the values in the
 // population. The bucket boundaries for that histogram are described
 // population. The bucket boundaries for that histogram are described
 // by the bounds. This defines len(bounds)+1 buckets.
 // by the bounds. This defines len(bounds)+1 buckets.
 //
 //
@@ -99,13 +99,14 @@ func Sum() *Aggregation {
 // If len(bounds) is 1 then there is no finite buckets, and that single
 // If len(bounds) is 1 then there is no finite buckets, and that single
 // element is the common boundary of the overflow and underflow buckets.
 // element is the common boundary of the overflow and underflow buckets.
 func Distribution(bounds ...float64) *Aggregation {
 func Distribution(bounds ...float64) *Aggregation {
-	return &Aggregation{
+	agg := &Aggregation{
 		Type:    AggTypeDistribution,
 		Type:    AggTypeDistribution,
 		Buckets: bounds,
 		Buckets: bounds,
-		newData: func() AggregationData {
-			return newDistributionData(bounds)
-		},
 	}
 	}
+	agg.newData = func() AggregationData {
+		return newDistributionData(agg)
+	}
+	return agg
 }
 }
 
 
 // LastValue only reports the last value recorded using this
 // LastValue only reports the last value recorded using this

+ 125 - 39
vendor/go.opencensus.io/stats/view/aggregation_data.go

@@ -17,6 +17,9 @@ package view
 
 
 import (
 import (
 	"math"
 	"math"
+	"time"
+
+	"go.opencensus.io/metric/metricdata"
 )
 )
 
 
 // AggregationData represents an aggregated value from a collection.
 // AggregationData represents an aggregated value from a collection.
@@ -24,9 +27,10 @@ import (
 // Mosts users won't directly access aggregration data.
 // Mosts users won't directly access aggregration data.
 type AggregationData interface {
 type AggregationData interface {
 	isAggregationData() bool
 	isAggregationData() bool
-	addSample(v float64)
+	addSample(v float64, attachments map[string]interface{}, t time.Time)
 	clone() AggregationData
 	clone() AggregationData
 	equal(other AggregationData) bool
 	equal(other AggregationData) bool
+	toPoint(t metricdata.Type, time time.Time) metricdata.Point
 }
 }
 
 
 const epsilon = 1e-9
 const epsilon = 1e-9
@@ -41,7 +45,7 @@ type CountData struct {
 
 
 func (a *CountData) isAggregationData() bool { return true }
 func (a *CountData) isAggregationData() bool { return true }
 
 
-func (a *CountData) addSample(v float64) {
+func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) {
 	a.Value = a.Value + 1
 	a.Value = a.Value + 1
 }
 }
 
 
@@ -58,6 +62,15 @@ func (a *CountData) equal(other AggregationData) bool {
 	return a.Value == a2.Value
 	return a.Value == a2.Value
 }
 }
 
 
+func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
+	switch metricType {
+	case metricdata.TypeCumulativeInt64:
+		return metricdata.NewInt64Point(t, a.Value)
+	default:
+		panic("unsupported metricdata.Type")
+	}
+}
+
 // SumData is the aggregated data for the Sum aggregation.
 // SumData is the aggregated data for the Sum aggregation.
 // A sum aggregation processes data and sums up the recordings.
 // A sum aggregation processes data and sums up the recordings.
 //
 //
@@ -68,8 +81,8 @@ type SumData struct {
 
 
 func (a *SumData) isAggregationData() bool { return true }
 func (a *SumData) isAggregationData() bool { return true }
 
 
-func (a *SumData) addSample(f float64) {
-	a.Value += f
+func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) {
+	a.Value += v
 }
 }
 
 
 func (a *SumData) clone() AggregationData {
 func (a *SumData) clone() AggregationData {
@@ -84,26 +97,45 @@ func (a *SumData) equal(other AggregationData) bool {
 	return math.Pow(a.Value-a2.Value, 2) < epsilon
 	return math.Pow(a.Value-a2.Value, 2) < epsilon
 }
 }
 
 
+func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
+	switch metricType {
+	case metricdata.TypeCumulativeInt64:
+		return metricdata.NewInt64Point(t, int64(a.Value))
+	case metricdata.TypeCumulativeFloat64:
+		return metricdata.NewFloat64Point(t, a.Value)
+	default:
+		panic("unsupported metricdata.Type")
+	}
+}
+
 // DistributionData is the aggregated data for the
 // DistributionData is the aggregated data for the
 // Distribution aggregation.
 // Distribution aggregation.
 //
 //
 // Most users won't directly access distribution data.
 // Most users won't directly access distribution data.
+//
+// For a distribution with N bounds, the associated DistributionData will have
+// N+1 buckets.
 type DistributionData struct {
 type DistributionData struct {
-	Count           int64     // number of data points aggregated
-	Min             float64   // minimum value in the distribution
-	Max             float64   // max value in the distribution
-	Mean            float64   // mean of the distribution
-	SumOfSquaredDev float64   // sum of the squared deviation from the mean
-	CountPerBucket  []int64   // number of occurrences per bucket
-	bounds          []float64 // histogram distribution of the values
+	Count           int64   // number of data points aggregated
+	Min             float64 // minimum value in the distribution
+	Max             float64 // max value in the distribution
+	Mean            float64 // mean of the distribution
+	SumOfSquaredDev float64 // sum of the squared deviation from the mean
+	CountPerBucket  []int64 // number of occurrences per bucket
+	// ExemplarsPerBucket is slice the same length as CountPerBucket containing
+	// an exemplar for the associated bucket, or nil.
+	ExemplarsPerBucket []*metricdata.Exemplar
+	bounds             []float64 // histogram distribution of the values
 }
 }
 
 
-func newDistributionData(bounds []float64) *DistributionData {
+func newDistributionData(agg *Aggregation) *DistributionData {
+	bucketCount := len(agg.Buckets) + 1
 	return &DistributionData{
 	return &DistributionData{
-		CountPerBucket: make([]int64, len(bounds)+1),
-		bounds:         bounds,
-		Min:            math.MaxFloat64,
-		Max:            math.SmallestNonzeroFloat64,
+		CountPerBucket:     make([]int64, bucketCount),
+		ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount),
+		bounds:             agg.Buckets,
+		Min:                math.MaxFloat64,
+		Max:                math.SmallestNonzeroFloat64,
 	}
 	}
 }
 }
 
 
@@ -119,46 +151,62 @@ func (a *DistributionData) variance() float64 {
 
 
 func (a *DistributionData) isAggregationData() bool { return true }
 func (a *DistributionData) isAggregationData() bool { return true }
 
 
-func (a *DistributionData) addSample(f float64) {
-	if f < a.Min {
-		a.Min = f
+// TODO(songy23): support exemplar attachments.
+func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) {
+	if v < a.Min {
+		a.Min = v
 	}
 	}
-	if f > a.Max {
-		a.Max = f
+	if v > a.Max {
+		a.Max = v
 	}
 	}
 	a.Count++
 	a.Count++
-	a.incrementBucketCount(f)
+	a.addToBucket(v, attachments, t)
 
 
 	if a.Count == 1 {
 	if a.Count == 1 {
-		a.Mean = f
+		a.Mean = v
 		return
 		return
 	}
 	}
 
 
 	oldMean := a.Mean
 	oldMean := a.Mean
-	a.Mean = a.Mean + (f-a.Mean)/float64(a.Count)
-	a.SumOfSquaredDev = a.SumOfSquaredDev + (f-oldMean)*(f-a.Mean)
+	a.Mean = a.Mean + (v-a.Mean)/float64(a.Count)
+	a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean)
 }
 }
 
 
-func (a *DistributionData) incrementBucketCount(f float64) {
-	if len(a.bounds) == 0 {
-		a.CountPerBucket[0]++
-		return
+func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) {
+	var count *int64
+	var i int
+	var b float64
+	for i, b = range a.bounds {
+		if v < b {
+			count = &a.CountPerBucket[i]
+			break
+		}
+	}
+	if count == nil { // Last bucket.
+		i = len(a.bounds)
+		count = &a.CountPerBucket[i]
+	}
+	*count++
+	if exemplar := getExemplar(v, attachments, t); exemplar != nil {
+		a.ExemplarsPerBucket[i] = exemplar
 	}
 	}
+}
 
 
-	for i, b := range a.bounds {
-		if f < b {
-			a.CountPerBucket[i]++
-			return
-		}
+func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar {
+	if len(attachments) == 0 {
+		return nil
+	}
+	return &metricdata.Exemplar{
+		Value:       v,
+		Timestamp:   t,
+		Attachments: attachments,
 	}
 	}
-	a.CountPerBucket[len(a.bounds)]++
 }
 }
 
 
 func (a *DistributionData) clone() AggregationData {
 func (a *DistributionData) clone() AggregationData {
-	counts := make([]int64, len(a.CountPerBucket))
-	copy(counts, a.CountPerBucket)
 	c := *a
 	c := *a
-	c.CountPerBucket = counts
+	c.CountPerBucket = append([]int64(nil), a.CountPerBucket...)
+	c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...)
 	return &c
 	return &c
 }
 }
 
 
@@ -181,6 +229,33 @@ func (a *DistributionData) equal(other AggregationData) bool {
 	return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon
 	return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon
 }
 }
 
 
+func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
+	switch metricType {
+	case metricdata.TypeCumulativeDistribution:
+		buckets := []metricdata.Bucket{}
+		for i := 0; i < len(a.CountPerBucket); i++ {
+			buckets = append(buckets, metricdata.Bucket{
+				Count:    a.CountPerBucket[i],
+				Exemplar: a.ExemplarsPerBucket[i],
+			})
+		}
+		bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds}
+
+		val := &metricdata.Distribution{
+			Count:                 a.Count,
+			Sum:                   a.Sum(),
+			SumOfSquaredDeviation: a.SumOfSquaredDev,
+			BucketOptions:         bucketOptions,
+			Buckets:               buckets,
+		}
+		return metricdata.NewDistributionPoint(t, val)
+
+	default:
+		// TODO: [rghetia] when we have a use case for TypeGaugeDistribution.
+		panic("unsupported metricdata.Type")
+	}
+}
+
 // LastValueData returns the last value recorded for LastValue aggregation.
 // LastValueData returns the last value recorded for LastValue aggregation.
 type LastValueData struct {
 type LastValueData struct {
 	Value float64
 	Value float64
@@ -190,7 +265,7 @@ func (l *LastValueData) isAggregationData() bool {
 	return true
 	return true
 }
 }
 
 
-func (l *LastValueData) addSample(v float64) {
+func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) {
 	l.Value = v
 	l.Value = v
 }
 }
 
 
@@ -205,3 +280,14 @@ func (l *LastValueData) equal(other AggregationData) bool {
 	}
 	}
 	return l.Value == a2.Value
 	return l.Value == a2.Value
 }
 }
+
+func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
+	switch metricType {
+	case metricdata.TypeGaugeInt64:
+		return metricdata.NewInt64Point(t, int64(l.Value))
+	case metricdata.TypeGaugeFloat64:
+		return metricdata.NewFloat64Point(t, l.Value)
+	default:
+		panic("unsupported metricdata.Type")
+	}
+}

+ 6 - 4
vendor/go.opencensus.io/stats/view/collector.go

@@ -17,6 +17,7 @@ package view
 
 
 import (
 import (
 	"sort"
 	"sort"
+	"time"
 
 
 	"go.opencensus.io/internal/tagencoding"
 	"go.opencensus.io/internal/tagencoding"
 	"go.opencensus.io/tag"
 	"go.opencensus.io/tag"
@@ -31,20 +32,21 @@ type collector struct {
 	a *Aggregation
 	a *Aggregation
 }
 }
 
 
-func (c *collector) addSample(s string, v float64) {
+func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) {
 	aggregator, ok := c.signatures[s]
 	aggregator, ok := c.signatures[s]
 	if !ok {
 	if !ok {
 		aggregator = c.a.newData()
 		aggregator = c.a.newData()
 		c.signatures[s] = aggregator
 		c.signatures[s] = aggregator
 	}
 	}
-	aggregator.addSample(v)
+	aggregator.addSample(v, attachments, t)
 }
 }
 
 
+// collectRows returns a snapshot of the collected Row values.
 func (c *collector) collectedRows(keys []tag.Key) []*Row {
 func (c *collector) collectedRows(keys []tag.Key) []*Row {
-	var rows []*Row
+	rows := make([]*Row, 0, len(c.signatures))
 	for sig, aggregator := range c.signatures {
 	for sig, aggregator := range c.signatures {
 		tags := decodeTags([]byte(sig), keys)
 		tags := decodeTags([]byte(sig), keys)
-		row := &Row{tags, aggregator}
+		row := &Row{Tags: tags, Data: aggregator.clone()}
 		rows = append(rows, row)
 		rows = append(rows, row)
 	}
 	}
 	return rows
 	return rows

+ 28 - 27
vendor/go.opencensus.io/stats/view/doc.go

@@ -13,33 +13,34 @@
 // limitations under the License.
 // limitations under the License.
 //
 //
 
 
-/*
-Package view contains support for collecting and exposing aggregates over stats.
-
-In order to collect measurements, views need to be defined and registered.
-A view allows recorded measurements to be filtered and aggregated over a time window.
-
-All recorded measurements can be filtered by a list of tags.
-
-OpenCensus provides several aggregation methods: count, distribution and sum.
-Count aggregation only counts the number of measurement points. Distribution
-aggregation provides statistical summary of the aggregated data. Sum distribution
-sums up the measurement points. Aggregations are cumulative.
-
-Users can dynamically create and delete views.
-
-Libraries can export their own views and claim the view names
-by registering them themselves.
-
-Exporting
-
-Collected and aggregated data can be exported to a metric collection
-backend by registering its exporter.
-
-Multiple exporters can be registered to upload the data to various
-different backends. Users need to unregister the exporters once they
-no longer are needed.
-*/
+// Package view contains support for collecting and exposing aggregates over stats.
+//
+// In order to collect measurements, views need to be defined and registered.
+// A view allows recorded measurements to be filtered and aggregated.
+//
+// All recorded measurements can be grouped by a list of tags.
+//
+// OpenCensus provides several aggregation methods: Count, Distribution and Sum.
+//
+// Count only counts the number of measurement points recorded.
+// Distribution provides statistical summary of the aggregated data by counting
+// how many recorded measurements fall into each bucket.
+// Sum adds up the measurement values.
+// LastValue just keeps track of the most recently recorded measurement value.
+// All aggregations are cumulative.
+//
+// Views can be registered and unregistered at any time during program execution.
+//
+// Libraries can define views but it is recommended that in most cases registering
+// views be left up to applications.
+//
+// Exporting
+//
+// Collected and aggregated data can be exported to a metric collection
+// backend by registering its exporter.
+//
+// Multiple exporters can be registered to upload the data to various
+// different back ends.
 package view // import "go.opencensus.io/stats/view"
 package view // import "go.opencensus.io/stats/view"
 
 
 // TODO(acetechnologist): Add a link to the language independent OpenCensus
 // TODO(acetechnologist): Add a link to the language independent OpenCensus

+ 3 - 0
vendor/go.opencensus.io/stats/view/export.go

@@ -27,6 +27,9 @@ var (
 // Exporter takes a significant amount of time to
 // Exporter takes a significant amount of time to
 // process a Data, that work should be done on another goroutine.
 // process a Data, that work should be done on another goroutine.
 //
 //
+// It is safe to assume that ExportView will not be called concurrently from
+// multiple goroutines.
+//
 // The Data should not be modified.
 // The Data should not be modified.
 type Exporter interface {
 type Exporter interface {
 	ExportView(viewData *Data)
 	ExportView(viewData *Data)

+ 54 - 16
vendor/go.opencensus.io/stats/view/view.go

@@ -17,19 +17,20 @@ package view
 
 
 import (
 import (
 	"bytes"
 	"bytes"
+	"errors"
 	"fmt"
 	"fmt"
 	"reflect"
 	"reflect"
 	"sort"
 	"sort"
 	"sync/atomic"
 	"sync/atomic"
 	"time"
 	"time"
 
 
+	"go.opencensus.io/metric/metricdata"
 	"go.opencensus.io/stats"
 	"go.opencensus.io/stats"
-	"go.opencensus.io/stats/internal"
 	"go.opencensus.io/tag"
 	"go.opencensus.io/tag"
 )
 )
 
 
 // View allows users to aggregate the recorded stats.Measurements.
 // View allows users to aggregate the recorded stats.Measurements.
-// Views need to be passed to the Subscribe function to be before data will be
+// Views need to be passed to the Register function before data will be
 // collected and sent to Exporters.
 // collected and sent to Exporters.
 type View struct {
 type View struct {
 	Name        string // Name of View. Must be unique. If unset, will default to the name of the Measure.
 	Name        string // Name of View. Must be unique. If unset, will default to the name of the Measure.
@@ -42,7 +43,7 @@ type View struct {
 	// Measure is a stats.Measure to aggregate in this view.
 	// Measure is a stats.Measure to aggregate in this view.
 	Measure stats.Measure
 	Measure stats.Measure
 
 
-	// Aggregation is the aggregation function tp apply to the set of Measurements.
+	// Aggregation is the aggregation function to apply to the set of Measurements.
 	Aggregation *Aggregation
 	Aggregation *Aggregation
 }
 }
 
 
@@ -67,14 +68,19 @@ func (v *View) same(other *View) bool {
 		v.Measure.Name() == other.Measure.Name()
 		v.Measure.Name() == other.Measure.Name()
 }
 }
 
 
+// ErrNegativeBucketBounds error returned if histogram contains negative bounds.
+//
+// Deprecated: this should not be public.
+var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported")
+
 // canonicalize canonicalizes v by setting explicit
 // canonicalize canonicalizes v by setting explicit
 // defaults for Name and Description and sorting the TagKeys
 // defaults for Name and Description and sorting the TagKeys
 func (v *View) canonicalize() error {
 func (v *View) canonicalize() error {
 	if v.Measure == nil {
 	if v.Measure == nil {
-		return fmt.Errorf("cannot subscribe view %q: measure not set", v.Name)
+		return fmt.Errorf("cannot register view %q: measure not set", v.Name)
 	}
 	}
 	if v.Aggregation == nil {
 	if v.Aggregation == nil {
-		return fmt.Errorf("cannot subscribe view %q: aggregation not set", v.Name)
+		return fmt.Errorf("cannot register view %q: aggregation not set", v.Name)
 	}
 	}
 	if v.Name == "" {
 	if v.Name == "" {
 		v.Name = v.Measure.Name()
 		v.Name = v.Measure.Name()
@@ -88,20 +94,40 @@ func (v *View) canonicalize() error {
 	sort.Slice(v.TagKeys, func(i, j int) bool {
 	sort.Slice(v.TagKeys, func(i, j int) bool {
 		return v.TagKeys[i].Name() < v.TagKeys[j].Name()
 		return v.TagKeys[i].Name() < v.TagKeys[j].Name()
 	})
 	})
+	sort.Float64s(v.Aggregation.Buckets)
+	for _, b := range v.Aggregation.Buckets {
+		if b < 0 {
+			return ErrNegativeBucketBounds
+		}
+	}
+	// drop 0 bucket silently.
+	v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...)
+
 	return nil
 	return nil
 }
 }
 
 
+func dropZeroBounds(bounds ...float64) []float64 {
+	for i, bound := range bounds {
+		if bound > 0 {
+			return bounds[i:]
+		}
+	}
+	return []float64{}
+}
+
 // viewInternal is the internal representation of a View.
 // viewInternal is the internal representation of a View.
 type viewInternal struct {
 type viewInternal struct {
-	view       *View  // view is the canonicalized View definition associated with this view.
-	subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access
-	collector  *collector
+	view             *View  // view is the canonicalized View definition associated with this view.
+	subscribed       uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access
+	collector        *collector
+	metricDescriptor *metricdata.Descriptor
 }
 }
 
 
 func newViewInternal(v *View) (*viewInternal, error) {
 func newViewInternal(v *View) (*viewInternal, error) {
 	return &viewInternal{
 	return &viewInternal{
-		view:      v,
-		collector: &collector{make(map[string]AggregationData), v.Aggregation},
+		view:             v,
+		collector:        &collector{make(map[string]AggregationData), v.Aggregation},
+		metricDescriptor: viewToMetricDescriptor(v),
 	}, nil
 	}, nil
 }
 }
 
 
@@ -127,12 +153,12 @@ func (v *viewInternal) collectedRows() []*Row {
 	return v.collector.collectedRows(v.view.TagKeys)
 	return v.collector.collectedRows(v.view.TagKeys)
 }
 }
 
 
-func (v *viewInternal) addSample(m *tag.Map, val float64) {
+func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) {
 	if !v.isSubscribed() {
 	if !v.isSubscribed() {
 		return
 		return
 	}
 	}
 	sig := string(encodeWithKeys(m, v.view.TagKeys))
 	sig := string(encodeWithKeys(m, v.view.TagKeys))
-	v.collector.addSample(sig, val)
+	v.collector.addSample(sig, val, attachments, t)
 }
 }
 
 
 // A Data is a set of rows about usage of the single measure associated
 // A Data is a set of rows about usage of the single measure associated
@@ -163,7 +189,7 @@ func (r *Row) String() string {
 }
 }
 
 
 // Equal returns true if both rows are equal. Tags are expected to be ordered
 // Equal returns true if both rows are equal. Tags are expected to be ordered
-// by the key name. Even both rows have the same tags but the tags appear in
+// by the key name. Even if both rows have the same tags but the tags appear in
 // different orders it will return false.
 // different orders it will return false.
 func (r *Row) Equal(other *Row) bool {
 func (r *Row) Equal(other *Row) bool {
 	if r == other {
 	if r == other {
@@ -172,11 +198,23 @@ func (r *Row) Equal(other *Row) bool {
 	return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data)
 	return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data)
 }
 }
 
 
+const maxNameLength = 255
+
+// Returns true if the given string contains only printable characters.
+func isPrintable(str string) bool {
+	for _, r := range str {
+		if !(r >= ' ' && r <= '~') {
+			return false
+		}
+	}
+	return true
+}
+
 func checkViewName(name string) error {
 func checkViewName(name string) error {
-	if len(name) > internal.MaxNameLength {
-		return fmt.Errorf("view name cannot be larger than %v", internal.MaxNameLength)
+	if len(name) > maxNameLength {
+		return fmt.Errorf("view name cannot be larger than %v", maxNameLength)
 	}
 	}
-	if !internal.IsPrintable(name) {
+	if !isPrintable(name) {
 		return fmt.Errorf("view name needs to be an ASCII string")
 		return fmt.Errorf("view name needs to be an ASCII string")
 	}
 	}
 	return nil
 	return nil

+ 149 - 0
vendor/go.opencensus.io/stats/view/view_to_metric.go

@@ -0,0 +1,149 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package view
+
+import (
+	"time"
+
+	"go.opencensus.io/metric/metricdata"
+	"go.opencensus.io/stats"
+)
+
+func getUnit(unit string) metricdata.Unit {
+	switch unit {
+	case "1":
+		return metricdata.UnitDimensionless
+	case "ms":
+		return metricdata.UnitMilliseconds
+	case "By":
+		return metricdata.UnitBytes
+	}
+	return metricdata.UnitDimensionless
+}
+
+func getType(v *View) metricdata.Type {
+	m := v.Measure
+	agg := v.Aggregation
+
+	switch agg.Type {
+	case AggTypeSum:
+		switch m.(type) {
+		case *stats.Int64Measure:
+			return metricdata.TypeCumulativeInt64
+		case *stats.Float64Measure:
+			return metricdata.TypeCumulativeFloat64
+		default:
+			panic("unexpected measure type")
+		}
+	case AggTypeDistribution:
+		return metricdata.TypeCumulativeDistribution
+	case AggTypeLastValue:
+		switch m.(type) {
+		case *stats.Int64Measure:
+			return metricdata.TypeGaugeInt64
+		case *stats.Float64Measure:
+			return metricdata.TypeGaugeFloat64
+		default:
+			panic("unexpected measure type")
+		}
+	case AggTypeCount:
+		switch m.(type) {
+		case *stats.Int64Measure:
+			return metricdata.TypeCumulativeInt64
+		case *stats.Float64Measure:
+			return metricdata.TypeCumulativeInt64
+		default:
+			panic("unexpected measure type")
+		}
+	default:
+		panic("unexpected aggregation type")
+	}
+}
+
+func getLabelKeys(v *View) []metricdata.LabelKey {
+	labelKeys := []metricdata.LabelKey{}
+	for _, k := range v.TagKeys {
+		labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()})
+	}
+	return labelKeys
+}
+
+func viewToMetricDescriptor(v *View) *metricdata.Descriptor {
+	return &metricdata.Descriptor{
+		Name:        v.Name,
+		Description: v.Description,
+		Unit:        convertUnit(v),
+		Type:        getType(v),
+		LabelKeys:   getLabelKeys(v),
+	}
+}
+
+func convertUnit(v *View) metricdata.Unit {
+	switch v.Aggregation.Type {
+	case AggTypeCount:
+		return metricdata.UnitDimensionless
+	default:
+		return getUnit(v.Measure.Unit())
+	}
+}
+
+func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue {
+	labelValues := []metricdata.LabelValue{}
+	tagMap := make(map[string]string)
+	for _, tag := range row.Tags {
+		tagMap[tag.Key.Name()] = tag.Value
+	}
+
+	for _, key := range expectedKeys {
+		if val, ok := tagMap[key.Key]; ok {
+			labelValues = append(labelValues, metricdata.NewLabelValue(val))
+		} else {
+			labelValues = append(labelValues, metricdata.LabelValue{})
+		}
+	}
+	return labelValues
+}
+
+func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries {
+	return &metricdata.TimeSeries{
+		Points:      []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)},
+		LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys),
+		StartTime:   startTime,
+	}
+}
+
+func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric {
+	if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 ||
+		v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 {
+		startTime = time.Time{}
+	}
+
+	rows := v.collectedRows()
+	if len(rows) == 0 {
+		return nil
+	}
+
+	ts := []*metricdata.TimeSeries{}
+	for _, row := range rows {
+		ts = append(ts, rowToTimeseries(v, row, now, startTime))
+	}
+
+	m := &metricdata.Metric{
+		Descriptor: *v.metricDescriptor,
+		TimeSeries: ts,
+	}
+	return m
+}

+ 94 - 44
vendor/go.opencensus.io/stats/view/worker.go

@@ -17,8 +17,11 @@ package view
 
 
 import (
 import (
 	"fmt"
 	"fmt"
+	"sync"
 	"time"
 	"time"
 
 
+	"go.opencensus.io/metric/metricdata"
+	"go.opencensus.io/metric/metricproducer"
 	"go.opencensus.io/stats"
 	"go.opencensus.io/stats"
 	"go.opencensus.io/stats/internal"
 	"go.opencensus.io/stats/internal"
 	"go.opencensus.io/tag"
 	"go.opencensus.io/tag"
@@ -43,14 +46,15 @@ type worker struct {
 	timer      *time.Ticker
 	timer      *time.Ticker
 	c          chan command
 	c          chan command
 	quit, done chan bool
 	quit, done chan bool
+	mu         sync.RWMutex
 }
 }
 
 
 var defaultWorker *worker
 var defaultWorker *worker
 
 
 var defaultReportingDuration = 10 * time.Second
 var defaultReportingDuration = 10 * time.Second
 
 
-// Find returns a subscribed view associated with this name.
-// If no subscribed view is found, nil is returned.
+// Find returns a registered view associated with this name.
+// If no registered view is found, nil is returned.
 func Find(name string) (v *View) {
 func Find(name string) (v *View) {
 	req := &getViewByNameReq{
 	req := &getViewByNameReq{
 		name: name,
 		name: name,
@@ -62,13 +66,8 @@ func Find(name string) (v *View) {
 }
 }
 
 
 // Register begins collecting data for the given views.
 // Register begins collecting data for the given views.
-// Once a view is subscribed, it reports data to the registered exporters.
+// Once a view is registered, it reports data to the registered exporters.
 func Register(views ...*View) error {
 func Register(views ...*View) error {
-	for _, v := range views {
-		if err := v.canonicalize(); err != nil {
-			return err
-		}
-	}
 	req := &registerViewReq{
 	req := &registerViewReq{
 		views: views,
 		views: views,
 		err:   make(chan error),
 		err:   make(chan error),
@@ -94,6 +93,8 @@ func Unregister(views ...*View) {
 	<-req.done
 	<-req.done
 }
 }
 
 
+// RetrieveData gets a snapshot of the data collected for the the view registered
+// with the given name. It is intended for testing only.
 func RetrieveData(viewName string) ([]*Row, error) {
 func RetrieveData(viewName string) ([]*Row, error) {
 	req := &retrieveDataReq{
 	req := &retrieveDataReq{
 		now: time.Now(),
 		now: time.Now(),
@@ -105,17 +106,23 @@ func RetrieveData(viewName string) ([]*Row, error) {
 	return resp.rows, resp.err
 	return resp.rows, resp.err
 }
 }
 
 
-func record(tags *tag.Map, ms interface{}) {
+func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
 	req := &recordReq{
 	req := &recordReq{
-		tm: tags,
-		ms: ms.([]stats.Measurement),
+		tm:          tags,
+		ms:          ms.([]stats.Measurement),
+		attachments: attachments,
+		t:           time.Now(),
 	}
 	}
 	defaultWorker.c <- req
 	defaultWorker.c <- req
 }
 }
 
 
 // SetReportingPeriod sets the interval between reporting aggregated views in
 // SetReportingPeriod sets the interval between reporting aggregated views in
-// the program. If duration is less than or
-// equal to zero, it enables the default behavior.
+// the program. If duration is less than or equal to zero, it enables the
+// default behavior.
+//
+// Note: each exporter makes different promises about what the lowest supported
+// duration is. For example, the Stackdriver exporter recommends a value no
+// lower than 1 minute. Consult each exporter per your needs.
 func SetReportingPeriod(d time.Duration) {
 func SetReportingPeriod(d time.Duration) {
 	// TODO(acetechnologist): ensure that the duration d is more than a certain
 	// TODO(acetechnologist): ensure that the duration d is more than a certain
 	// value. e.g. 1s
 	// value. e.g. 1s
@@ -140,6 +147,9 @@ func newWorker() *worker {
 }
 }
 
 
 func (w *worker) start() {
 func (w *worker) start() {
+	prodMgr := metricproducer.GlobalManager()
+	prodMgr.AddProducer(w)
+
 	for {
 	for {
 		select {
 		select {
 		case cmd := <-w.c:
 		case cmd := <-w.c:
@@ -156,6 +166,9 @@ func (w *worker) start() {
 }
 }
 
 
 func (w *worker) stop() {
 func (w *worker) stop() {
+	prodMgr := metricproducer.GlobalManager()
+	prodMgr.DeleteProducer(w)
+
 	w.quit <- true
 	w.quit <- true
 	<-w.done
 	<-w.done
 }
 }
@@ -173,13 +186,15 @@ func (w *worker) getMeasureRef(name string) *measureRef {
 }
 }
 
 
 func (w *worker) tryRegisterView(v *View) (*viewInternal, error) {
 func (w *worker) tryRegisterView(v *View) (*viewInternal, error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
 	vi, err := newViewInternal(v)
 	vi, err := newViewInternal(v)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 	if x, ok := w.views[vi.view.Name]; ok {
 	if x, ok := w.views[vi.view.Name]; ok {
 		if !x.view.same(vi.view) {
 		if !x.view.same(vi.view) {
-			return nil, fmt.Errorf("cannot subscribe view %q; a different view with the same name is already subscribed", v.Name)
+			return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name)
 		}
 		}
 
 
 		// the view is already registered so there is nothing to do and the
 		// the view is already registered so there is nothing to do and the
@@ -192,40 +207,75 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) {
 	return vi, nil
 	return vi, nil
 }
 }
 
 
+func (w *worker) unregisterView(viewName string) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	delete(w.views, viewName)
+}
+
+func (w *worker) reportView(v *viewInternal, now time.Time) {
+	if !v.isSubscribed() {
+		return
+	}
+	rows := v.collectedRows()
+	_, ok := w.startTimes[v]
+	if !ok {
+		w.startTimes[v] = now
+	}
+	viewData := &Data{
+		View:  v.view,
+		Start: w.startTimes[v],
+		End:   time.Now(),
+		Rows:  rows,
+	}
+	exportersMu.Lock()
+	for e := range exporters {
+		e.ExportView(viewData)
+	}
+	exportersMu.Unlock()
+}
+
 func (w *worker) reportUsage(now time.Time) {
 func (w *worker) reportUsage(now time.Time) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
 	for _, v := range w.views {
 	for _, v := range w.views {
-		if !v.isSubscribed() {
-			continue
-		}
-		rows := v.collectedRows()
-		_, ok := w.startTimes[v]
-		if !ok {
-			w.startTimes[v] = now
-		}
-		// Make sure collector is never going
-		// to mutate the exported data.
-		rows = deepCopyRowData(rows)
-		viewData := &Data{
-			View:  v.view,
-			Start: w.startTimes[v],
-			End:   time.Now(),
-			Rows:  rows,
-		}
-		exportersMu.Lock()
-		for e := range exporters {
-			e.ExportView(viewData)
-		}
-		exportersMu.Unlock()
+		w.reportView(v, now)
 	}
 	}
 }
 }
 
 
-func deepCopyRowData(rows []*Row) []*Row {
-	newRows := make([]*Row, 0, len(rows))
-	for _, r := range rows {
-		newRows = append(newRows, &Row{
-			Data: r.Data.clone(),
-			Tags: r.Tags,
-		})
+func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric {
+	if !v.isSubscribed() {
+		return nil
+	}
+
+	_, ok := w.startTimes[v]
+	if !ok {
+		w.startTimes[v] = now
+	}
+
+	var startTime time.Time
+	if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 ||
+		v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 {
+		startTime = time.Time{}
+	} else {
+		startTime = w.startTimes[v]
+	}
+
+	return viewToMetric(v, now, startTime)
+}
+
+// Read reads all view data and returns them as metrics.
+// It is typically invoked by metric reader to export stats in metric format.
+func (w *worker) Read() []*metricdata.Metric {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	now := time.Now()
+	metrics := make([]*metricdata.Metric, 0, len(w.views))
+	for _, v := range w.views {
+		metric := w.toMetric(v, now)
+		if metric != nil {
+			metrics = append(metrics, metric)
+		}
 	}
 	}
-	return newRows
+	return metrics
 }
 }

+ 22 - 7
vendor/go.opencensus.io/stats/view/worker_commands.go

@@ -56,6 +56,12 @@ type registerViewReq struct {
 }
 }
 
 
 func (cmd *registerViewReq) handleCommand(w *worker) {
 func (cmd *registerViewReq) handleCommand(w *worker) {
+	for _, v := range cmd.views {
+		if err := v.canonicalize(); err != nil {
+			cmd.err <- err
+			return
+		}
+	}
 	var errstr []string
 	var errstr []string
 	for _, view := range cmd.views {
 	for _, view := range cmd.views {
 		vi, err := w.tryRegisterView(view)
 		vi, err := w.tryRegisterView(view)
@@ -73,7 +79,7 @@ func (cmd *registerViewReq) handleCommand(w *worker) {
 	}
 	}
 }
 }
 
 
-// unregisterFromViewReq is the command to unsubscribe to a view. Has no
+// unregisterFromViewReq is the command to unregister to a view. Has no
 // impact on the data collection for client that are pulling data from the
 // impact on the data collection for client that are pulling data from the
 // library.
 // library.
 type unregisterFromViewReq struct {
 type unregisterFromViewReq struct {
@@ -88,13 +94,16 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
 			continue
 			continue
 		}
 		}
 
 
+		// Report pending data for this view before removing it.
+		w.reportView(vi, time.Now())
+
 		vi.unsubscribe()
 		vi.unsubscribe()
 		if !vi.isSubscribed() {
 		if !vi.isSubscribed() {
 			// this was the last subscription and view is not collecting anymore.
 			// this was the last subscription and view is not collecting anymore.
 			// The collected data can be cleared.
 			// The collected data can be cleared.
 			vi.clearRows()
 			vi.clearRows()
 		}
 		}
-		delete(w.views, name)
+		w.unregisterView(name)
 	}
 	}
 	cmd.done <- struct{}{}
 	cmd.done <- struct{}{}
 }
 }
@@ -112,6 +121,8 @@ type retrieveDataResp struct {
 }
 }
 
 
 func (cmd *retrieveDataReq) handleCommand(w *worker) {
 func (cmd *retrieveDataReq) handleCommand(w *worker) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
 	vi, ok := w.views[cmd.v]
 	vi, ok := w.views[cmd.v]
 	if !ok {
 	if !ok {
 		cmd.c <- &retrieveDataResp{
 		cmd.c <- &retrieveDataResp{
@@ -137,24 +148,28 @@ func (cmd *retrieveDataReq) handleCommand(w *worker) {
 // recordReq is the command to record data related to multiple measures
 // recordReq is the command to record data related to multiple measures
 // at once.
 // at once.
 type recordReq struct {
 type recordReq struct {
-	tm *tag.Map
-	ms []stats.Measurement
+	tm          *tag.Map
+	ms          []stats.Measurement
+	attachments map[string]interface{}
+	t           time.Time
 }
 }
 
 
 func (cmd *recordReq) handleCommand(w *worker) {
 func (cmd *recordReq) handleCommand(w *worker) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
 	for _, m := range cmd.ms {
 	for _, m := range cmd.ms {
-		if (m == stats.Measurement{}) { // not subscribed
+		if (m == stats.Measurement{}) { // not registered
 			continue
 			continue
 		}
 		}
 		ref := w.getMeasureRef(m.Measure().Name())
 		ref := w.getMeasureRef(m.Measure().Name())
 		for v := range ref.views {
 		for v := range ref.views {
-			v.addSample(cmd.tm, m.Value())
+			v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now())
 		}
 		}
 	}
 	}
 }
 }
 
 
 // setReportingPeriodReq is the command to modify the duration between
 // setReportingPeriodReq is the command to modify the duration between
-// reporting the collected data to the subscribed clients.
+// reporting the collected data to the registered clients.
 type setReportingPeriodReq struct {
 type setReportingPeriodReq struct {
 	d time.Duration
 	d time.Duration
 	c chan bool
 	c chan bool

+ 3 - 1
vendor/go.opencensus.io/tag/context.go

@@ -15,7 +15,9 @@
 
 
 package tag
 package tag
 
 
-import "context"
+import (
+	"context"
+)
 
 
 // FromContext returns the tag map stored in the context.
 // FromContext returns the tag map stored in the context.
 func FromContext(ctx context.Context) *Map {
 func FromContext(ctx context.Context) *Map {

+ 10 - 1
vendor/go.opencensus.io/tag/key.go

@@ -21,7 +21,7 @@ type Key struct {
 }
 }
 
 
 // NewKey creates or retrieves a string key identified by name.
 // NewKey creates or retrieves a string key identified by name.
-// Calling NewKey consequently with the same name returns the same key.
+// Calling NewKey more than once with the same name returns the same key.
 func NewKey(name string) (Key, error) {
 func NewKey(name string) (Key, error) {
 	if !checkKeyName(name) {
 	if !checkKeyName(name) {
 		return Key{}, errInvalidKeyName
 		return Key{}, errInvalidKeyName
@@ -29,6 +29,15 @@ func NewKey(name string) (Key, error) {
 	return Key{name: name}, nil
 	return Key{name: name}, nil
 }
 }
 
 
+// MustNewKey returns a key with the given name, and panics if name is an invalid key name.
+func MustNewKey(name string) Key {
+	k, err := NewKey(name)
+	if err != nil {
+		panic(err)
+	}
+	return k
+}
+
 // Name returns the name of the key.
 // Name returns the name of the key.
 func (k Key) Name() string {
 func (k Key) Name() string {
 	return k.name
 	return k.name

+ 49 - 17
vendor/go.opencensus.io/tag/map.go

@@ -28,10 +28,15 @@ type Tag struct {
 	Value string
 	Value string
 }
 }
 
 
+type tagContent struct {
+	value string
+	m     metadatas
+}
+
 // Map is a map of tags. Use New to create a context containing
 // Map is a map of tags. Use New to create a context containing
 // a new Map.
 // a new Map.
 type Map struct {
 type Map struct {
-	m map[Key]string
+	m map[Key]tagContent
 }
 }
 
 
 // Value returns the value for the key if a value for the key exists.
 // Value returns the value for the key if a value for the key exists.
@@ -40,7 +45,7 @@ func (m *Map) Value(k Key) (string, bool) {
 		return "", false
 		return "", false
 	}
 	}
 	v, ok := m.m[k]
 	v, ok := m.m[k]
-	return v, ok
+	return v.value, ok
 }
 }
 
 
 func (m *Map) String() string {
 func (m *Map) String() string {
@@ -62,21 +67,21 @@ func (m *Map) String() string {
 	return buffer.String()
 	return buffer.String()
 }
 }
 
 
-func (m *Map) insert(k Key, v string) {
+func (m *Map) insert(k Key, v string, md metadatas) {
 	if _, ok := m.m[k]; ok {
 	if _, ok := m.m[k]; ok {
 		return
 		return
 	}
 	}
-	m.m[k] = v
+	m.m[k] = tagContent{value: v, m: md}
 }
 }
 
 
-func (m *Map) update(k Key, v string) {
+func (m *Map) update(k Key, v string, md metadatas) {
 	if _, ok := m.m[k]; ok {
 	if _, ok := m.m[k]; ok {
-		m.m[k] = v
+		m.m[k] = tagContent{value: v, m: md}
 	}
 	}
 }
 }
 
 
-func (m *Map) upsert(k Key, v string) {
-	m.m[k] = v
+func (m *Map) upsert(k Key, v string, md metadatas) {
+	m.m[k] = tagContent{value: v, m: md}
 }
 }
 
 
 func (m *Map) delete(k Key) {
 func (m *Map) delete(k Key) {
@@ -84,7 +89,7 @@ func (m *Map) delete(k Key) {
 }
 }
 
 
 func newMap() *Map {
 func newMap() *Map {
-	return &Map{m: make(map[Key]string)}
+	return &Map{m: make(map[Key]tagContent)}
 }
 }
 
 
 // Mutator modifies a tag map.
 // Mutator modifies a tag map.
@@ -95,13 +100,17 @@ type Mutator interface {
 // Insert returns a mutator that inserts a
 // Insert returns a mutator that inserts a
 // value associated with k. If k already exists in the tag map,
 // value associated with k. If k already exists in the tag map,
 // mutator doesn't update the value.
 // mutator doesn't update the value.
-func Insert(k Key, v string) Mutator {
+// Metadata applies metadata to the tag. It is optional.
+// Metadatas are applied in the order in which it is provided.
+// If more than one metadata updates the same attribute then
+// the update from the last metadata prevails.
+func Insert(k Key, v string, mds ...Metadata) Mutator {
 	return &mutator{
 	return &mutator{
 		fn: func(m *Map) (*Map, error) {
 		fn: func(m *Map) (*Map, error) {
 			if !checkValue(v) {
 			if !checkValue(v) {
 				return nil, errInvalidValue
 				return nil, errInvalidValue
 			}
 			}
-			m.insert(k, v)
+			m.insert(k, v, createMetadatas(mds...))
 			return m, nil
 			return m, nil
 		},
 		},
 	}
 	}
@@ -110,13 +119,17 @@ func Insert(k Key, v string) Mutator {
 // Update returns a mutator that updates the
 // Update returns a mutator that updates the
 // value of the tag associated with k with v. If k doesn't
 // value of the tag associated with k with v. If k doesn't
 // exists in the tag map, the mutator doesn't insert the value.
 // exists in the tag map, the mutator doesn't insert the value.
-func Update(k Key, v string) Mutator {
+// Metadata applies metadata to the tag. It is optional.
+// Metadatas are applied in the order in which it is provided.
+// If more than one metadata updates the same attribute then
+// the update from the last metadata prevails.
+func Update(k Key, v string, mds ...Metadata) Mutator {
 	return &mutator{
 	return &mutator{
 		fn: func(m *Map) (*Map, error) {
 		fn: func(m *Map) (*Map, error) {
 			if !checkValue(v) {
 			if !checkValue(v) {
 				return nil, errInvalidValue
 				return nil, errInvalidValue
 			}
 			}
-			m.update(k, v)
+			m.update(k, v, createMetadatas(mds...))
 			return m, nil
 			return m, nil
 		},
 		},
 	}
 	}
@@ -126,18 +139,37 @@ func Update(k Key, v string) Mutator {
 // value of the tag associated with k with v. It inserts the
 // value of the tag associated with k with v. It inserts the
 // value if k doesn't exist already. It mutates the value
 // value if k doesn't exist already. It mutates the value
 // if k already exists.
 // if k already exists.
-func Upsert(k Key, v string) Mutator {
+// Metadata applies metadata to the tag. It is optional.
+// Metadatas are applied in the order in which it is provided.
+// If more than one metadata updates the same attribute then
+// the update from the last metadata prevails.
+func Upsert(k Key, v string, mds ...Metadata) Mutator {
 	return &mutator{
 	return &mutator{
 		fn: func(m *Map) (*Map, error) {
 		fn: func(m *Map) (*Map, error) {
 			if !checkValue(v) {
 			if !checkValue(v) {
 				return nil, errInvalidValue
 				return nil, errInvalidValue
 			}
 			}
-			m.upsert(k, v)
+			m.upsert(k, v, createMetadatas(mds...))
 			return m, nil
 			return m, nil
 		},
 		},
 	}
 	}
 }
 }
 
 
+func createMetadatas(mds ...Metadata) metadatas {
+	var metas metadatas
+	if len(mds) > 0 {
+		for _, md := range mds {
+			if md != nil {
+				md(&metas)
+			}
+		}
+	} else {
+		WithTTL(TTLUnlimitedPropagation)(&metas)
+	}
+	return metas
+
+}
+
 // Delete returns a mutator that deletes
 // Delete returns a mutator that deletes
 // the value associated with k.
 // the value associated with k.
 func Delete(k Key) Mutator {
 func Delete(k Key) Mutator {
@@ -160,10 +192,10 @@ func New(ctx context.Context, mutator ...Mutator) (context.Context, error) {
 			if !checkKeyName(k.Name()) {
 			if !checkKeyName(k.Name()) {
 				return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName)
 				return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName)
 			}
 			}
-			if !checkValue(v) {
+			if !checkValue(v.value) {
 				return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue)
 				return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue)
 			}
 			}
-			m.insert(k, v)
+			m.insert(k, v.value, v.m)
 		}
 		}
 	}
 	}
 	var err error
 	var err error

+ 31 - 13
vendor/go.opencensus.io/tag/map_codec.go

@@ -162,14 +162,19 @@ func (eg *encoderGRPC) bytes() []byte {
 // Encode encodes the tag map into a []byte. It is useful to propagate
 // Encode encodes the tag map into a []byte. It is useful to propagate
 // the tag maps on wire in binary format.
 // the tag maps on wire in binary format.
 func Encode(m *Map) []byte {
 func Encode(m *Map) []byte {
+	if m == nil {
+		return nil
+	}
 	eg := &encoderGRPC{
 	eg := &encoderGRPC{
 		buf: make([]byte, len(m.m)),
 		buf: make([]byte, len(m.m)),
 	}
 	}
-	eg.writeByte(byte(tagsVersionID))
+	eg.writeByte(tagsVersionID)
 	for k, v := range m.m {
 	for k, v := range m.m {
-		eg.writeByte(byte(keyTypeString))
-		eg.writeStringWithVarintLen(k.name)
-		eg.writeBytesWithVarintLen([]byte(v))
+		if v.m.ttl.ttl == valueTTLUnlimitedPropagation {
+			eg.writeByte(byte(keyTypeString))
+			eg.writeStringWithVarintLen(k.name)
+			eg.writeBytesWithVarintLen([]byte(v.value))
+		}
 	}
 	}
 	return eg.bytes()
 	return eg.bytes()
 }
 }
@@ -177,45 +182,58 @@ func Encode(m *Map) []byte {
 // Decode decodes the given []byte into a tag map.
 // Decode decodes the given []byte into a tag map.
 func Decode(bytes []byte) (*Map, error) {
 func Decode(bytes []byte) (*Map, error) {
 	ts := newMap()
 	ts := newMap()
+	err := DecodeEach(bytes, ts.upsert)
+	if err != nil {
+		// no partial failures
+		return nil, err
+	}
+	return ts, nil
+}
 
 
+// DecodeEach decodes the given serialized tag map, calling handler for each
+// tag key and value decoded.
+func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error {
 	eg := &encoderGRPC{
 	eg := &encoderGRPC{
 		buf: bytes,
 		buf: bytes,
 	}
 	}
 	if len(eg.buf) == 0 {
 	if len(eg.buf) == 0 {
-		return ts, nil
+		return nil
 	}
 	}
 
 
 	version := eg.readByte()
 	version := eg.readByte()
 	if version > tagsVersionID {
 	if version > tagsVersionID {
-		return nil, fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID)
+		return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID)
 	}
 	}
 
 
 	for !eg.readEnded() {
 	for !eg.readEnded() {
 		typ := keyType(eg.readByte())
 		typ := keyType(eg.readByte())
 
 
 		if typ != keyTypeString {
 		if typ != keyTypeString {
-			return nil, fmt.Errorf("cannot decode: invalid key type: %q", typ)
+			return fmt.Errorf("cannot decode: invalid key type: %q", typ)
 		}
 		}
 
 
 		k, err := eg.readBytesWithVarintLen()
 		k, err := eg.readBytesWithVarintLen()
 		if err != nil {
 		if err != nil {
-			return nil, err
+			return err
 		}
 		}
 
 
 		v, err := eg.readBytesWithVarintLen()
 		v, err := eg.readBytesWithVarintLen()
 		if err != nil {
 		if err != nil {
-			return nil, err
+			return err
 		}
 		}
 
 
 		key, err := NewKey(string(k))
 		key, err := NewKey(string(k))
 		if err != nil {
 		if err != nil {
-			return nil, err // no partial failures
+			return err
 		}
 		}
 		val := string(v)
 		val := string(v)
 		if !checkValue(val) {
 		if !checkValue(val) {
-			return nil, errInvalidValue // no partial failures
+			return errInvalidValue
+		}
+		fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation)))
+		if err != nil {
+			return err
 		}
 		}
-		ts.upsert(key, val)
 	}
 	}
-	return ts, nil
+	return nil
 }
 }

+ 52 - 0
vendor/go.opencensus.io/tag/metadata.go

@@ -0,0 +1,52 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package tag
+
+const (
+	// valueTTLNoPropagation prevents tag from propagating.
+	valueTTLNoPropagation = 0
+
+	// valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops.
+	valueTTLUnlimitedPropagation = -1
+)
+
+// TTL is metadata that specifies number of hops a tag can propagate.
+// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata
+type TTL struct {
+	ttl int
+}
+
+var (
+	// TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops.
+	TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation}
+
+	// TTLNoPropagation is TTL metadata that prevents tag from propagating.
+	TTLNoPropagation = TTL{ttl: valueTTLNoPropagation}
+)
+
+type metadatas struct {
+	ttl TTL
+}
+
+// Metadata applies metadatas specified by the function.
+type Metadata func(*metadatas)
+
+// WithTTL applies metadata with provided ttl.
+func WithTTL(ttl TTL) Metadata {
+	return func(m *metadatas) {
+		m.ttl = ttl
+	}
+}

+ 1 - 1
vendor/go.opencensus.io/tag/profile_19.go

@@ -25,7 +25,7 @@ func do(ctx context.Context, f func(ctx context.Context)) {
 	m := FromContext(ctx)
 	m := FromContext(ctx)
 	keyvals := make([]string, 0, 2*len(m.m))
 	keyvals := make([]string, 0, 2*len(m.m))
 	for k, v := range m.m {
 	for k, v := range m.m {
-		keyvals = append(keyvals, k.Name(), v)
+		keyvals = append(keyvals, k.Name(), v.value)
 	}
 	}
 	pprof.Do(ctx, pprof.Labels(keyvals...), f)
 	pprof.Do(ctx, pprof.Labels(keyvals...), f)
 }
 }

+ 7 - 2
vendor/go.opencensus.io/trace/basetypes.go

@@ -59,6 +59,11 @@ func Int64Attribute(key string, value int64) Attribute {
 	return Attribute{key: key, value: value}
 	return Attribute{key: key, value: value}
 }
 }
 
 
+// Float64Attribute returns a float64-valued attribute.
+func Float64Attribute(key string, value float64) Attribute {
+	return Attribute{key: key, value: value}
+}
+
 // StringAttribute returns a string-valued attribute.
 // StringAttribute returns a string-valued attribute.
 func StringAttribute(key string, value string) Attribute {
 func StringAttribute(key string, value string) Attribute {
 	return Attribute{key: key, value: value}
 	return Attribute{key: key, value: value}
@@ -71,8 +76,8 @@ type LinkType int32
 // LinkType values.
 // LinkType values.
 const (
 const (
 	LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown.
 	LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown.
-	LinkTypeChild                       // The current span is a child of the linked span.
-	LinkTypeParent                      // The current span is the parent of the linked span.
+	LinkTypeChild                       // The linked span is a child of the current span.
+	LinkTypeParent                      // The linked span is the parent of the current span.
 )
 )
 
 
 // Link represents a reference from one span to another span.
 // Link represents a reference from one span to another span.

+ 47 - 1
vendor/go.opencensus.io/trace/config.go

@@ -14,7 +14,11 @@
 
 
 package trace
 package trace
 
 
-import "go.opencensus.io/trace/internal"
+import (
+	"sync"
+
+	"go.opencensus.io/trace/internal"
+)
 
 
 // Config represents the global tracing configuration.
 // Config represents the global tracing configuration.
 type Config struct {
 type Config struct {
@@ -23,12 +27,42 @@ type Config struct {
 
 
 	// IDGenerator is for internal use only.
 	// IDGenerator is for internal use only.
 	IDGenerator internal.IDGenerator
 	IDGenerator internal.IDGenerator
+
+	// MaxAnnotationEventsPerSpan is max number of annotation events per span
+	MaxAnnotationEventsPerSpan int
+
+	// MaxMessageEventsPerSpan is max number of message events per span
+	MaxMessageEventsPerSpan int
+
+	// MaxAnnotationEventsPerSpan is max number of attributes per span
+	MaxAttributesPerSpan int
+
+	// MaxLinksPerSpan is max number of links per span
+	MaxLinksPerSpan int
 }
 }
 
 
+var configWriteMu sync.Mutex
+
+const (
+	// DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span
+	DefaultMaxAnnotationEventsPerSpan = 32
+
+	// DefaultMaxMessageEventsPerSpan is default max number of message events per span
+	DefaultMaxMessageEventsPerSpan = 128
+
+	// DefaultMaxAttributesPerSpan is default max number of attributes per span
+	DefaultMaxAttributesPerSpan = 32
+
+	// DefaultMaxLinksPerSpan is default max number of links per span
+	DefaultMaxLinksPerSpan = 32
+)
+
 // ApplyConfig applies changes to the global tracing configuration.
 // ApplyConfig applies changes to the global tracing configuration.
 //
 //
 // Fields not provided in the given config are going to be preserved.
 // Fields not provided in the given config are going to be preserved.
 func ApplyConfig(cfg Config) {
 func ApplyConfig(cfg Config) {
+	configWriteMu.Lock()
+	defer configWriteMu.Unlock()
 	c := *config.Load().(*Config)
 	c := *config.Load().(*Config)
 	if cfg.DefaultSampler != nil {
 	if cfg.DefaultSampler != nil {
 		c.DefaultSampler = cfg.DefaultSampler
 		c.DefaultSampler = cfg.DefaultSampler
@@ -36,5 +70,17 @@ func ApplyConfig(cfg Config) {
 	if cfg.IDGenerator != nil {
 	if cfg.IDGenerator != nil {
 		c.IDGenerator = cfg.IDGenerator
 		c.IDGenerator = cfg.IDGenerator
 	}
 	}
+	if cfg.MaxAnnotationEventsPerSpan > 0 {
+		c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan
+	}
+	if cfg.MaxMessageEventsPerSpan > 0 {
+		c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan
+	}
+	if cfg.MaxAttributesPerSpan > 0 {
+		c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan
+	}
+	if cfg.MaxLinksPerSpan > 0 {
+		c.MaxLinksPerSpan = cfg.MaxLinksPerSpan
+	}
 	config.Store(&c)
 	config.Store(&c)
 }
 }

+ 3 - 1
vendor/go.opencensus.io/trace/doc.go

@@ -32,6 +32,8 @@ to sample a subset of traces, or use AlwaysSample to collect a trace on every ru
 
 
     trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
     trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
 
 
+Be careful about using trace.AlwaysSample in a production application with
+significant traffic: a new trace will be started and exported for every request.
 
 
 Adding Spans to a Trace
 Adding Spans to a Trace
 
 
@@ -42,7 +44,7 @@ It is common to want to capture all the activity of a function call in a span. F
 this to work, the function must take a context.Context as a parameter. Add these two
 this to work, the function must take a context.Context as a parameter. Add these two
 lines to the top of the function:
 lines to the top of the function:
 
 
-    ctx, span := trace.StartSpan(ctx, "my.org/Run")
+    ctx, span := trace.StartSpan(ctx, "example.com/Run")
     defer span.End()
     defer span.End()
 
 
 StartSpan will create a new top-level span if the context
 StartSpan will create a new top-level span if the context

+ 38 - 0
vendor/go.opencensus.io/trace/evictedqueue.go

@@ -0,0 +1,38 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+type evictedQueue struct {
+	queue        []interface{}
+	capacity     int
+	droppedCount int
+}
+
+func newEvictedQueue(capacity int) *evictedQueue {
+	eq := &evictedQueue{
+		capacity: capacity,
+		queue:    make([]interface{}, 0),
+	}
+
+	return eq
+}
+
+func (eq *evictedQueue) add(value interface{}) {
+	if len(eq.queue) == eq.capacity {
+		eq.queue = eq.queue[1:]
+		eq.droppedCount++
+	}
+	eq.queue = append(eq.queue, value)
+}

+ 33 - 12
vendor/go.opencensus.io/trace/export.go

@@ -16,6 +16,7 @@ package trace
 
 
 import (
 import (
 	"sync"
 	"sync"
+	"sync/atomic"
 	"time"
 	"time"
 )
 )
 
 
@@ -30,9 +31,11 @@ type Exporter interface {
 	ExportSpan(s *SpanData)
 	ExportSpan(s *SpanData)
 }
 }
 
 
+type exportersMap map[Exporter]struct{}
+
 var (
 var (
-	exportersMu sync.Mutex
-	exporters   map[Exporter]struct{}
+	exporterMu sync.Mutex
+	exporters  atomic.Value
 )
 )
 
 
 // RegisterExporter adds to the list of Exporters that will receive sampled
 // RegisterExporter adds to the list of Exporters that will receive sampled
@@ -40,20 +43,31 @@ var (
 //
 //
 // Binaries can register exporters, libraries shouldn't register exporters.
 // Binaries can register exporters, libraries shouldn't register exporters.
 func RegisterExporter(e Exporter) {
 func RegisterExporter(e Exporter) {
-	exportersMu.Lock()
-	if exporters == nil {
-		exporters = make(map[Exporter]struct{})
+	exporterMu.Lock()
+	new := make(exportersMap)
+	if old, ok := exporters.Load().(exportersMap); ok {
+		for k, v := range old {
+			new[k] = v
+		}
 	}
 	}
-	exporters[e] = struct{}{}
-	exportersMu.Unlock()
+	new[e] = struct{}{}
+	exporters.Store(new)
+	exporterMu.Unlock()
 }
 }
 
 
 // UnregisterExporter removes from the list of Exporters the Exporter that was
 // UnregisterExporter removes from the list of Exporters the Exporter that was
 // registered with the given name.
 // registered with the given name.
 func UnregisterExporter(e Exporter) {
 func UnregisterExporter(e Exporter) {
-	exportersMu.Lock()
-	delete(exporters, e)
-	exportersMu.Unlock()
+	exporterMu.Lock()
+	new := make(exportersMap)
+	if old, ok := exporters.Load().(exportersMap); ok {
+		for k, v := range old {
+			new[k] = v
+		}
+	}
+	delete(new, e)
+	exporters.Store(new)
+	exporterMu.Unlock()
 }
 }
 
 
 // SpanData contains all the information collected by a Span.
 // SpanData contains all the information collected by a Span.
@@ -71,6 +85,13 @@ type SpanData struct {
 	Annotations   []Annotation
 	Annotations   []Annotation
 	MessageEvents []MessageEvent
 	MessageEvents []MessageEvent
 	Status
 	Status
-	Links           []Link
-	HasRemoteParent bool
+	Links                    []Link
+	HasRemoteParent          bool
+	DroppedAttributeCount    int
+	DroppedAnnotationCount   int
+	DroppedMessageEventCount int
+	DroppedLinkCount         int
+
+	// ChildSpanCount holds the number of child span created for this span.
+	ChildSpanCount int
 }
 }

+ 1 - 0
vendor/go.opencensus.io/trace/internal/internal.go

@@ -15,6 +15,7 @@
 // Package internal provides trace internals.
 // Package internal provides trace internals.
 package internal
 package internal
 
 
+// IDGenerator allows custom generators for TraceId and SpanId.
 type IDGenerator interface {
 type IDGenerator interface {
 	NewTraceID() [16]byte
 	NewTraceID() [16]byte
 	NewSpanID() [8]byte
 	NewSpanID() [8]byte

+ 61 - 0
vendor/go.opencensus.io/trace/lrumap.go

@@ -0,0 +1,61 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"github.com/golang/groupcache/lru"
+)
+
+// A simple lru.Cache wrapper that tracks the keys of the current contents and
+// the cumulative number of evicted items.
+type lruMap struct {
+	cacheKeys    map[lru.Key]bool
+	cache        *lru.Cache
+	droppedCount int
+}
+
+func newLruMap(size int) *lruMap {
+	lm := &lruMap{
+		cacheKeys:    make(map[lru.Key]bool),
+		cache:        lru.New(size),
+		droppedCount: 0,
+	}
+	lm.cache.OnEvicted = func(key lru.Key, value interface{}) {
+		delete(lm.cacheKeys, key)
+		lm.droppedCount++
+	}
+	return lm
+}
+
+func (lm lruMap) len() int {
+	return lm.cache.Len()
+}
+
+func (lm lruMap) keys() []interface{} {
+	keys := []interface{}{}
+	for k := range lm.cacheKeys {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+func (lm *lruMap) add(key, value interface{}) {
+	lm.cacheKeys[lru.Key(key)] = true
+	lm.cache.Add(lru.Key(key), value)
+}
+
+func (lm *lruMap) get(key interface{}) (interface{}, bool) {
+	return lm.cache.Get(key)
+}

+ 3 - 4
vendor/go.opencensus.io/trace/sampling.go

@@ -20,10 +20,6 @@ import (
 
 
 const defaultSamplingProbability = 1e-4
 const defaultSamplingProbability = 1e-4
 
 
-func newDefaultSampler() Sampler {
-	return ProbabilitySampler(defaultSamplingProbability)
-}
-
 // Sampler decides whether a trace should be sampled and exported.
 // Sampler decides whether a trace should be sampled and exported.
 type Sampler func(SamplingParameters) SamplingDecision
 type Sampler func(SamplingParameters) SamplingDecision
 
 
@@ -62,6 +58,9 @@ func ProbabilitySampler(fraction float64) Sampler {
 }
 }
 
 
 // AlwaysSample returns a Sampler that samples every trace.
 // AlwaysSample returns a Sampler that samples every trace.
+// Be careful about using this sampler in a production application with
+// significant traffic: a new trace will be started and exported for every
+// request.
 func AlwaysSample() Sampler {
 func AlwaysSample() Sampler {
 	return func(p SamplingParameters) SamplingDecision {
 	return func(p SamplingParameters) SamplingDecision {
 		return SamplingDecision{Sample: true}
 		return SamplingDecision{Sample: true}

+ 150 - 68
vendor/go.opencensus.io/trace/trace.go

@@ -25,6 +25,7 @@ import (
 	"time"
 	"time"
 
 
 	"go.opencensus.io/internal"
 	"go.opencensus.io/internal"
+	"go.opencensus.io/trace/tracestate"
 )
 )
 
 
 // Span represents a span of a trace.  It has an associated SpanContext, and
 // Span represents a span of a trace.  It has an associated SpanContext, and
@@ -41,6 +42,20 @@ type Span struct {
 	data        *SpanData
 	data        *SpanData
 	mu          sync.Mutex // protects the contents of *data (but not the pointer value.)
 	mu          sync.Mutex // protects the contents of *data (but not the pointer value.)
 	spanContext SpanContext
 	spanContext SpanContext
+
+	// lruAttributes are capped at configured limit. When the capacity is reached an oldest entry
+	// is removed to create room for a new entry.
+	lruAttributes *lruMap
+
+	// annotations are stored in FIFO queue capped by configured limit.
+	annotations *evictedQueue
+
+	// messageEvents are stored in FIFO queue capped by configured limit.
+	messageEvents *evictedQueue
+
+	// links are stored in FIFO queue capped by configured limit.
+	links *evictedQueue
+
 	// spanStore is the spanStore this span belongs to, if any, otherwise it is nil.
 	// spanStore is the spanStore this span belongs to, if any, otherwise it is nil.
 	*spanStore
 	*spanStore
 	endOnce sync.Once
 	endOnce sync.Once
@@ -88,6 +103,7 @@ type SpanContext struct {
 	TraceID      TraceID
 	TraceID      TraceID
 	SpanID       SpanID
 	SpanID       SpanID
 	TraceOptions TraceOptions
 	TraceOptions TraceOptions
+	Tracestate   *tracestate.Tracestate
 }
 }
 
 
 type contextKey struct{}
 type contextKey struct{}
@@ -98,13 +114,6 @@ func FromContext(ctx context.Context) *Span {
 	return s
 	return s
 }
 }
 
 
-// WithSpan returns a new context with the given Span attached.
-//
-// Deprecated: Use NewContext.
-func WithSpan(parent context.Context, s *Span) context.Context {
-	return NewContext(parent, s)
-}
-
 // NewContext returns a new context with the given Span attached.
 // NewContext returns a new context with the given Span attached.
 func NewContext(parent context.Context, s *Span) context.Context {
 func NewContext(parent context.Context, s *Span) context.Context {
 	return context.WithValue(parent, contextKey{}, s)
 	return context.WithValue(parent, contextKey{}, s)
@@ -154,10 +163,14 @@ func WithSampler(sampler Sampler) StartOption {
 
 
 // StartSpan starts a new child span of the current span in the context. If
 // StartSpan starts a new child span of the current span in the context. If
 // there is no span in the context, creates a new trace and span.
 // there is no span in the context, creates a new trace and span.
+//
+// Returned context contains the newly created span. You can use it to
+// propagate the returned span in process.
 func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
 func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
 	var opts StartOptions
 	var opts StartOptions
 	var parent SpanContext
 	var parent SpanContext
 	if p := FromContext(ctx); p != nil {
 	if p := FromContext(ctx); p != nil {
+		p.addChild()
 		parent = p.spanContext
 		parent = p.spanContext
 	}
 	}
 	for _, op := range o {
 	for _, op := range o {
@@ -174,6 +187,9 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont
 //
 //
 // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is
 // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is
 // preferred for cases where the parent is propagated via an incoming request.
 // preferred for cases where the parent is propagated via an incoming request.
+//
+// Returned context contains the newly created span. You can use it to
+// propagate the returned span in process.
 func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
 func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
 	var opts StartOptions
 	var opts StartOptions
 	for _, op := range o {
 	for _, op := range o {
@@ -185,26 +201,6 @@ func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanCont
 	return NewContext(ctx, span), span
 	return NewContext(ctx, span), span
 }
 }
 
 
-// NewSpan returns a new span.
-//
-// If parent is not nil, created span will be a child of the parent.
-//
-// Deprecated: Use StartSpan.
-func NewSpan(name string, parent *Span, o StartOptions) *Span {
-	var parentSpanContext SpanContext
-	if parent != nil {
-		parentSpanContext = parent.SpanContext()
-	}
-	return startSpanInternal(name, parent != nil, parentSpanContext, false, o)
-}
-
-// NewSpanWithRemoteParent returns a new span with the given parent SpanContext.
-//
-// Deprecated: Use StartSpanWithRemoteParent.
-func NewSpanWithRemoteParent(name string, parent SpanContext, o StartOptions) *Span {
-	return startSpanInternal(name, true, parent, true, o)
-}
-
 func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span {
 func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span {
 	span := &Span{}
 	span := &Span{}
 	span.spanContext = parent
 	span.spanContext = parent
@@ -245,6 +241,11 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa
 		Name:            name,
 		Name:            name,
 		HasRemoteParent: remoteParent,
 		HasRemoteParent: remoteParent,
 	}
 	}
+	span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan)
+	span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan)
+	span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan)
+	span.links = newEvictedQueue(cfg.MaxLinksPerSpan)
+
 	if hasParent {
 	if hasParent {
 		span.data.ParentSpanID = parent.SpanID
 		span.data.ParentSpanID = parent.SpanID
 	}
 	}
@@ -262,26 +263,29 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa
 
 
 // End ends the span.
 // End ends the span.
 func (s *Span) End() {
 func (s *Span) End() {
+	if s == nil {
+		return
+	}
+	if s.executionTracerTaskEnd != nil {
+		s.executionTracerTaskEnd()
+	}
 	if !s.IsRecordingEvents() {
 	if !s.IsRecordingEvents() {
 		return
 		return
 	}
 	}
 	s.endOnce.Do(func() {
 	s.endOnce.Do(func() {
-		if s.executionTracerTaskEnd != nil {
-			s.executionTracerTaskEnd()
-		}
-		// TODO: optimize to avoid this call if sd won't be used.
-		sd := s.makeSpanData()
-		sd.EndTime = internal.MonotonicEndTime(sd.StartTime)
-		if s.spanStore != nil {
-			s.spanStore.finished(s, sd)
-		}
-		if s.spanContext.IsSampled() {
-			// TODO: consider holding exportersMu for less time.
-			exportersMu.Lock()
-			for e := range exporters {
-				e.ExportSpan(sd)
+		exp, _ := exporters.Load().(exportersMap)
+		mustExport := s.spanContext.IsSampled() && len(exp) > 0
+		if s.spanStore != nil || mustExport {
+			sd := s.makeSpanData()
+			sd.EndTime = internal.MonotonicEndTime(sd.StartTime)
+			if s.spanStore != nil {
+				s.spanStore.finished(s, sd)
+			}
+			if mustExport {
+				for e := range exp {
+					e.ExportSpan(sd)
+				}
 			}
 			}
-			exportersMu.Unlock()
 		}
 		}
 	})
 	})
 }
 }
@@ -292,11 +296,21 @@ func (s *Span) makeSpanData() *SpanData {
 	var sd SpanData
 	var sd SpanData
 	s.mu.Lock()
 	s.mu.Lock()
 	sd = *s.data
 	sd = *s.data
-	if s.data.Attributes != nil {
-		sd.Attributes = make(map[string]interface{})
-		for k, v := range s.data.Attributes {
-			sd.Attributes[k] = v
-		}
+	if s.lruAttributes.len() > 0 {
+		sd.Attributes = s.lruAttributesToAttributeMap()
+		sd.DroppedAttributeCount = s.lruAttributes.droppedCount
+	}
+	if len(s.annotations.queue) > 0 {
+		sd.Annotations = s.interfaceArrayToAnnotationArray()
+		sd.DroppedAnnotationCount = s.annotations.droppedCount
+	}
+	if len(s.messageEvents.queue) > 0 {
+		sd.MessageEvents = s.interfaceArrayToMessageEventArray()
+		sd.DroppedMessageEventCount = s.messageEvents.droppedCount
+	}
+	if len(s.links.queue) > 0 {
+		sd.Links = s.interfaceArrayToLinksArray()
+		sd.DroppedLinkCount = s.links.droppedCount
 	}
 	}
 	s.mu.Unlock()
 	s.mu.Unlock()
 	return &sd
 	return &sd
@@ -310,6 +324,16 @@ func (s *Span) SpanContext() SpanContext {
 	return s.spanContext
 	return s.spanContext
 }
 }
 
 
+// SetName sets the name of the span, if it is recording events.
+func (s *Span) SetName(name string) {
+	if !s.IsRecordingEvents() {
+		return
+	}
+	s.mu.Lock()
+	s.data.Name = name
+	s.mu.Unlock()
+}
+
 // SetStatus sets the status of the span, if it is recording events.
 // SetStatus sets the status of the span, if it is recording events.
 func (s *Span) SetStatus(status Status) {
 func (s *Span) SetStatus(status Status) {
 	if !s.IsRecordingEvents() {
 	if !s.IsRecordingEvents() {
@@ -320,6 +344,57 @@ func (s *Span) SetStatus(status Status) {
 	s.mu.Unlock()
 	s.mu.Unlock()
 }
 }
 
 
+func (s *Span) interfaceArrayToLinksArray() []Link {
+	linksArr := make([]Link, 0)
+	for _, value := range s.links.queue {
+		linksArr = append(linksArr, value.(Link))
+	}
+	return linksArr
+}
+
+func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent {
+	messageEventArr := make([]MessageEvent, 0)
+	for _, value := range s.messageEvents.queue {
+		messageEventArr = append(messageEventArr, value.(MessageEvent))
+	}
+	return messageEventArr
+}
+
+func (s *Span) interfaceArrayToAnnotationArray() []Annotation {
+	annotationArr := make([]Annotation, 0)
+	for _, value := range s.annotations.queue {
+		annotationArr = append(annotationArr, value.(Annotation))
+	}
+	return annotationArr
+}
+
+func (s *Span) lruAttributesToAttributeMap() map[string]interface{} {
+	attributes := make(map[string]interface{})
+	for _, key := range s.lruAttributes.keys() {
+		value, ok := s.lruAttributes.get(key)
+		if ok {
+			keyStr := key.(string)
+			attributes[keyStr] = value
+		}
+	}
+	return attributes
+}
+
+func (s *Span) copyToCappedAttributes(attributes []Attribute) {
+	for _, a := range attributes {
+		s.lruAttributes.add(a.key, a.value)
+	}
+}
+
+func (s *Span) addChild() {
+	if !s.IsRecordingEvents() {
+		return
+	}
+	s.mu.Lock()
+	s.data.ChildSpanCount++
+	s.mu.Unlock()
+}
+
 // AddAttributes sets attributes in the span.
 // AddAttributes sets attributes in the span.
 //
 //
 // Existing attributes whose keys appear in the attributes parameter are overwritten.
 // Existing attributes whose keys appear in the attributes parameter are overwritten.
@@ -328,10 +403,7 @@ func (s *Span) AddAttributes(attributes ...Attribute) {
 		return
 		return
 	}
 	}
 	s.mu.Lock()
 	s.mu.Lock()
-	if s.data.Attributes == nil {
-		s.data.Attributes = make(map[string]interface{})
-	}
-	copyAttributes(s.data.Attributes, attributes)
+	s.copyToCappedAttributes(attributes)
 	s.mu.Unlock()
 	s.mu.Unlock()
 }
 }
 
 
@@ -351,7 +423,7 @@ func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...in
 		m = make(map[string]interface{})
 		m = make(map[string]interface{})
 		copyAttributes(m, attributes)
 		copyAttributes(m, attributes)
 	}
 	}
-	s.data.Annotations = append(s.data.Annotations, Annotation{
+	s.annotations.add(Annotation{
 		Time:       now,
 		Time:       now,
 		Message:    msg,
 		Message:    msg,
 		Attributes: m,
 		Attributes: m,
@@ -367,7 +439,7 @@ func (s *Span) printStringInternal(attributes []Attribute, str string) {
 		a = make(map[string]interface{})
 		a = make(map[string]interface{})
 		copyAttributes(a, attributes)
 		copyAttributes(a, attributes)
 	}
 	}
-	s.data.Annotations = append(s.data.Annotations, Annotation{
+	s.annotations.add(Annotation{
 		Time:       now,
 		Time:       now,
 		Message:    str,
 		Message:    str,
 		Attributes: a,
 		Attributes: a,
@@ -404,7 +476,7 @@ func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedBy
 	}
 	}
 	now := time.Now()
 	now := time.Now()
 	s.mu.Lock()
 	s.mu.Lock()
-	s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{
+	s.messageEvents.add(MessageEvent{
 		Time:                 now,
 		Time:                 now,
 		EventType:            MessageEventTypeSent,
 		EventType:            MessageEventTypeSent,
 		MessageID:            messageID,
 		MessageID:            messageID,
@@ -426,7 +498,7 @@ func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compresse
 	}
 	}
 	now := time.Now()
 	now := time.Now()
 	s.mu.Lock()
 	s.mu.Lock()
-	s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{
+	s.messageEvents.add(MessageEvent{
 		Time:                 now,
 		Time:                 now,
 		EventType:            MessageEventTypeRecv,
 		EventType:            MessageEventTypeRecv,
 		MessageID:            messageID,
 		MessageID:            messageID,
@@ -442,7 +514,7 @@ func (s *Span) AddLink(l Link) {
 		return
 		return
 	}
 	}
 	s.mu.Lock()
 	s.mu.Lock()
-	s.data.Links = append(s.data.Links, l)
+	s.links.add(l)
 	s.mu.Unlock()
 	s.mu.Unlock()
 }
 }
 
 
@@ -474,29 +546,39 @@ func init() {
 	gen.spanIDInc |= 1
 	gen.spanIDInc |= 1
 
 
 	config.Store(&Config{
 	config.Store(&Config{
-		DefaultSampler: ProbabilitySampler(defaultSamplingProbability),
-		IDGenerator:    gen,
+		DefaultSampler:             ProbabilitySampler(defaultSamplingProbability),
+		IDGenerator:                gen,
+		MaxAttributesPerSpan:       DefaultMaxAttributesPerSpan,
+		MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan,
+		MaxMessageEventsPerSpan:    DefaultMaxMessageEventsPerSpan,
+		MaxLinksPerSpan:            DefaultMaxLinksPerSpan,
 	})
 	})
 }
 }
 
 
 type defaultIDGenerator struct {
 type defaultIDGenerator struct {
 	sync.Mutex
 	sync.Mutex
-	traceIDRand *rand.Rand
+
+	// Please keep these as the first fields
+	// so that these 8 byte fields will be aligned on addresses
+	// divisible by 8, on both 32-bit and 64-bit machines when
+	// performing atomic increments and accesses.
+	// See:
+	// * https://github.com/census-instrumentation/opencensus-go/issues/587
+	// * https://github.com/census-instrumentation/opencensus-go/issues/865
+	// * https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	nextSpanID uint64
+	spanIDInc  uint64
+
 	traceIDAdd  [2]uint64
 	traceIDAdd  [2]uint64
-	nextSpanID  uint64
-	spanIDInc   uint64
+	traceIDRand *rand.Rand
 }
 }
 
 
 // NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
 // NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
-// mu should be held while this function is called.
 func (gen *defaultIDGenerator) NewSpanID() [8]byte {
 func (gen *defaultIDGenerator) NewSpanID() [8]byte {
-	gen.Lock()
-	id := gen.nextSpanID
-	gen.nextSpanID += gen.spanIDInc
-	if gen.nextSpanID == 0 {
-		gen.nextSpanID += gen.spanIDInc
+	var id uint64
+	for id == 0 {
+		id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc)
 	}
 	}
-	gen.Unlock()
 	var sid [8]byte
 	var sid [8]byte
 	binary.LittleEndian.PutUint64(sid[:], id)
 	binary.LittleEndian.PutUint64(sid[:], id)
 	return sid
 	return sid

+ 147 - 0
vendor/go.opencensus.io/trace/tracestate/tracestate.go

@@ -0,0 +1,147 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tracestate implements support for the Tracestate header of the
+// W3C TraceContext propagation format.
+package tracestate
+
+import (
+	"fmt"
+	"regexp"
+)
+
// Size limits for a Tracestate. keyMaxSize and valueMaxSize mirror the
// 256-character bounds encoded in the key/value regexps below;
// maxKeyValuePairs caps the total number of entries (enforced in add).
const (
	keyMaxSize       = 256
	valueMaxSize     = 256
	maxKeyValuePairs = 32
)

// Regular-expression fragments for validating entries.
// A key is either a plain lowercase identifier or a vendor-qualified
// "tenant@vendor" identifier; a value is printable ASCII excluding
// comma and equals, and its last character may not be a space
// (the final character class starts at \x21).
const (
	keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}`
	keyWithVendorFormat    = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
	keyFormat              = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)`
	valueFormat            = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
)

// Compiled validators, anchored so the entire string must match.
var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`)
var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`)
+
// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different
// vendors to propagate additional information and inter-operate with their legacy Id formats.
type Tracestate struct {
	// entries holds the key-value pairs; the most recently added
	// entries are kept at the front (see add).
	entries []Entry
}
+
// Entry represents one key-value pair in a list of key-value pairs of a Tracestate.
type Entry struct {
	// Key is an opaque string of up to 256 printable characters. It MUST begin with a lowercase letter,
	// and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and
	// forward slashes /.
	Key string

	// Value is an opaque string of up to 256 printable ASCII RFC0020 characters (i.e., the
	// range 0x20 to 0x7E) except comma , and =.
	Value string
}
+
+// Entries returns a slice of Entry.
+func (ts *Tracestate) Entries() []Entry {
+	if ts == nil {
+		return nil
+	}
+	return ts.entries
+}
+
+func (ts *Tracestate) remove(key string) *Entry {
+	for index, entry := range ts.entries {
+		if entry.Key == key {
+			ts.entries = append(ts.entries[:index], ts.entries[index+1:]...)
+			return &entry
+		}
+	}
+	return nil
+}
+
+func (ts *Tracestate) add(entries []Entry) error {
+	for _, entry := range entries {
+		ts.remove(entry.Key)
+	}
+	if len(ts.entries)+len(entries) > maxKeyValuePairs {
+		return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d",
+			len(entries), len(ts.entries), maxKeyValuePairs)
+	}
+	ts.entries = append(entries, ts.entries...)
+	return nil
+}
+
+func isValid(entry Entry) bool {
+	return keyValidationRegExp.MatchString(entry.Key) &&
+		valueValidationRegExp.MatchString(entry.Value)
+}
+
+func containsDuplicateKey(entries ...Entry) (string, bool) {
+	keyMap := make(map[string]int)
+	for _, entry := range entries {
+		if _, ok := keyMap[entry.Key]; ok {
+			return entry.Key, true
+		}
+		keyMap[entry.Key] = 1
+	}
+	return "", false
+}
+
+func areEntriesValid(entries ...Entry) (*Entry, bool) {
+	for _, entry := range entries {
+		if !isValid(entry) {
+			return &entry, false
+		}
+	}
+	return nil, true
+}
+
+// New creates a Tracestate object from a parent and/or entries (key-value pair).
+// Entries from the parent are copied if present. The entries passed to this function
+// are inserted in front of those copied from the parent. If an entry copied from the
+// parent contains the same key as one of the entry in entries then the entry copied
+// from the parent is removed. See add func.
+//
+// An error is returned with nil Tracestate if
+//  1. one or more entry in entries is invalid.
+//  2. two or more entries in the input entries have the same key.
+//  3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs.
+//     (duplicate entry is counted only once).
+func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) {
+	if parent == nil && len(entries) == 0 {
+		return nil, nil
+	}
+	if entry, ok := areEntriesValid(entries...); !ok {
+		return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value)
+	}
+
+	if key, duplicate := containsDuplicateKey(entries...); duplicate {
+		return nil, fmt.Errorf("contains duplicate keys (%s)", key)
+	}
+
+	tracestate := Tracestate{}
+
+	if parent != nil && len(parent.entries) > 0 {
+		tracestate.entries = append([]Entry{}, parent.entries...)
+	}
+
+	err := tracestate.add(entries)
+	if err != nil {
+		return nil, err
+	}
+	return &tracestate, nil
+}