Procházet zdrojové kódy

Merge pull request #43357 from corhere/vendor-swarmkit-and-containerd-v1.6.1

Vendor latest swarmkit, containerd v1.6.1
Sebastiaan van Stijn před 3 roky
rodič
revize
a583434ebc
100 změnil soubory, kde provedl 10291 přidání a 974 odebrání
  1. 1 1
      daemon/cluster/executor/container/executor.go
  2. 2 1
      daemon/daemon.go
  3. 1 1
      daemon/oci_windows_test.go
  4. 22 17
      vendor.mod
  5. 378 28
      vendor.sum
  6. 1 0
      vendor/cloud.google.com/go/.gitignore
  7. 440 2
      vendor/cloud.google.com/go/CHANGES.md
  8. 65 12
      vendor/cloud.google.com/go/CONTRIBUTING.md
  9. 55 48
      vendor/cloud.google.com/go/README.md
  10. 64 51
      vendor/cloud.google.com/go/RELEASING.md
  11. 1 0
      vendor/cloud.google.com/go/compute/metadata/metadata.go
  12. 26 3
      vendor/cloud.google.com/go/doc.go
  13. 1 1
      vendor/cloud.google.com/go/internal/version/version.go
  14. 60 1
      vendor/cloud.google.com/go/logging/CHANGES.md
  15. 10 7
      vendor/cloud.google.com/go/logging/README.md
  16. 623 55
      vendor/cloud.google.com/go/logging/apiv2/config_client.go
  17. 29 12
      vendor/cloud.google.com/go/logging/apiv2/doc.go
  18. 206 0
      vendor/cloud.google.com/go/logging/apiv2/gapic_metadata.json
  19. 42 0
      vendor/cloud.google.com/go/logging/apiv2/info.go
  20. 182 44
      vendor/cloud.google.com/go/logging/apiv2/logging_client.go
  21. 121 31
      vendor/cloud.google.com/go/logging/apiv2/metrics_client.go
  22. 11 10
      vendor/cloud.google.com/go/logging/doc.go
  23. 81 121
      vendor/cloud.google.com/go/logging/logging.go
  24. 268 0
      vendor/cloud.google.com/go/logging/resource.go
  25. 236 0
      vendor/cloud.google.com/go/testing.md
  26. 1 2
      vendor/github.com/container-storage-interface/spec/LICENSE
  27. 6280 0
      vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go
  28. 1 1
      vendor/github.com/containerd/containerd/.golangci.yml
  29. 16 0
      vendor/github.com/containerd/containerd/.mailmap
  30. 12 2
      vendor/github.com/containerd/containerd/ADOPTERS.md
  31. 47 38
      vendor/github.com/containerd/containerd/BUILDING.md
  32. 62 31
      vendor/github.com/containerd/containerd/Makefile
  33. 1 1
      vendor/github.com/containerd/containerd/Makefile.linux
  34. 5 1
      vendor/github.com/containerd/containerd/Makefile.windows
  35. 0 17
      vendor/github.com/containerd/containerd/Protobuild.toml
  36. 21 10
      vendor/github.com/containerd/containerd/README.md
  37. 39 23
      vendor/github.com/containerd/containerd/RELEASES.md
  38. 29 2
      vendor/github.com/containerd/containerd/Vagrantfile
  39. 17 0
      vendor/github.com/containerd/containerd/api/services/containers/v1/doc.go
  40. 17 0
      vendor/github.com/containerd/containerd/api/services/content/v1/doc.go
  41. 17 0
      vendor/github.com/containerd/containerd/api/services/diff/v1/doc.go
  42. 17 0
      vendor/github.com/containerd/containerd/api/services/namespaces/v1/doc.go
  43. 17 0
      vendor/github.com/containerd/containerd/api/services/snapshots/v1/doc.go
  44. 17 0
      vendor/github.com/containerd/containerd/api/services/tasks/v1/doc.go
  45. 133 86
      vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
  46. 2 0
      vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
  47. 18 0
      vendor/github.com/containerd/containerd/api/services/version/v1/doc.go
  48. 18 0
      vendor/github.com/containerd/containerd/api/types/task/doc.go
  49. 45 9
      vendor/github.com/containerd/containerd/archive/compression/compression.go
  50. 53 32
      vendor/github.com/containerd/containerd/archive/tar.go
  51. 0 2
      vendor/github.com/containerd/containerd/archive/tar_freebsd.go
  52. 1 0
      vendor/github.com/containerd/containerd/archive/tar_mostunix.go
  53. 0 2
      vendor/github.com/containerd/containerd/archive/tar_opts_linux.go
  54. 2 4
      vendor/github.com/containerd/containerd/archive/tar_opts_windows.go
  55. 26 8
      vendor/github.com/containerd/containerd/archive/tar_unix.go
  56. 2 4
      vendor/github.com/containerd/containerd/archive/tar_windows.go
  57. 3 3
      vendor/github.com/containerd/containerd/archive/time_unix.go
  58. 6 6
      vendor/github.com/containerd/containerd/cio/io_unix.go
  59. 3 4
      vendor/github.com/containerd/containerd/cio/io_windows.go
  60. 25 26
      vendor/github.com/containerd/containerd/client.go
  61. 9 0
      vendor/github.com/containerd/containerd/client_opts.go
  62. 6 6
      vendor/github.com/containerd/containerd/container.go
  63. 1 1
      vendor/github.com/containerd/containerd/container_checkpoint_opts.go
  64. 5 5
      vendor/github.com/containerd/containerd/container_opts.go
  65. 1 0
      vendor/github.com/containerd/containerd/container_opts_unix.go
  66. 5 4
      vendor/github.com/containerd/containerd/container_restore_opts.go
  67. 19 16
      vendor/github.com/containerd/containerd/content/helpers.go
  68. 8 2
      vendor/github.com/containerd/containerd/content/local/locks.go
  69. 3 4
      vendor/github.com/containerd/containerd/content/local/readerat.go
  70. 40 37
      vendor/github.com/containerd/containerd/content/local/store.go
  71. 2 1
      vendor/github.com/containerd/containerd/content/local/store_bsd.go
  72. 2 1
      vendor/github.com/containerd/containerd/content/local/store_openbsd.go
  73. 2 1
      vendor/github.com/containerd/containerd/content/local/store_unix.go
  74. 14 13
      vendor/github.com/containerd/containerd/content/local/writer.go
  75. 6 6
      vendor/github.com/containerd/containerd/content/proxy/content_writer.go
  76. 11 1
      vendor/github.com/containerd/containerd/contrib/nvidia/nvidia.go
  77. 37 0
      vendor/github.com/containerd/containerd/defaults/defaults_darwin.go
  78. 2 1
      vendor/github.com/containerd/containerd/defaults/defaults_unix.go
  79. 0 2
      vendor/github.com/containerd/containerd/defaults/defaults_windows.go
  80. 15 0
      vendor/github.com/containerd/containerd/diff/diff.go
  81. 1 1
      vendor/github.com/containerd/containerd/diff/stream.go
  82. 3 2
      vendor/github.com/containerd/containerd/diff/stream_unix.go
  83. 3 6
      vendor/github.com/containerd/containerd/diff/stream_windows.go
  84. 2 3
      vendor/github.com/containerd/containerd/errdefs/errors.go
  85. 5 5
      vendor/github.com/containerd/containerd/errdefs/grpc.go
  86. 12 12
      vendor/github.com/containerd/containerd/events/exchange/exchange.go
  87. 4 5
      vendor/github.com/containerd/containerd/filters/parser.go
  88. 1 2
      vendor/github.com/containerd/containerd/filters/quote.go
  89. 2 0
      vendor/github.com/containerd/containerd/gc/gc.go
  90. 4 4
      vendor/github.com/containerd/containerd/identifiers/validate.go
  91. 9 3
      vendor/github.com/containerd/containerd/image.go
  92. 10 10
      vendor/github.com/containerd/containerd/images/archive/exporter.go
  93. 69 27
      vendor/github.com/containerd/containerd/images/archive/importer.go
  94. 3 3
      vendor/github.com/containerd/containerd/images/archive/reference.go
  95. 38 4
      vendor/github.com/containerd/containerd/images/handlers.go
  96. 10 11
      vendor/github.com/containerd/containerd/images/image.go
  97. 2 2
      vendor/github.com/containerd/containerd/images/mediatypes.go
  98. 37 9
      vendor/github.com/containerd/containerd/import.go
  99. 6 2
      vendor/github.com/containerd/containerd/install.go
  100. 3 2
      vendor/github.com/containerd/containerd/labels/validate.go

+ 1 - 1
daemon/cluster/executor/container/executor.go

@@ -52,7 +52,7 @@ func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBac
 		pluginBackend: p,
 		imageBackend:  i,
 		volumeBackend: v,
-		dependencies:  agent.NewDependencyManager(),
+		dependencies:  agent.NewDependencyManager(b.PluginGetter()),
 	}
 }
 

+ 2 - 1
daemon/daemon.go

@@ -65,6 +65,7 @@ import (
 	"golang.org/x/sync/singleflight"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/credentials/insecure"
 )
 
 // ContainersNamespace is the name of the namespace used for users containers
@@ -885,7 +886,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		// It is not harm to add WithBlock for containerd connection.
 		grpc.WithBlock(),
 
-		grpc.WithInsecure(),
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
 		grpc.WithConnectParams(connParams),
 		grpc.WithContextDialer(dialer.ContextDialer),
 

+ 1 - 1
daemon/oci_windows_test.go

@@ -179,7 +179,7 @@ func TestSetWindowsCredentialSpecInSpec(t *testing.T) {
 	t.Run("happy path with a 'config://' option", func(t *testing.T) {
 		configID := "my-cred-spec"
 
-		dependencyManager := swarmagent.NewDependencyManager()
+		dependencyManager := swarmagent.NewDependencyManager(nil)
 		dependencyManager.Configs().Add(swarmapi.Config{
 			ID: configID,
 			Spec: swarmapi.ConfigSpec{

+ 22 - 17
vendor.mod

@@ -18,7 +18,7 @@ require (
 	github.com/bsphere/le_go v0.0.0-20170215134836-7a984a84b549
 	github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5
 	github.com/containerd/cgroups v1.0.3
-	github.com/containerd/containerd v1.5.10
+	github.com/containerd/containerd v1.6.1
 	github.com/containerd/continuity v0.2.2
 	github.com/containerd/fifo v1.0.0
 	github.com/containerd/typeurl v1.0.2
@@ -33,7 +33,7 @@ require (
 	github.com/docker/go-units v0.4.0
 	github.com/docker/libkv v0.2.2-0.20211217103745-e480589147e3
 	github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4
-	github.com/docker/swarmkit v1.12.1-0.20210726173615-3629f50980f6
+	github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0
 	github.com/fluent/fluent-logger-golang v1.9.0
 	github.com/fsnotify/fsnotify v1.5.1
 	github.com/godbus/dbus/v5 v5.0.6
@@ -74,32 +74,32 @@ require (
 	github.com/tchap/go-patricia v2.3.0+incompatible
 	github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274
 	github.com/vbatts/tar-split v0.11.2
-	github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
+	github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
 	github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f
 	go.etcd.io/bbolt v1.3.6
-	golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2
+	golang.org/x/net v0.0.0-20211216030914-fe4d6282115f
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
 	golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11
 	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa
-	google.golang.org/grpc v1.40.0
+	google.golang.org/grpc v1.43.0
 	gotest.tools/v3 v3.1.0
 )
 
 require (
 	code.cloudfoundry.org/clock v1.0.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+	github.com/akutz/memconn v0.1.0 // indirect
 	github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/cilium/ebpf v0.7.0 // indirect
+	github.com/container-storage-interface/spec v1.5.0 // indirect
 	github.com/containerd/console v1.0.3 // indirect
 	github.com/containerd/go-runc v1.0.0 // indirect
 	github.com/containerd/ttrpc v1.1.0 // indirect
-	github.com/coreos/etcd v3.3.27+incompatible // indirect
-	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
-	github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
+	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e // indirect
 	github.com/gofrs/flock v0.7.3 // indirect
 	github.com/gogo/googleapis v1.4.0 // indirect
@@ -127,13 +127,21 @@ require (
 	github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee // indirect
 	github.com/philhofer/fwd v1.0.0 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.10.0 // indirect
-	github.com/prometheus/procfs v0.6.0 // indirect
+	github.com/prometheus/common v0.30.0 // indirect
+	github.com/prometheus/procfs v0.7.3 // indirect
+	github.com/rexray/gocsi v1.2.2 // indirect
 	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
 	github.com/tinylib/msgp v1.1.0 // indirect
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
 	github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.2 // indirect
+	go.etcd.io/etcd/pkg/v3 v3.5.2 // indirect
+	go.etcd.io/etcd/raft/v3 v3.5.2 // indirect
+	go.etcd.io/etcd/server/v3 v3.5.2 // indirect
 	go.opencensus.io v0.23.0 // indirect
+	go.uber.org/atomic v1.7.0 // indirect
+	go.uber.org/multierr v1.6.0 // indirect
+	go.uber.org/zap v1.17.0 // indirect
 	golang.org/x/crypto v0.0.0-20211202192323-5770296d904e // indirect
 	golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
 	golang.org/x/mod v0.4.2 // indirect
@@ -143,19 +151,16 @@ require (
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/api v0.46.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/protobuf v1.27.1 // indirect
 	labix.org/v2/mgo v0.0.0-20140701140051-000000000287 // indirect
 )
 
 replace (
-	cloud.google.com/go => cloud.google.com/go v0.59.0
-	cloud.google.com/go/bigquery => cloud.google.com/go/bigquery v1.0.0
-	cloud.google.com/go/logging => cloud.google.com/go/logging v1.0.1-0.20190813144457-ceeb313ad77b
 	github.com/armon/go-metrics => github.com/armon/go-metrics v0.0.0-20150106224455-eb0af217e5e9
 	github.com/armon/go-radix => github.com/armon/go-radix v0.0.0-20150105235045-e39d623f12e8
 	github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7
 	github.com/coreos/pkg => github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea
 	github.com/gogo/googleapis => github.com/gogo/googleapis v1.3.2
-	github.com/golang/protobuf => github.com/golang/protobuf v1.3.5
 	github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
 	github.com/hashicorp/go-msgpack => github.com/hashicorp/go-msgpack v0.0.0-20140221154404-71c2886f5a67
 	github.com/hashicorp/go-multierror => github.com/hashicorp/go-multierror v1.0.0
@@ -165,10 +170,10 @@ replace (
 	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.11
 	github.com/vishvananda/netlink => github.com/vishvananda/netlink v1.1.0
 	go.opencensus.io => go.opencensus.io v0.22.3
-	google.golang.org/api => google.golang.org/api v0.8.0
-	google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200227132054-3f1135a288c9
-	google.golang.org/grpc => google.golang.org/grpc v1.27.1
 )
 
+// Removes etcd dependency
+replace github.com/rexray/gocsi => github.com/dperny/gocsi v1.2.3-pre
+
 // autogen/winresources/dockerd is generated a build time, this replacement is only for the purpose of `go mod vendor`
 replace github.com/docker/docker/autogen/winresources/dockerd => ./hack/make/.resources-windows

Rozdílová data souboru nebyla zobrazena, protože soubor je příliš velký
+ 378 - 28
vendor.sum


+ 1 - 0
vendor/cloud.google.com/go/.gitignore

@@ -2,6 +2,7 @@
 .idea
 .vscode
 *.swp
+.history
 
 # Test files
 *.test

+ 440 - 2
vendor/cloud.google.com/go/CHANGES.md

@@ -1,5 +1,445 @@
 # Changes
 
+
+## [0.81.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.80.0...v0.81.0) (2021-04-02)
+
+
+### Features
+
+* **datacatalog:** Policy Tag Manager v1 API service feat: new RenameTagTemplateFieldEnumValue API feat: adding fully_qualified_name in lookup and search feat: added DATAPROC_METASTORE integrated system along with new entry types: DATABASE and SERVICE docs: Documentation improvements ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7))
+* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([a0b1f6f](https://www.github.com/googleapis/google-cloud-go/commit/a0b1f6faae77d014fdee166ab018ddcd6f846ab4))
+* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3))
+* **dialogflow:** expose MP3_64_KBPS and MULAW for output audio encodings. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3))
+* **secretmanager:** Rotation for Secrets ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7))
+
+
+### Bug Fixes
+
+* **internal/godocfx:** filter out non-Cloud ([#3878](https://www.github.com/googleapis/google-cloud-go/issues/3878)) ([625aef9](https://www.github.com/googleapis/google-cloud-go/commit/625aef9b47181cf627587cc9cde9e400713c6678))
+
+## [0.80.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.79.0...v0.80.0) (2021-03-23)
+
+
+### ⚠ BREAKING CHANGES
+
+* **all:** This is a breaking change in dialogflow
+
+### Features
+
+* **appengine:** added vm_liveness, search_api_available, network_settings, service_account, build_env_variables, kms_key_reference to v1 API ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
+* **assuredworkloads:** Add 'resource_settings' field to provide custom properties (ids) for the provisioned projects. ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
+* **assuredworkloads:** add HIPAA and HITRUST compliance regimes ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
+* **dialogflow/cx:** added fallback option when restoring an agent docs: clarified experiment length ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86))
+* **dialogflow/cx:** start generating apiv3 ([#3850](https://www.github.com/googleapis/google-cloud-go/issues/3850)) ([febbdcf](https://www.github.com/googleapis/google-cloud-go/commit/febbdcf13fcea3f5d8186c3d3dface1c0d27ef9e)), refs [#3634](https://www.github.com/googleapis/google-cloud-go/issues/3634)
+* **documentai:** add EVAL_SKIPPED value to the Provenance.OperationType enum in document.proto. ([cb43066](https://www.github.com/googleapis/google-cloud-go/commit/cb4306683926843f6e977f207fa6070bb9242a61))
+* **documentai:** start generating apiv1 ([#3853](https://www.github.com/googleapis/google-cloud-go/issues/3853)) ([d68e604](https://www.github.com/googleapis/google-cloud-go/commit/d68e604c953eea90489f6134e71849b24dd0fcbf))
+* **internal/godocfx:** add prettyprint class to code blocks ([#3819](https://www.github.com/googleapis/google-cloud-go/issues/3819)) ([6e49f21](https://www.github.com/googleapis/google-cloud-go/commit/6e49f2148b116ee439c8a882dcfeefb6e7647c57))
+* **internal/godocfx:** handle Markdown content ([#3816](https://www.github.com/googleapis/google-cloud-go/issues/3816)) ([56d5d0a](https://www.github.com/googleapis/google-cloud-go/commit/56d5d0a900197fb2de46120a0eda649f2c17448f))
+* **kms:** Add maxAttempts to retry policy for KMS gRPC service config feat: Add Bazel exports_files entry for KMS gRPC service config ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
+* **resourcesettings:** start generating apiv1 ([#3854](https://www.github.com/googleapis/google-cloud-go/issues/3854)) ([3b288b4](https://www.github.com/googleapis/google-cloud-go/commit/3b288b4fa593c6cb418f696b5b26768967c20b9e))
+* **speech:** Support output transcript to GCS for LongRunningRecognize. ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
+* **speech:** Support output transcript to GCS for LongRunningRecognize. ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86))
+* **speech:** Support output transcript to GCS for LongRunningRecognize. ([35a8706](https://www.github.com/googleapis/google-cloud-go/commit/35a870662df8bf63c4ec10a0233d1d7a708007ee))
+
+
+### Miscellaneous Chores
+
+* **all:** auto-regenerate gapics ([#3837](https://www.github.com/googleapis/google-cloud-go/issues/3837)) ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
+
+## [0.79.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.78.0...v0.79.0) (2021-03-10)
+
+
+### Features
+
+* **apigateway:** start generating apiv1 ([#3726](https://www.github.com/googleapis/google-cloud-go/issues/3726)) ([66046da](https://www.github.com/googleapis/google-cloud-go/commit/66046da2a4be5971ce2655dc6a5e1fadb08c3d1f))
+* **channel:** addition of billing_account field on Plan. docs: clarification that valid address lines are required for all customers. ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215))
+* **dialogflow/cx:** allow to disable webhook invocation per request ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215))
+* **dialogflow/cx:** allow to disable webhook invocation per request ([44c6bf9](https://www.github.com/googleapis/google-cloud-go/commit/44c6bf986f39a3c9fddf46788ae63bfbb3739441))
+* **dialogflow:** Add CCAI API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
+* **documentai:** remove the translation fields in document.proto. ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
+* **documentai:** Update documentai/v1beta3 protos: add support for boolean normalized value ([529925b](https://www.github.com/googleapis/google-cloud-go/commit/529925ba79f4d3191ef80a13e566d86210fe4d25))
+* **internal/godocfx:** keep some cross links on same domain ([#3767](https://www.github.com/googleapis/google-cloud-go/issues/3767)) ([77f76ed](https://www.github.com/googleapis/google-cloud-go/commit/77f76ed09cb07a090ba9054063a7c002a35bca4e))
+* **internal:** add ability to regenerate one module's docs ([#3777](https://www.github.com/googleapis/google-cloud-go/issues/3777)) ([dc15995](https://www.github.com/googleapis/google-cloud-go/commit/dc15995521bd065da4cfaae95642588919a8c548))
+* **metastore:** added support for release channels when creating service ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
+* **metastore:** Publish Dataproc Metastore v1alpha API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
+* **metastore:** start generating apiv1alpha ([#3747](https://www.github.com/googleapis/google-cloud-go/issues/3747)) ([359312a](https://www.github.com/googleapis/google-cloud-go/commit/359312ad6d4f61fb341d41ffa35fc0634979e650))
+* **metastore:** start generating apiv1beta ([#3788](https://www.github.com/googleapis/google-cloud-go/issues/3788)) ([2977095](https://www.github.com/googleapis/google-cloud-go/commit/297709593ad32f234c0fbcfa228cffcfd3e591f4))
+* **secretmanager:** added topic field to Secret ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01))
+
+
+### Bug Fixes
+
+* **analytics/admin:** add `https://www.googleapis.com/auth/analytics.edit` OAuth2 scope to the list of acceptable scopes for all read only methods of the Admin API docs: update the documentation of the `update_mask` field used by Update() methods ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01))
+* **apigateway:** Provide resource definitions for service management and IAM resources ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
+* **functions:** Fix service namespace in grpc_service_config. ([7811a34](https://www.github.com/googleapis/google-cloud-go/commit/7811a34ef64d722480c640810251bb3a0d65d495))
+* **internal/godocfx:** prevent index out of bounds when pkg == mod ([#3768](https://www.github.com/googleapis/google-cloud-go/issues/3768)) ([3d80b4e](https://www.github.com/googleapis/google-cloud-go/commit/3d80b4e93b0f7e857d6e9681d8d6a429750ecf80))
+* **internal/godocfx:** use correct anchor links ([#3738](https://www.github.com/googleapis/google-cloud-go/issues/3738)) ([919039a](https://www.github.com/googleapis/google-cloud-go/commit/919039a01a006c41e720218bd55f83ce98a5edef))
+* **internal:** fix Bash syntax ([#3779](https://www.github.com/googleapis/google-cloud-go/issues/3779)) ([3dd245d](https://www.github.com/googleapis/google-cloud-go/commit/3dd245dbdbfa84f0bbe5a476412d8463fe3e700c))
+* **tables:** use area120tables_v1alpha1.yaml as api-service-config ([#3759](https://www.github.com/googleapis/google-cloud-go/issues/3759)) ([b130ec0](https://www.github.com/googleapis/google-cloud-go/commit/b130ec0aa946b1a1eaa4d5a7c33e72353ac1612e))
+
+## [0.78.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.77.0...v0.78.0) (2021-02-22)
+
+
+### Features
+
+* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([16597fa](https://www.github.com/googleapis/google-cloud-go/commit/16597fa1ce549053c7183e8456e23f554a5501de))
+* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
+* **dialogflow:** add additional_bindings to Dialogflow v2 ListIntents API docs: update copyrights and session docs ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
+* **documentai:** Update documentai/v1beta3 protos ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4))
+* **gkehub:** Update Membership API v1beta1 proto ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4))
+* **servicecontrol:** Update the ruby_cloud_gapic_library rules for the libraries published to google-cloud-ruby to the form that works with build_gen (separate parameters for ruby_cloud_title and ruby_cloud_description). chore: Update Bazel-Ruby rules version. chore: Update build_gen version. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
+* **speech:** Support Model Adaptation. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
+
+
+### Bug Fixes
+
+* **dialogflow/cx:** RunTestCase http template. PHP REST client lib can be generated. feat: Support transition route group coverage for Test Cases. ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4))
+* **errorreporting:** Fixes ruby gem build ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
+
+## [0.77.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.76.0...v0.77.0) (2021-02-16)
+
+
+### Features
+
+* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([1aea7c8](https://www.github.com/googleapis/google-cloud-go/commit/1aea7c87d39eed87620b488ba0dd60b88ff26c04))
+* **dialogflow/cx:** supports SentimentAnalysisResult in webhook request docs: minor updates in wording ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
+* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
+* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8))
+* **gkehub:** start generating apiv1beta1 ([#3698](https://www.github.com/googleapis/google-cloud-go/issues/3698)) ([8aed3bd](https://www.github.com/googleapis/google-cloud-go/commit/8aed3bd1bbbe983e4891c813e4c5dc9b3aa1b9b2))
+* **internal/docfx:** full cross reference linking ([#3656](https://www.github.com/googleapis/google-cloud-go/issues/3656)) ([fcb7318](https://www.github.com/googleapis/google-cloud-go/commit/fcb7318eb338bf3828ac831ed06ca630e1876418))
+* **memcache:** added ApplySoftwareUpdate API docs: various clarifications, new documentation for ApplySoftwareUpdate chore: update proto annotations ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
+* **networkconnectivity:** Add state field in resources docs: Minor changes ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
+* **networkconnectivity:** Add state field in resources docs: Minor changes ([b4b5898](https://www.github.com/googleapis/google-cloud-go/commit/b4b58987368f80494bbc7f651f50e9123200fb3f))
+* **recommendationengine:** start generating apiv1beta1 ([#3686](https://www.github.com/googleapis/google-cloud-go/issues/3686)) ([8f4e130](https://www.github.com/googleapis/google-cloud-go/commit/8f4e13009444d88a5a56144129f055623a2205ac))
+
+
+### Bug Fixes
+
+* **errorreporting:** Remove dependency on AppEngine's proto definitions. This also removes the source_references field. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
+* **errorreporting:** Update bazel builds for ER client libraries. ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
+* **internal/godocfx:** use exact list of top-level decls ([#3665](https://www.github.com/googleapis/google-cloud-go/issues/3665)) ([3cd2961](https://www.github.com/googleapis/google-cloud-go/commit/3cd2961bd7b9c29d82a21ba8850eff00c7c332fd))
+* **kms:** do not retry on 13 INTERNAL ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
+* **orgpolicy:** Fix constraint resource pattern annotation ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8))
+* **orgpolicy:** Fix constraint resource pattern annotation ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
+* **profiler:** make sure retries use the most up-to-date copy of the trailer ([#3660](https://www.github.com/googleapis/google-cloud-go/issues/3660)) ([3ba9ebc](https://www.github.com/googleapis/google-cloud-go/commit/3ba9ebcee2b8b43cdf2c8f8a3d810516a604b363))
+* **vision:** sync vision v1 protos to get extra FaceAnnotation Landmark Types ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
+
+## [0.76.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.75.0...v0.76.0) (2021-02-02)
+
+
+### Features
+
+* **accessapproval:** Migrate the Bazel rules for the libraries published to google-cloud-ruby to use the gapic-generator-ruby instead of the monolith generator. ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
+* **all:** auto-regenerate gapics ([#3526](https://www.github.com/googleapis/google-cloud-go/issues/3526)) ([ab2af0b](https://www.github.com/googleapis/google-cloud-go/commit/ab2af0b32630dd97f44800f4e273184f887375db))
+* **all:** auto-regenerate gapics ([#3539](https://www.github.com/googleapis/google-cloud-go/issues/3539)) ([84d4d8a](https://www.github.com/googleapis/google-cloud-go/commit/84d4d8ae2d3fbf34a4a312a0a2e4062d18caaa3d))
+* **all:** auto-regenerate gapics ([#3546](https://www.github.com/googleapis/google-cloud-go/issues/3546)) ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7))
+* **all:** auto-regenerate gapics ([#3563](https://www.github.com/googleapis/google-cloud-go/issues/3563)) ([102112a](https://www.github.com/googleapis/google-cloud-go/commit/102112a4e9285a16645aabc89789f613d4f47c9e))
+* **all:** auto-regenerate gapics ([#3576](https://www.github.com/googleapis/google-cloud-go/issues/3576)) ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
+* **all:** auto-regenerate gapics ([#3580](https://www.github.com/googleapis/google-cloud-go/issues/3580)) ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1))
+* **all:** auto-regenerate gapics ([#3587](https://www.github.com/googleapis/google-cloud-go/issues/3587)) ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f))
+* **all:** auto-regenerate gapics ([#3598](https://www.github.com/googleapis/google-cloud-go/issues/3598)) ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **appengine:** start generating apiv1 ([#3561](https://www.github.com/googleapis/google-cloud-go/issues/3561)) ([2b6a3b4](https://www.github.com/googleapis/google-cloud-go/commit/2b6a3b4609e389da418a83eb60a8ae3710d646d7))
+* **assuredworkloads:** updated google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService service. Clients can now create workloads with US_REGIONAL_ACCESS compliance regime ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **binaryauthorization:** start generating apiv1beta1 ([#3562](https://www.github.com/googleapis/google-cloud-go/issues/3562)) ([56e18a6](https://www.github.com/googleapis/google-cloud-go/commit/56e18a64836ab9482528b212eb139f649f7a35c3))
+* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8))
+* **cloudtasks:** introducing field: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats, Task.pull_message and introducing messages: QueueStats PullMessage docs: updates to max burst size description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **cloudtasks:** introducing fields: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats and introducing messages: QueueStats docs: updates to AppEngineHttpRequest description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **datalabeling:** start generating apiv1beta1 ([#3582](https://www.github.com/googleapis/google-cloud-go/issues/3582)) ([d8a7fee](https://www.github.com/googleapis/google-cloud-go/commit/d8a7feef51d3344fa7e258aba1d9fbdab56dadcf))
+* **dataqna:** start generating apiv1alpha ([#3586](https://www.github.com/googleapis/google-cloud-go/issues/3586)) ([24c5b8f](https://www.github.com/googleapis/google-cloud-go/commit/24c5b8f4f45f8cd8b3001b1ca5a8d80e9f3b39d5))
+* **dialogflow/cx:** Add new Experiment service docs: minor doc update on redact field in intent.proto and page.proto ([0959f27](https://www.github.com/googleapis/google-cloud-go/commit/0959f27e85efe94d39437ceef0ff62ddceb8e7a7))
+* **dialogflow/cx:** added support for test cases and agent validation ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **dialogflow/cx:** added support for test cases and agent validation ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f))
+* **dialogflow:** add C++ targets for DialogFlow ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7))
+* **documentai:** start generating apiv1beta3 ([#3595](https://www.github.com/googleapis/google-cloud-go/issues/3595)) ([5ae21fa](https://www.github.com/googleapis/google-cloud-go/commit/5ae21fa1cfb8b8dacbcd0fc43eee430f7db63102))
+* **domains:** start generating apiv1beta1 ([#3632](https://www.github.com/googleapis/google-cloud-go/issues/3632)) ([b8ada6f](https://www.github.com/googleapis/google-cloud-go/commit/b8ada6f197e680d0bb26aa031e6431bc099a3149))
+* **godocfx:** include alt documentation link ([#3530](https://www.github.com/googleapis/google-cloud-go/issues/3530)) ([806cdd5](https://www.github.com/googleapis/google-cloud-go/commit/806cdd56fb6fdddd7a6c1354e55e0d1259bd6c8b))
+* **internal/gapicgen:** change commit formatting to match standard ([#3500](https://www.github.com/googleapis/google-cloud-go/issues/3500)) ([d1e3d46](https://www.github.com/googleapis/google-cloud-go/commit/d1e3d46c47c425581e2b149c07f8e27ffc373c7e))
+* **internal/godocfx:** xref function declarations ([#3615](https://www.github.com/googleapis/google-cloud-go/issues/3615)) ([2bdbb87](https://www.github.com/googleapis/google-cloud-go/commit/2bdbb87a682d799cf5e262a61a3ef1faf41151af))
+* **mediatranslation:** start generating apiv1beta1 ([#3636](https://www.github.com/googleapis/google-cloud-go/issues/3636)) ([4129469](https://www.github.com/googleapis/google-cloud-go/commit/412946966cf7f53c51deff1b1cc1a12d62ed0279))
+* **memcache:** start generating apiv1 ([#3579](https://www.github.com/googleapis/google-cloud-go/issues/3579)) ([eabf7cf](https://www.github.com/googleapis/google-cloud-go/commit/eabf7cfde7b3a3cc1b35c320ba52e07be9926359))
+* **networkconnectivity:** initial generation of apiv1alpha1 ([#3567](https://www.github.com/googleapis/google-cloud-go/issues/3567)) ([adf489a](https://www.github.com/googleapis/google-cloud-go/commit/adf489a536292e3196677621477eae0d52761e7f))
+* **orgpolicy:** start generating apiv2 ([#3652](https://www.github.com/googleapis/google-cloud-go/issues/3652)) ([c103847](https://www.github.com/googleapis/google-cloud-go/commit/c1038475779fda3589aa9659d4ad0b703036b531))
+* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8))
+* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9af529c](https://www.github.com/googleapis/google-cloud-go/commit/9af529c21e98b62c4617f7a7191c307659cf8bb8))
+* **recommender:** add bindings for folder/org type resources for protos in recommendations, insights and recommender_service to enable v1 api for folder/org ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **recommender:** auto generated cl for enabling v1beta1 folder/org APIs and integration test ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **resourcemanager:** start generating apiv2 ([#3575](https://www.github.com/googleapis/google-cloud-go/issues/3575)) ([93d0ebc](https://www.github.com/googleapis/google-cloud-go/commit/93d0ebceb4270351518a13958005bb68f0cace60))
+* **secretmanager:** added expire_time and ttl fields to Secret ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1))
+* **secretmanager:** added expire_time and ttl fields to Secret ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
+* **servicecontrol:** start generating apiv1 ([#3644](https://www.github.com/googleapis/google-cloud-go/issues/3644)) ([f84938b](https://www.github.com/googleapis/google-cloud-go/commit/f84938bb4042a5629fd66bda42de028fd833648a))
+* **servicemanagement:** start generating apiv1 ([#3614](https://www.github.com/googleapis/google-cloud-go/issues/3614)) ([b96134f](https://www.github.com/googleapis/google-cloud-go/commit/b96134fe91c182237359000cd544af5fec60d7db))
+
+
+### Bug Fixes
+
+* **datacatalog:** Update PHP package name casing to match the PHP namespace in the proto files ([c7ecf0f](https://www.github.com/googleapis/google-cloud-go/commit/c7ecf0f3f454606b124e52d20af2545b2c68646f))
+* **internal/godocfx:** add TOC element for module root package ([#3599](https://www.github.com/googleapis/google-cloud-go/issues/3599)) ([1d6eb23](https://www.github.com/googleapis/google-cloud-go/commit/1d6eb238206fcf8815d88981527ef176851afd7a))
+* **profiler:** Force gax to retry in case of certificate errors ([#3178](https://www.github.com/googleapis/google-cloud-go/issues/3178)) ([35dcd72](https://www.github.com/googleapis/google-cloud-go/commit/35dcd725dcd03266ed7439de40c277376b38cd71))
+
+## [0.75.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.74.0...v0.75.0) (2021-01-11)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#3514](https://www.github.com/googleapis/google-cloud-go/issues/3514) [#3501](https://www.github.com/googleapis/google-cloud-go/issues/3501) [#3497](https://www.github.com/googleapis/google-cloud-go/issues/3497) [#3455](https://www.github.com/googleapis/google-cloud-go/issues/3455) [#3448](https://www.github.com/googleapis/google-cloud-go/issues/3448)
+* **channel:** start generating apiv1 ([#3517](https://www.github.com/googleapis/google-cloud-go/issues/3517)) ([2cf3b3c](https://www.github.com/googleapis/google-cloud-go/commit/2cf3b3cf7d99f2efd6868a710fad9e935fc87965))
+
+
+### Bug Fixes
+
+* **internal/gapicgen:** don't regen files that have been deleted ([#3471](https://www.github.com/googleapis/google-cloud-go/issues/3471)) ([112ca94](https://www.github.com/googleapis/google-cloud-go/commit/112ca9416cc8a2502b32547dc8d789655452f84a))
+
+## [0.74.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.73.0...v0.74.0) (2020-12-10)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#3440](https://www.github.com/googleapis/google-cloud-go/issues/3440) [#3436](https://www.github.com/googleapis/google-cloud-go/issues/3436) [#3394](https://www.github.com/googleapis/google-cloud-go/issues/3394) [#3391](https://www.github.com/googleapis/google-cloud-go/issues/3391) [#3374](https://www.github.com/googleapis/google-cloud-go/issues/3374)
+* **internal/gapicgen:** support generating only gapics with genlocal ([#3383](https://www.github.com/googleapis/google-cloud-go/issues/3383)) ([eaa742a](https://www.github.com/googleapis/google-cloud-go/commit/eaa742a248dc7d93c019863248f28e37f88aae84))
+* **servicedirectory:** start generating apiv1 ([#3382](https://www.github.com/googleapis/google-cloud-go/issues/3382)) ([2774925](https://www.github.com/googleapis/google-cloud-go/commit/2774925925909071ebc585cf7400373334c156ba))
+
+
+### Bug Fixes
+
+* **internal/gapicgen:** don't create genproto pr as draft ([#3379](https://www.github.com/googleapis/google-cloud-go/issues/3379)) ([517ab0f](https://www.github.com/googleapis/google-cloud-go/commit/517ab0f25e544498c5374b256354bc41ba936ad5))
+
+## [0.73.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.72.0...v0.73.0) (2020-12-04)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#3335](https://www.github.com/googleapis/google-cloud-go/issues/3335) [#3294](https://www.github.com/googleapis/google-cloud-go/issues/3294) [#3250](https://www.github.com/googleapis/google-cloud-go/issues/3250) [#3229](https://www.github.com/googleapis/google-cloud-go/issues/3229) [#3211](https://www.github.com/googleapis/google-cloud-go/issues/3211) [#3217](https://www.github.com/googleapis/google-cloud-go/issues/3217) [#3212](https://www.github.com/googleapis/google-cloud-go/issues/3212) [#3209](https://www.github.com/googleapis/google-cloud-go/issues/3209) [#3206](https://www.github.com/googleapis/google-cloud-go/issues/3206) [#3199](https://www.github.com/googleapis/google-cloud-go/issues/3199)
+* **artifactregistry:** start generating apiv1beta2 ([#3352](https://www.github.com/googleapis/google-cloud-go/issues/3352)) ([2e6f20b](https://www.github.com/googleapis/google-cloud-go/commit/2e6f20b0ab438b0b366a1a3802fc64d1a0e66fff))
+* **internal:** copy pubsub Message and PublishResult to internal/pubsub ([#3351](https://www.github.com/googleapis/google-cloud-go/issues/3351)) ([82521ee](https://www.github.com/googleapis/google-cloud-go/commit/82521ee5038735c1663525658d27e4df00ec90be))
+* **internal/gapicgen:** support adding context to regen ([#3174](https://www.github.com/googleapis/google-cloud-go/issues/3174)) ([941ab02](https://www.github.com/googleapis/google-cloud-go/commit/941ab029ba6f7f33e8b2e31e3818aeb68312a999))
+* **internal/kokoro:** add ability to regen all DocFX YAML ([#3191](https://www.github.com/googleapis/google-cloud-go/issues/3191)) ([e12046b](https://www.github.com/googleapis/google-cloud-go/commit/e12046bc4431d33aee72c324e6eb5cc907a4214a))
+
+
+### Bug Fixes
+
+* **internal/godocfx:** filter out test packages from other modules ([#3197](https://www.github.com/googleapis/google-cloud-go/issues/3197)) ([1d397aa](https://www.github.com/googleapis/google-cloud-go/commit/1d397aa8b41f8f980cba1d3dcc50f11e4d4f4ca0))
+
+## [0.72.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.71.0...v0.72.0) (2020-11-10)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#3177](https://www.github.com/googleapis/google-cloud-go/issues/3177) [#3164](https://www.github.com/googleapis/google-cloud-go/issues/3164) [#3149](https://www.github.com/googleapis/google-cloud-go/issues/3149) [#3142](https://www.github.com/googleapis/google-cloud-go/issues/3142) [#3136](https://www.github.com/googleapis/google-cloud-go/issues/3136) [#3130](https://www.github.com/googleapis/google-cloud-go/issues/3130) [#3121](https://www.github.com/googleapis/google-cloud-go/issues/3121) [#3119](https://www.github.com/googleapis/google-cloud-go/issues/3119)
+
+
+### Bug Fixes
+
+* **all:** Update hand-written clients to not use WithEndpoint override ([#3111](https://www.github.com/googleapis/google-cloud-go/issues/3111)) ([f0cfd05](https://www.github.com/googleapis/google-cloud-go/commit/f0cfd0532f5204ff16f7bae406efa72603d16f44))
+* **internal/godocfx:** rename README files to pkg-readme ([#3185](https://www.github.com/googleapis/google-cloud-go/issues/3185)) ([d3a8571](https://www.github.com/googleapis/google-cloud-go/commit/d3a85719be411b692aede3331abb29b5a7b3da9a))
+
+
+## [0.71.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.70.0...v0.71.0) (2020-10-30)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#3115](https://www.github.com/googleapis/google-cloud-go/issues/3115) [#3106](https://www.github.com/googleapis/google-cloud-go/issues/3106) [#3102](https://www.github.com/googleapis/google-cloud-go/issues/3102) [#3083](https://www.github.com/googleapis/google-cloud-go/issues/3083) [#3073](https://www.github.com/googleapis/google-cloud-go/issues/3073) [#3057](https://www.github.com/googleapis/google-cloud-go/issues/3057) [#3044](https://www.github.com/googleapis/google-cloud-go/issues/3044)
+* **billing/budgets:** start generating apiv1 ([#3099](https://www.github.com/googleapis/google-cloud-go/issues/3099)) ([e760c85](https://www.github.com/googleapis/google-cloud-go/commit/e760c859de88a6e79b6dffc653dbf75f1630d8e3))
+* **internal:** auto-run godocfx on new mods ([#3069](https://www.github.com/googleapis/google-cloud-go/issues/3069)) ([49f497e](https://www.github.com/googleapis/google-cloud-go/commit/49f497eab80ce34dfb4ca41f033a5c0429ff5e42))
+* **pubsublite:** Added Pub/Sub Lite clients and routing headers ([#3105](https://www.github.com/googleapis/google-cloud-go/issues/3105)) ([98668fa](https://www.github.com/googleapis/google-cloud-go/commit/98668fa5457d26ed34debee708614f027020e5bc))
+* **pubsublite:** Message type and message routers ([#3077](https://www.github.com/googleapis/google-cloud-go/issues/3077)) ([179fc55](https://www.github.com/googleapis/google-cloud-go/commit/179fc550b545a5344358a243da7007ffaa7b5171))
+* **pubsublite:** Pub/Sub Lite admin client ([#3036](https://www.github.com/googleapis/google-cloud-go/issues/3036)) ([749473e](https://www.github.com/googleapis/google-cloud-go/commit/749473ead30bf1872634821d3238d1299b99acc6))
+* **pubsublite:** Publish settings and errors ([#3075](https://www.github.com/googleapis/google-cloud-go/issues/3075)) ([9eb9fcb](https://www.github.com/googleapis/google-cloud-go/commit/9eb9fcb79f17ad7c08c77c455ba3e8d89e3bdbf2))
+* **pubsublite:** Retryable stream wrapper ([#3068](https://www.github.com/googleapis/google-cloud-go/issues/3068)) ([97cfd45](https://www.github.com/googleapis/google-cloud-go/commit/97cfd4587f2f51996bd685ff486308b70eb51900))
+
+
+### Bug Fixes
+
+* **internal/kokoro:** remove unnecessary cd ([#3071](https://www.github.com/googleapis/google-cloud-go/issues/3071)) ([c1a4c3e](https://www.github.com/googleapis/google-cloud-go/commit/c1a4c3eaffcdc3cffe0e223fcfa1f60879cd23bb))
+* **pubsublite:** Disable integration tests for project id ([#3087](https://www.github.com/googleapis/google-cloud-go/issues/3087)) ([a0982f7](https://www.github.com/googleapis/google-cloud-go/commit/a0982f79d6461feabdf31363f29fed7dc5677fe7))
+
+## [0.70.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.70.0) (2020-10-19)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#3047](https://www.github.com/googleapis/google-cloud-go/issues/3047) [#3035](https://www.github.com/googleapis/google-cloud-go/issues/3035) [#3025](https://www.github.com/googleapis/google-cloud-go/issues/3025)
+* **managedidentities:** start generating apiv1 ([#3032](https://www.github.com/googleapis/google-cloud-go/issues/3032)) ([10ccca2](https://www.github.com/googleapis/google-cloud-go/commit/10ccca238074d24fea580a4cd8e64478818b0b44))
+* **pubsublite:** Types for resource paths and topic/subscription configs ([#3026](https://www.github.com/googleapis/google-cloud-go/issues/3026)) ([6f7fa86](https://www.github.com/googleapis/google-cloud-go/commit/6f7fa86ed906258f98d996aab40184f3a46f9714))
+
+## [0.69.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.69.1) (2020-10-14)
+
+This is an empty release that was created solely to aid in pubsublite's module
+carve out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## [0.69.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.68.0...v0.69.0) (2020-10-14)
+
+
+### Features
+
+* **accessapproval:** start generating apiv1 ([#3002](https://www.github.com/googleapis/google-cloud-go/issues/3002)) ([709d6e7](https://www.github.com/googleapis/google-cloud-go/commit/709d6e76393e6ac00ff488efd83bfe873173b045))
+* **all:** auto-regenerate gapics , refs [#3010](https://www.github.com/googleapis/google-cloud-go/issues/3010) [#3005](https://www.github.com/googleapis/google-cloud-go/issues/3005) [#2993](https://www.github.com/googleapis/google-cloud-go/issues/2993) [#2989](https://www.github.com/googleapis/google-cloud-go/issues/2989) [#2981](https://www.github.com/googleapis/google-cloud-go/issues/2981) [#2976](https://www.github.com/googleapis/google-cloud-go/issues/2976) [#2968](https://www.github.com/googleapis/google-cloud-go/issues/2968) [#2958](https://www.github.com/googleapis/google-cloud-go/issues/2958)
+* **cmd/go-cloud-debug-agent:** mark as deprecated ([#2964](https://www.github.com/googleapis/google-cloud-go/issues/2964)) ([276ec88](https://www.github.com/googleapis/google-cloud-go/commit/276ec88b05852c33a3ba437e18d072f7ffd8fd33))
+* **godocfx:** add nesting to TOC ([#2972](https://www.github.com/googleapis/google-cloud-go/issues/2972)) ([3a49b2d](https://www.github.com/googleapis/google-cloud-go/commit/3a49b2d142a353f98429235c3f380431430b4dbf))
+* **internal/godocfx:** HTML-ify package summary ([#2986](https://www.github.com/googleapis/google-cloud-go/issues/2986)) ([9e64b01](https://www.github.com/googleapis/google-cloud-go/commit/9e64b018255bd8d9b31d60e8f396966251de946b))
+* **internal/kokoro:** make publish_docs VERSION optional ([#2979](https://www.github.com/googleapis/google-cloud-go/issues/2979)) ([76e35f6](https://www.github.com/googleapis/google-cloud-go/commit/76e35f689cb60bd5db8e14b8c8d367c5902bcb0e))
+* **websecurityscanner:** start generating apiv1 ([#3006](https://www.github.com/googleapis/google-cloud-go/issues/3006)) ([1d92e20](https://www.github.com/googleapis/google-cloud-go/commit/1d92e2062a13f62d7a96be53a7354c0cacca6a85))
+
+
+### Bug Fixes
+
+* **godocfx:** make extra files optional, filter out third_party ([#2985](https://www.github.com/googleapis/google-cloud-go/issues/2985)) ([f268921](https://www.github.com/googleapis/google-cloud-go/commit/f2689214a24b2e325d3e8f54441bb11fbef925f0))
+
+## [0.68.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.67.0...v0.68.0) (2020-10-02)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#2952](https://www.github.com/googleapis/google-cloud-go/issues/2952) [#2944](https://www.github.com/googleapis/google-cloud-go/issues/2944) [#2935](https://www.github.com/googleapis/google-cloud-go/issues/2935)
+
+## [0.67.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.66.0...v0.67.0) (2020-09-29)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#2933](https://www.github.com/googleapis/google-cloud-go/issues/2933) [#2919](https://www.github.com/googleapis/google-cloud-go/issues/2919) [#2913](https://www.github.com/googleapis/google-cloud-go/issues/2913) [#2910](https://www.github.com/googleapis/google-cloud-go/issues/2910) [#2899](https://www.github.com/googleapis/google-cloud-go/issues/2899) [#2897](https://www.github.com/googleapis/google-cloud-go/issues/2897) [#2886](https://www.github.com/googleapis/google-cloud-go/issues/2886) [#2877](https://www.github.com/googleapis/google-cloud-go/issues/2877) [#2869](https://www.github.com/googleapis/google-cloud-go/issues/2869) [#2864](https://www.github.com/googleapis/google-cloud-go/issues/2864)
+* **assuredworkloads:** start generating apiv1beta1 ([#2866](https://www.github.com/googleapis/google-cloud-go/issues/2866)) ([7598c4d](https://www.github.com/googleapis/google-cloud-go/commit/7598c4dd2462e8270a2c7b1f496af58ca81ff568))
+* **dialogflow/cx:** start generating apiv3beta1 ([#2875](https://www.github.com/googleapis/google-cloud-go/issues/2875)) ([37ca93a](https://www.github.com/googleapis/google-cloud-go/commit/37ca93ad69eda363d956f0174d444ed5914f5a72))
+* **docfx:** add support for examples ([#2884](https://www.github.com/googleapis/google-cloud-go/issues/2884)) ([0cc0de3](https://www.github.com/googleapis/google-cloud-go/commit/0cc0de300d58be6d3b7eeb2f1baebfa6df076830))
+* **godocfx:** include README in output ([#2927](https://www.github.com/googleapis/google-cloud-go/issues/2927)) ([f084690](https://www.github.com/googleapis/google-cloud-go/commit/f084690a2ea08ce73bafaaced95ad271fd01e11e))
+* **talent:** start generating apiv4 ([#2871](https://www.github.com/googleapis/google-cloud-go/issues/2871)) ([5c98071](https://www.github.com/googleapis/google-cloud-go/commit/5c98071b03822c58862d1fa5442ff36d627f1a61))
+
+
+### Bug Fixes
+
+* **godocfx:** filter out other modules, sort pkgs ([#2894](https://www.github.com/googleapis/google-cloud-go/issues/2894)) ([868db45](https://www.github.com/googleapis/google-cloud-go/commit/868db45e2e6f4e9ad48432be86c849f335e1083d))
+* **godocfx:** shorten function names ([#2880](https://www.github.com/googleapis/google-cloud-go/issues/2880)) ([48a0217](https://www.github.com/googleapis/google-cloud-go/commit/48a0217930750c1f4327f2622b0f2a3ec8afc0b7))
+* **translate:** properly name examples ([#2892](https://www.github.com/googleapis/google-cloud-go/issues/2892)) ([c19e141](https://www.github.com/googleapis/google-cloud-go/commit/c19e1415e6fa76b7ea66a7fc67ad3ba22670a2ba)), refs [#2883](https://www.github.com/googleapis/google-cloud-go/issues/2883)
+
+## [0.66.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.65.0...v0.66.0) (2020-09-15)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#2849](https://www.github.com/googleapis/google-cloud-go/issues/2849) [#2843](https://www.github.com/googleapis/google-cloud-go/issues/2843) [#2841](https://www.github.com/googleapis/google-cloud-go/issues/2841) [#2819](https://www.github.com/googleapis/google-cloud-go/issues/2819) [#2816](https://www.github.com/googleapis/google-cloud-go/issues/2816) [#2809](https://www.github.com/googleapis/google-cloud-go/issues/2809) [#2801](https://www.github.com/googleapis/google-cloud-go/issues/2801) [#2795](https://www.github.com/googleapis/google-cloud-go/issues/2795) [#2791](https://www.github.com/googleapis/google-cloud-go/issues/2791) [#2788](https://www.github.com/googleapis/google-cloud-go/issues/2788) [#2781](https://www.github.com/googleapis/google-cloud-go/issues/2781)
+* **analytics/data:** start generating apiv1alpha ([#2796](https://www.github.com/googleapis/google-cloud-go/issues/2796)) ([e93132c](https://www.github.com/googleapis/google-cloud-go/commit/e93132c77725de3c80c34d566df269eabfcfde93))
+* **area120/tables:** start generating apiv1alpha1 ([#2807](https://www.github.com/googleapis/google-cloud-go/issues/2807)) ([9e5a4d0](https://www.github.com/googleapis/google-cloud-go/commit/9e5a4d0dee0d83be0c020797a2f579d9e42ef521))
+* **cloudbuild:** Start generating apiv1/v3 ([#2830](https://www.github.com/googleapis/google-cloud-go/issues/2830)) ([358a536](https://www.github.com/googleapis/google-cloud-go/commit/358a5368da64cf4868551652e852ceb453504f64))
+* **godocfx:** create Go DocFX YAML generator ([#2854](https://www.github.com/googleapis/google-cloud-go/issues/2854)) ([37c70ac](https://www.github.com/googleapis/google-cloud-go/commit/37c70acd91768567106ff3b2b130835998d974c5))
+* **security/privateca:** start generating apiv1beta1 ([#2806](https://www.github.com/googleapis/google-cloud-go/issues/2806)) ([f985141](https://www.github.com/googleapis/google-cloud-go/commit/f9851412183989dc69733a7e61ad39a9378cd893))
+* **video/transcoder:** start generating apiv1beta1 ([#2797](https://www.github.com/googleapis/google-cloud-go/issues/2797)) ([390dda8](https://www.github.com/googleapis/google-cloud-go/commit/390dda8ff2c526e325e434ad0aec778b7aa97ea4))
+* **workflows:** start generating apiv1beta ([#2799](https://www.github.com/googleapis/google-cloud-go/issues/2799)) ([0e39665](https://www.github.com/googleapis/google-cloud-go/commit/0e39665ccb788caec800e2887d433ca6e0cf9901))
+* **workflows/executions:** start generating apiv1beta ([#2800](https://www.github.com/googleapis/google-cloud-go/issues/2800)) ([7eaa0d1](https://www.github.com/googleapis/google-cloud-go/commit/7eaa0d184c6a2141d8bf4514b3fd20715b50a580))
+
+
+### Bug Fixes
+
+* **internal/kokoro:** install the right version of docuploader ([#2861](https://www.github.com/googleapis/google-cloud-go/issues/2861)) ([d8489c1](https://www.github.com/googleapis/google-cloud-go/commit/d8489c141b8b02e83d6426f4baebd3658ae11639))
+* **internal/kokoro:** remove extra dash in doc tarball ([#2862](https://www.github.com/googleapis/google-cloud-go/issues/2862)) ([690ddcc](https://www.github.com/googleapis/google-cloud-go/commit/690ddccc5202b5a70f1afa5c518dca37b6a0861c))
+* **profiler:** do not collect disabled profile types ([#2836](https://www.github.com/googleapis/google-cloud-go/issues/2836)) ([faeb498](https://www.github.com/googleapis/google-cloud-go/commit/faeb4985bf6afdcddba4553efa874642bf7f08ed)), refs [#2835](https://www.github.com/googleapis/google-cloud-go/issues/2835)
+
+
+### Reverts
+
+* **cloudbuild:** revert "feat(cloudbuild): Start generating apiv1/v3" ([#2840](https://www.github.com/googleapis/google-cloud-go/issues/2840)) ([3aaf755](https://www.github.com/googleapis/google-cloud-go/commit/3aaf755476dfea1700986fc086f53fc1ab756557))
+
+## [0.65.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.64.0...v0.65.0) (2020-08-27)
+
+
+### Announcements
+
+The following changes will be included in an upcoming release and are not 
+included in this one.
+
+#### Default Deadlines
+
+By default, non-streaming methods, like Create or Get methods, will have a
+default deadline applied to the context provided at call time, unless a context
+deadline is already set. Streaming methods have no default deadline and will run
+indefinitely, unless the context provided at call time contains a deadline.
+
+To opt-out of this behavior, set the environment variable
+`GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE` to `true` prior to
+initializing a client. This opt-out mechanism will be removed in a later
+release, with a notice similar to this one ahead of its removal.
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#2774](https://www.github.com/googleapis/google-cloud-go/issues/2774) [#2764](https://www.github.com/googleapis/google-cloud-go/issues/2764)
+
+
+### Bug Fixes
+
+* **all:** correct minor typos ([#2756](https://www.github.com/googleapis/google-cloud-go/issues/2756)) ([03d78b5](https://www.github.com/googleapis/google-cloud-go/commit/03d78b5627819cb64d1f3866f90043f709e825e1))
+* **compute/metadata:** remove leading slash for Get suffix ([#2760](https://www.github.com/googleapis/google-cloud-go/issues/2760)) ([f0d605c](https://www.github.com/googleapis/google-cloud-go/commit/f0d605ccf32391a9da056a2c551158bd076c128d))
+
+## [0.64.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.63.0...v0.64.0) (2020-08-18)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#2734](https://www.github.com/googleapis/google-cloud-go/issues/2734) [#2731](https://www.github.com/googleapis/google-cloud-go/issues/2731) [#2730](https://www.github.com/googleapis/google-cloud-go/issues/2730) [#2725](https://www.github.com/googleapis/google-cloud-go/issues/2725) [#2722](https://www.github.com/googleapis/google-cloud-go/issues/2722) [#2706](https://www.github.com/googleapis/google-cloud-go/issues/2706)
+* **pubsublite:** start generating v1 ([#2700](https://www.github.com/googleapis/google-cloud-go/issues/2700)) ([d2e777f](https://www.github.com/googleapis/google-cloud-go/commit/d2e777f56e08146646b3ffb7a78856795094ab4e))
+
+## [0.63.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.62.0...v0.63.0) (2020-08-05)
+
+
+### Features
+
+* **all:** auto-regenerate gapics ([#2682](https://www.github.com/googleapis/google-cloud-go/issues/2682)) ([63bfd63](https://www.github.com/googleapis/google-cloud-go/commit/63bfd638da169e0f1f4fa4a5125da2955022dc04))
+* **analytics/admin:** start generating apiv1alpha ([#2670](https://www.github.com/googleapis/google-cloud-go/issues/2670)) ([268199e](https://www.github.com/googleapis/google-cloud-go/commit/268199e5350a64a83ecf198e0e0fa4863f00fa6c))
+* **functions/metadata:** Special-case marshaling ([#2669](https://www.github.com/googleapis/google-cloud-go/issues/2669)) ([d8d7fc6](https://www.github.com/googleapis/google-cloud-go/commit/d8d7fc66cbc42f79bec25fb0daaf53d926e3645b))
+* **gaming:** start generate apiv1 ([#2681](https://www.github.com/googleapis/google-cloud-go/issues/2681)) ([1adfd0a](https://www.github.com/googleapis/google-cloud-go/commit/1adfd0aed6b2c0e1dd0c575a5ec0f49388fa5601))
+* **internal/kokoro:** add script to test compatibility with samples ([#2637](https://www.github.com/googleapis/google-cloud-go/issues/2637)) ([f2aa76a](https://www.github.com/googleapis/google-cloud-go/commit/f2aa76a0058e86c1c33bb634d2c084b58f77ab32))
+
+## v0.62.0
+
+### Announcements
+
+- There was a breaking change to `cloud.google.com/go/dataproc/apiv1` that was
+  merged in [this PR](https://github.com/googleapis/google-cloud-go/pull/2606).
+  This fixed a broken API response for `DiagnoseCluster`. When polling on the
+  Long Running Operation(LRO), the API now returns
+  `(*dataprocpb.DiagnoseClusterResults, error)` whereas it only returned an
+  `error` before.
+
+### Changes
+
+- all:
+  - Updated all direct dependencies.
+  - Updated contributing guidelines to suggest allowing edits from maintainers.
+- billing/budgets:
+  - Start generating client for apiv1beta1.
+- functions:
+  - Start generating client for apiv1.
+- notebooks:
+  - Start generating client apiv1beta1.
+- profiler:
+  - update proftest to support parsing floating-point backoff durations.
+  - Fix the regexp used to parse backoff duration.
+- Various updates to autogenerated clients.
+
+## v0.61.0
+
+### Changes
+
+- all:
+  - Update all direct dependencies.
+- dashboard:
+  - Start generating client for apiv1.
+- policytroubleshooter:
+  - Start generating client for apiv1.
+- profiler:
+  - Disable OpenCensus Telemetry for requests made by the profiler package by default. You can re-enable it using `profiler.Config.EnableOCTelemetry`.
+- Various updates to autogenerated clients.
+
+## v0.60.0
+
+### Changes
+
+- all:
+  - Refactored examples to reduce module dependencies.
+  - Update sub-modules to use cloud.google.com/go v0.59.0.
+- internal:
+  - Start generating client for gaming apiv1beta.
+- Various updates to autogenerated clients.
+
 ## v0.59.0
 
 ### Announcements
@@ -1538,5 +1978,3 @@ Natural Language.
 [`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
 This client uses gRPC as its transport layer, and supports log reading, sinks
 and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.
-
-

+ 65 - 12
vendor/cloud.google.com/go/CONTRIBUTING.md

@@ -21,21 +21,25 @@
     `cd google-cloud-go`
 
 1. Fork the repo.
-   
+
 1. Set your fork as a remote:
     `git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git`
 
-1. Make changes (see [Formatting](#formatting) and [Style](#style)), commit to
-   your fork.
+1. Make changes, commit to your fork.
 
    Commit messages should follow the
-   [Go project style](https://github.com/golang/go/wiki/CommitMessage). For example:
+   [Conventional Commits Style](https://www.conventionalcommits.org). The scope
+   portion should always be filled with the name of the package affected by the
+   changes being made. For example:
    ```
-   functions: add gophers codelab
+   feat(functions): add gophers codelab
    ```
 
 1. Send a pull request with your changes.
 
+   To minimize friction, consider setting `Allow edits from maintainers` on the
+   PR, which will enable project committers and automation to update your PR.
+
 1. A maintainer will review the pull request and make comments.
 
    Prefer adding additional commits over amending and force-pushing since it can
@@ -43,7 +47,13 @@
 
    Commits will be squashed when they're merged.
 
-## Integration Tests
+## Testing
+
+We test code against two versions of Go, the minimum and maximum versions
+supported by our clients. To see which versions these are, check out our
+[README](README.md#supported-versions).
+
+### Integration Tests
 
 In addition to the unit tests, you may run the integration test suite. These
 directions describe setting up your environment to run integration tests for
@@ -93,7 +103,8 @@ Next, ensure the following APIs are enabled in the general project:
 - Google Compute Engine Instance Group Updater API
 - Google Compute Engine Instance Groups API
 - Kubernetes Engine API
-- Stackdriver Error Reporting API
+- Cloud Error Reporting API
+- Pub/Sub Lite API
 
 Next, create a Datastore database in the general project, and a Firestore
 database in the Firestore project.
@@ -118,10 +129,13 @@ project's service account.
 (e.g. doorway-cliff-677) for the Firestore project.
 - `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
 Firestore project's service account.
+- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API created above.
+
+As part of the setup that follows, the following variables will be configured:
+
 - `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
 in the form
 "projects/P/locations/L/keyRings/R". The creation of this is described below.
-- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API.
 - `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.
 
 Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
@@ -140,7 +154,7 @@ $ gcloud auth login
 $ gcloud datastore indexes create datastore/testdata/index.yaml
 
 # Creates a Google Cloud storage bucket with the same name as your test project,
-# and with the Stackdriver Logging service account as owner, for the sink
+# and with the Cloud Logging service account as owner, for the sink
 # integration tests in logging.
 $ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
 $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
@@ -148,7 +162,7 @@ $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
 # Creates a PubSub topic for integration tests of storage notifications.
 $ gcloud beta pubsub topics create go-storage-notification-test
 # Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
-# "service-<numberic project id>@gs-project-accounts.iam.gserviceaccount.com"
+# "service-<numeric project id>@gs-project-accounts.iam.gserviceaccount.com"
 # as a publisher to that topic.
 
 # Creates a Spanner instance for the spanner integration tests.
@@ -167,7 +181,38 @@ $ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --pu
 # Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
 $ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
 # Authorizes Google Cloud Storage to encrypt and decrypt using key1.
-gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
+$ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
+```
+
+It may be useful to add exports to your shell initialization for future use.
+For instance, in `.zshrc`:
+
+```sh
+#### START GO SDK Test Variables
+# Developers Console project's ID (e.g. bamboo-shift-455) for the general project.
+export GCLOUD_TESTS_GOLANG_PROJECT_ID=your-project
+
+# The path to the JSON key file of the general project's service account.
+export GCLOUD_TESTS_GOLANG_KEY=~/directory/your-project-abcd1234.json
+
+# Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project.
+export GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID=your-firestore-project
+
+# The path to the JSON key file of the Firestore project's service account.
+export GCLOUD_TESTS_GOLANG_FIRESTORE_KEY=~/directory/your-firestore-project-abcd1234.json
+
+# The full name of the keyring for the tests, in the form "projects/P/locations/L/keyRings/R".
+# The creation of this is described above.
+export MY_KEYRING=my-golang-sdk-test
+export MY_LOCATION=global
+export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
+
+# API key for using the Translate API.
+export GCLOUD_TESTS_API_KEY=abcdefghijk123456789
+
+# Compute Engine zone. (https://cloud.google.com/compute/docs/regions-zones)
+export GCLOUD_TESTS_GOLANG_ZONE=your-chosen-region
+#### END GO SDK Test Variables
 ```
 
 #### Running
@@ -176,7 +221,15 @@ Once you've done the necessary setup, you can run the integration tests by
 running:
 
 ``` sh
-$ go test -v cloud.google.com/go/...
+$ go test -v ./...
+```
+
+Note that the above command will not run the tests in other modules. To run
+tests on other modules, first navigate to the appropriate
+subdirectory. For instance, to run only the tests for datastore:
+``` sh
+$ cd datastore
+$ go test -v ./...
 ```
 
 #### Replay

+ 55 - 48
vendor/cloud.google.com/go/README.md

@@ -1,6 +1,6 @@
 # Google Cloud Client Libraries for Go
 
-[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://pkg.go.dev/cloud.google.com/go)
+[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go.svg)](https://pkg.go.dev/cloud.google.com/go)
 
 Go packages for [Google Cloud Platform](https://cloud.google.com) services.
 
@@ -25,52 +25,51 @@ To install the packages on your system, *do not clone the repo*. Instead:
 **NOTE:** Some of these packages are under development, and may occasionally
 make backwards-incompatible changes.
 
-**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
-
 ## Supported APIs
 
-Google API                                      | Status       | Package
-------------------------------------------------|--------------|-----------------------------------------------------------
-[Asset][cloud-asset]                            | stable       | [`cloud.google.com/go/asset/apiv1`](https://pkg.go.dev/cloud.google.com/go/asset/v1beta)
-[Automl][cloud-automl]                          | stable       | [`cloud.google.com/go/automl/apiv1`](https://pkg.go.dev/cloud.google.com/go/automl/apiv1)
-[BigQuery][cloud-bigquery]                      | stable       | [`cloud.google.com/go/bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery)
-[Bigtable][cloud-bigtable]                      | stable       | [`cloud.google.com/go/bigtable`](https://pkg.go.dev/cloud.google.com/go/bigtable)
-[Cloudbuild][cloud-build]                       | stable       | [`cloud.google.com/go/cloudbuild/apiv1`](https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1)
-[Cloudtasks][cloud-tasks]                       | stable       | [`cloud.google.com/go/cloudtasks/apiv2`](https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2)
-[Container][cloud-container]                    | stable       | [`cloud.google.com/go/container/apiv1`](https://pkg.go.dev/cloud.google.com/go/container/apiv1)
-[ContainerAnalysis][cloud-containeranalysis]    | beta         | [`cloud.google.com/go/containeranalysis/apiv1`](https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1)
-[Dataproc][cloud-dataproc]                      | stable       | [`cloud.google.com/go/dataproc/apiv1`](https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1)
-[Datastore][cloud-datastore]                    | stable       | [`cloud.google.com/go/datastore`](https://pkg.go.dev/cloud.google.com/go/datastore)
-[Debugger][cloud-debugger]                      | stable       | [`cloud.google.com/go/debugger/apiv2`](https://pkg.go.dev/cloud.google.com/go/debugger/apiv2)
-[Dialogflow][cloud-dialogflow]                  | stable       | [`cloud.google.com/go/dialogflow/apiv2`](https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2)
-[Data Loss Prevention][cloud-dlp]               | stable       | [`cloud.google.com/go/dlp/apiv2`](https://pkg.go.dev/cloud.google.com/go/dlp/apiv2)
-[ErrorReporting][cloud-errors]                  | alpha        | [`cloud.google.com/go/errorreporting`](https://pkg.go.dev/cloud.google.com/go/errorreporting)
-[Firestore][cloud-firestore]                    | stable       | [`cloud.google.com/go/firestore`](https://pkg.go.dev/cloud.google.com/go/firestore)
-[IAM][cloud-iam]                                | stable       | [`cloud.google.com/go/iam`](https://pkg.go.dev/cloud.google.com/go/iam)
-[IoT][cloud-iot]                                | stable       | [`cloud.google.com/go/iot/apiv1`](https://pkg.go.dev/cloud.google.com/go/iot/apiv1)
-[IRM][cloud-irm]                                | alpha        | [`cloud.google.com/go/irm/apiv1alpha2`](https://pkg.go.dev/cloud.google.com/go/irm/apiv1alpha2)
-[KMS][cloud-kms]                                | stable       | [`cloud.google.com/go/kms/apiv1`](https://pkg.go.dev/cloud.google.com/go/kms/apiv1)
-[Natural Language][cloud-natural-language]      | stable       | [`cloud.google.com/go/language/apiv1`](https://pkg.go.dev/cloud.google.com/go/language/apiv1)
-[Logging][cloud-logging]                        | stable       | [`cloud.google.com/go/logging`](https://pkg.go.dev/cloud.google.com/go/logging)
-[Memorystore][cloud-memorystore]                | alpha        | [`cloud.google.com/go/redis/apiv1`](https://pkg.go.dev/cloud.google.com/go/redis/apiv1)
-[Monitoring][cloud-monitoring]                  | stable       | [`cloud.google.com/go/monitoring/apiv3`](https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3)
-[OS Login][cloud-oslogin]                       | stable       | [`cloud.google.com/go/oslogin/apiv1`](https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1)
-[Pub/Sub][cloud-pubsub]                         | stable       | [`cloud.google.com/go/pubsub`](https://pkg.go.dev/cloud.google.com/go/pubsub)
-[Phishing Protection][cloud-phishingprotection] | alpha        | [`cloud.google.com/go/phishingprotection/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1)
-[reCAPTCHA Enterprise][cloud-recaptcha]         | alpha        | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1)
-[Recommender][cloud-recommender]                | beta         | [`cloud.google.com/go/recommender/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1)
-[Scheduler][cloud-scheduler]                    | stable       | [`cloud.google.com/go/scheduler/apiv1`](https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1)
-[Securitycenter][cloud-securitycenter]          | stable       | [`cloud.google.com/go/securitycenter/apiv1`](https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1)
-[Spanner][cloud-spanner]                        | stable       | [`cloud.google.com/go/spanner`](https://pkg.go.dev/cloud.google.com/go/spanner)
-[Speech][cloud-speech]                          | stable       | [`cloud.google.com/go/speech/apiv1`](https://pkg.go.dev/cloud.google.com/go/speech/apiv1)
-[Storage][cloud-storage]                        | stable       | [`cloud.google.com/go/storage`](https://pkg.go.dev/cloud.google.com/go/storage)
-[Talent][cloud-talent]                          | alpha        | [`cloud.google.com/go/talent/apiv4beta1`](https://pkg.go.dev/cloud.google.com/go/talent/apiv4beta1)
-[Text To Speech][cloud-texttospeech]            | stable       | [`cloud.google.com/go/texttospeech/apiv1`](https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1)
-[Trace][cloud-trace]                            | stable       | [`cloud.google.com/go/trace/apiv2`](https://pkg.go.dev/cloud.google.com/go/trace/apiv2)
-[Translate][cloud-translate]                    | stable       | [`cloud.google.com/go/translate`](https://pkg.go.dev/cloud.google.com/go/translate)
-[Video Intelligence][cloud-video]               | beta         | [`cloud.google.com/go/videointelligence/apiv1beta2`](https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1beta2)
-[Vision][cloud-vision]                          | stable       | [`cloud.google.com/go/vision/apiv1`](https://pkg.go.dev/cloud.google.com/go/vision/apiv1)
-[Webrisk][cloud-webrisk]                        | alpha        | [`cloud.google.com/go/webrisk/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1)
+| Google API                                      | Status | Package                                                                                                                       |
+| ----------------------------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
+| [Asset][cloud-asset]                            | stable | [`cloud.google.com/go/asset/apiv1`](https://pkg.go.dev/cloud.google.com/go/asset/v1beta)                                      |
+| [Automl][cloud-automl]                          | stable | [`cloud.google.com/go/automl/apiv1`](https://pkg.go.dev/cloud.google.com/go/automl/apiv1)                                     |
+| [BigQuery][cloud-bigquery]                      | stable | [`cloud.google.com/go/bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery)                                             |
+| [Bigtable][cloud-bigtable]                      | stable | [`cloud.google.com/go/bigtable`](https://pkg.go.dev/cloud.google.com/go/bigtable)                                             |
+| [Cloudbuild][cloud-build]                       | stable | [`cloud.google.com/go/cloudbuild/apiv1`](https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1)                             |
+| [Cloudtasks][cloud-tasks]                       | stable | [`cloud.google.com/go/cloudtasks/apiv2`](https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2)                             |
+| [Container][cloud-container]                    | stable | [`cloud.google.com/go/container/apiv1`](https://pkg.go.dev/cloud.google.com/go/container/apiv1)                               |
+| [ContainerAnalysis][cloud-containeranalysis]    | beta   | [`cloud.google.com/go/containeranalysis/apiv1`](https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1)               |
+| [Dataproc][cloud-dataproc]                      | stable | [`cloud.google.com/go/dataproc/apiv1`](https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1)                                 |
+| [Datastore][cloud-datastore]                    | stable | [`cloud.google.com/go/datastore`](https://pkg.go.dev/cloud.google.com/go/datastore)                                           |
+| [Debugger][cloud-debugger]                      | stable | [`cloud.google.com/go/debugger/apiv2`](https://pkg.go.dev/cloud.google.com/go/debugger/apiv2)                                 |
+| [Dialogflow][cloud-dialogflow]                  | stable | [`cloud.google.com/go/dialogflow/apiv2`](https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2)                             |
+| [Data Loss Prevention][cloud-dlp]               | stable | [`cloud.google.com/go/dlp/apiv2`](https://pkg.go.dev/cloud.google.com/go/dlp/apiv2)                                           |
+| [ErrorReporting][cloud-errors]                  | alpha  | [`cloud.google.com/go/errorreporting`](https://pkg.go.dev/cloud.google.com/go/errorreporting)                                 |
+| [Firestore][cloud-firestore]                    | stable | [`cloud.google.com/go/firestore`](https://pkg.go.dev/cloud.google.com/go/firestore)                                           |
+| [IAM][cloud-iam]                                | stable | [`cloud.google.com/go/iam`](https://pkg.go.dev/cloud.google.com/go/iam)                                                       |
+| [IoT][cloud-iot]                                | stable | [`cloud.google.com/go/iot/apiv1`](https://pkg.go.dev/cloud.google.com/go/iot/apiv1)                                           |
+| [IRM][cloud-irm]                                | alpha  | [`cloud.google.com/go/irm/apiv1alpha2`](https://pkg.go.dev/cloud.google.com/go/irm/apiv1alpha2)                               |
+| [KMS][cloud-kms]                                | stable | [`cloud.google.com/go/kms/apiv1`](https://pkg.go.dev/cloud.google.com/go/kms/apiv1)                                           |
+| [Natural Language][cloud-natural-language]      | stable | [`cloud.google.com/go/language/apiv1`](https://pkg.go.dev/cloud.google.com/go/language/apiv1)                                 |
+| [Logging][cloud-logging]                        | stable | [`cloud.google.com/go/logging`](https://pkg.go.dev/cloud.google.com/go/logging)                                               |
+| [Memorystore][cloud-memorystore]                | alpha  | [`cloud.google.com/go/redis/apiv1`](https://pkg.go.dev/cloud.google.com/go/redis/apiv1)                                       |
+| [Monitoring][cloud-monitoring]                  | stable | [`cloud.google.com/go/monitoring/apiv3`](https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3)                             |
+| [OS Login][cloud-oslogin]                       | stable | [`cloud.google.com/go/oslogin/apiv1`](https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1)                                   |
+| [Pub/Sub][cloud-pubsub]                         | stable | [`cloud.google.com/go/pubsub`](https://pkg.go.dev/cloud.google.com/go/pubsub)                                                 |
+| [Pub/Sub Lite][cloud-pubsublite]                | beta   | [`cloud.google.com/go/pubsublite`](https://pkg.go.dev/cloud.google.com/go/pubsublite)                                                 |
+| [Phishing Protection][cloud-phishingprotection] | alpha  | [`cloud.google.com/go/phishingprotection/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1)   |
+| [reCAPTCHA Enterprise][cloud-recaptcha]         | alpha  | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1) |
+| [Recommender][cloud-recommender]                | beta   | [`cloud.google.com/go/recommender/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1)                 |
+| [Scheduler][cloud-scheduler]                    | stable | [`cloud.google.com/go/scheduler/apiv1`](https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1)                               |
+| [Securitycenter][cloud-securitycenter]          | stable | [`cloud.google.com/go/securitycenter/apiv1`](https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1)                     |
+| [Spanner][cloud-spanner]                        | stable | [`cloud.google.com/go/spanner`](https://pkg.go.dev/cloud.google.com/go/spanner)                                               |
+| [Speech][cloud-speech]                          | stable | [`cloud.google.com/go/speech/apiv1`](https://pkg.go.dev/cloud.google.com/go/speech/apiv1)                                     |
+| [Storage][cloud-storage]                        | stable | [`cloud.google.com/go/storage`](https://pkg.go.dev/cloud.google.com/go/storage)                                               |
+| [Talent][cloud-talent]                          | alpha  | [`cloud.google.com/go/talent/apiv4beta1`](https://pkg.go.dev/cloud.google.com/go/talent/apiv4beta1)                           |
+| [Text To Speech][cloud-texttospeech]            | stable | [`cloud.google.com/go/texttospeech/apiv1`](https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1)                         |
+| [Trace][cloud-trace]                            | stable | [`cloud.google.com/go/trace/apiv2`](https://pkg.go.dev/cloud.google.com/go/trace/apiv2)                                       |
+| [Translate][cloud-translate]                    | stable | [`cloud.google.com/go/translate`](https://pkg.go.dev/cloud.google.com/go/translate)                                           |
+| [Video Intelligence][cloud-video]               | beta   | [`cloud.google.com/go/videointelligence/apiv1beta2`](https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1beta2)     |
+| [Vision][cloud-vision]                          | stable | [`cloud.google.com/go/vision/apiv1`](https://pkg.go.dev/cloud.google.com/go/vision/apiv1)                                     |
+| [Webrisk][cloud-webrisk]                        | alpha  | [`cloud.google.com/go/webrisk/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1)                         |
 
 > **Alpha status**: the API is still being actively developed. As a
 > result, it might change in backward-incompatible ways and is not recommended
@@ -85,10 +84,9 @@ Google API                                      | Status       | Package
 
 Documentation and examples are available at [pkg.go.dev/cloud.google.com/go](https://pkg.go.dev/cloud.google.com/go)
 
-## Go Versions Supported
+## [Go Versions Supported](#supported-versions)
 
-We support the two most recent major versions of Go. If Google App Engine uses
-an older version, we support that as well.
+We currently support Go versions 1.11 and newer.
 
 ## Authorization
 
@@ -153,6 +151,7 @@ for more information.
 [cloud-irm]: https://cloud.google.com/incident-response/docs/concepts
 [cloud-kms]: https://cloud.google.com/kms/
 [cloud-pubsub]: https://cloud.google.com/pubsub/
+[cloud-pubsublite]: https://cloud.google.com/pubsub/lite
 [cloud-storage]: https://cloud.google.com/storage/
 [cloud-language]: https://cloud.google.com/natural-language
 [cloud-logging]: https://cloud.google.com/logging/
@@ -176,3 +175,11 @@ for more information.
 [cloud-video]: https://cloud.google.com/video-intelligence/
 [cloud-vision]: https://cloud.google.com/vision
 [cloud-webrisk]: https://cloud.google.com/web-risk/
+
+## Links
+
+- [Go on Google Cloud](https://cloud.google.com/go/home)
+- [Getting started with Go on Google Cloud](https://cloud.google.com/go/getting-started)
+- [App Engine Quickstart](https://cloud.google.com/appengine/docs/standard/go/quickstart)
+- [Cloud Functions Quickstart](https://cloud.google.com/functions/docs/quickstart-go)
+- [Cloud Run Quickstart](https://cloud.google.com/run/docs/quickstarts/build-and-deploy#go)

+ 64 - 51
vendor/cloud.google.com/go/RELEASING.md

@@ -1,25 +1,6 @@
-# Setup from scratch
+# Releasing
 
-1. [Install Go](https://golang.org/dl/).
-    1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`)
-    is in your `PATH`.
-    1. Check it's working by running `go version`.
-        * If it doesn't work, check the install location, usually
-        `/usr/local/go`, is on your `PATH`.
-
-1. Sign one of the
-[contributor license agreements](#contributor-license-agreements) below.
-
-1. Clone the repo:
-    `git clone https://github.com/googleapis/google-cloud-go`
-
-1. Change into the checked out source:
-    `cd google-cloud-go`
-
-1. Fork the repo and add your fork as a secondary remote (this is necessary in
-   order to create PRs).
-
-# Which module to release?
+## Determine which module to release
 
 The Go client libraries have several modules. Each module does not strictly
 correspond to a single library - they correspond to trees of directories. If a
@@ -27,17 +8,22 @@ file needs to be released, you must release the closest ancestor module.
 
 To see all modules:
 
-```
+```bash
 $ cat `find . -name go.mod` | grep module
+module cloud.google.com/go/pubsub
+module cloud.google.com/go/spanner
 module cloud.google.com/go
 module cloud.google.com/go/bigtable
-module cloud.google.com/go/firestore
 module cloud.google.com/go/bigquery
 module cloud.google.com/go/storage
-module cloud.google.com/go/datastore
-module cloud.google.com/go/pubsub
-module cloud.google.com/go/spanner
+module cloud.google.com/go/pubsublite
+module cloud.google.com/go/firestore
 module cloud.google.com/go/logging
+module cloud.google.com/go/internal/gapicgen
+module cloud.google.com/go/internal/godocfx
+module cloud.google.com/go/internal/examples/fake
+module cloud.google.com/go/internal/examples/mock
+module cloud.google.com/go/datastore
 ```
 
 The `cloud.google.com/go` is the repository root module. Each other module is
@@ -53,18 +39,47 @@ of the `cloud.google.com/go` repository root module. Note: releasing
 `cloud.google.com/go` has no impact on any of the submodules, and vice-versa.
 They are released entirely independently.
 
-# Test failures
+## Test failures
 
 If there are any test failures in the Kokoro build, releases are blocked until
 the failures have been resolved.
 
-# How to release `cloud.google.com/go`
+## How to release
 
+### Automated Releases (`cloud.google.com/go` and submodules)
+
+We now use [release-please](https://github.com/googleapis/release-please) to
+perform automated releases for `cloud.google.com/go` and all submodules.
+
+1. If there are changes that have not yet been released, a
+   [pull request](https://github.com/googleapis/google-cloud-go/pull/2971) will
+   be automatically opened by release-please
+   with a title like "chore: release X.Y.Z" (for the root module) or 
+   "chore: release datastore X.Y.Z" (for the datastore submodule), where X.Y.Z 
+   is the next version to be released. Find the desired pull request
+   [here](https://github.com/googleapis/google-cloud-go/pulls)
 1. Check for failures in the
-   [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any
-   failures in the most recent build, address them before proceeding with the
-   release.
-1. Navigate to `~/code/gocloud/` and switch to master.
+   [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
+   any failures in the most recent build, address them before proceeding with
+   the release. (This applies even if the failures are in a different submodule
+   from the one being released.)
+1. Review the release notes. These are automatically generated from the titles
+   of any merged commits since the previous release. If you would like to edit
+   them, this can be done by updating the changes in the release PR.
+1. To cut a release, approve and merge the pull request. Doing so will
+   update the `CHANGES.md`, tag the merged commit with the appropriate version,
+   and draft a GitHub release which will copy the notes from `CHANGES.md`.
+
+### Manual Release (`cloud.google.com/go`)
+
+If for whatever reason the automated release process is not working as expected,
+here is how to manually cut a release of `cloud.google.com/go`.
+
+1. Check for failures in the
+   [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
+   any failures in the most recent build, address them before proceeding with
+   the release.
+1. Navigate to `google-cloud-go/` and switch to master.
 1. `git pull`
 1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases.
    The current latest tag `$CV` is the largest tag. It should look something
@@ -76,8 +91,11 @@ the failures have been resolved.
    (the `git log` is going to show you things in submodules, which are not going
    to be part of your release).
 1. Edit `CHANGES.md` to include a summary of the changes.
-1. `cd internal/version && go generate && cd -`
-1. Commit the changes, push to your fork, and create a PR.
+1. In `internal/version/version.go`, update `const Repo` to today's date with
+   the format `YYYYMMDD`.
+1. In `internal/version` run `go generate`.
+1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
+   and create a PR titled `chore: release $NV`.
 1. Wait for the PR to be reviewed and merged. Once it's merged, and without
    merging any other PRs in the meantime:
    a. Switch to master.
@@ -85,24 +103,22 @@ the failures have been resolved.
    c. Tag the repo with the next version: `git tag $NV`.
    d. Push the tag to origin:
       `git push origin $NV`
-2. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
+1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
    with the new release, copying the contents of `CHANGES.md`.
 
-# How to release a submodule
-
-We have several submodules, including `cloud.google.com/go/logging`,
-`cloud.google.com/go/datastore`, and so on.
+### Manual Releases (submodules)
 
-To release a submodule:
+If for whatever reason the automated release process is not working as expected,
+here is how to manually cut a release of a submodule.
 
 (these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly)
 
 1. Check for failures in the
-   [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any
-   failures in the most recent build, address them before proceeding with the
-   release. (This applies even if the failures are in a different submodule from the one
-   being released.)
-1. Navigate to `~/code/gocloud/` and switch to master.
+   [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
+   any failures in the most recent build, address them before proceeding with
+   the release. (This applies even if the failures are in a different submodule
+   from the one being released.)
+1. Navigate to `google-cloud-go/` and switch to master.
 1. `git pull`
 1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all
    existing releases. The current latest tag `$CV` is the largest tag. It
@@ -111,8 +127,9 @@ To release a submodule:
 1. On master, run `git log $CV.. -- datastore/` to list all the changes to the
    submodule directory since the last release.
 1. Edit `datastore/CHANGES.md` to include a summary of the changes.
-1. `cd internal/version && go generate && cd -`
-1. Commit the changes, push to your fork, and create a PR.
+1. In `internal/version` run `go generate`.
+1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
+   and create a PR titled `chore(datastore): release $NV`.
 1. Wait for the PR to be reviewed and merged. Once it's merged, and without
    merging any other PRs in the meantime:
    a. Switch to master.
@@ -122,7 +139,3 @@ To release a submodule:
       `git push origin $NV`
 1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
    with the new release, copying the contents of `datastore/CHANGES.md`.
-
-# Appendix
-
-1: This should get better as submodule tooling matures.

+ 1 - 0
vendor/cloud.google.com/go/compute/metadata/metadata.go

@@ -296,6 +296,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
 		// being stable anyway.
 		host = metadataIP
 	}
+	suffix = strings.TrimLeft(suffix, "/")
 	u := "http://" + host + "/computeMetadata/v1/" + suffix
 	req, err := http.NewRequest("GET", u, nil)
 	if err != nil {

+ 26 - 3
vendor/cloud.google.com/go/doc.go

@@ -34,9 +34,18 @@ in this package for details.
 
 Timeouts and Cancellation
 
-By default, all requests in sub-packages will run indefinitely, retrying on transient
-errors when correctness allows. To set timeouts or arrange for cancellation, use
-contexts. See the examples for details.
+By default, non-streaming methods, like Create or Get, will have a default deadline applied to the
+context provided at call time, unless a context deadline is already set. Streaming
+methods have no default deadline and will run indefinitely. To set timeouts or
+arrange for cancellation, use contexts. See the examples for details. Transient
+errors will be retried when correctness allows.
+
+To opt out of default deadlines, set the temporary environment variable
+GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client
+creation. This affects all Google Cloud Go client libraries. This opt-out
+mechanism will be removed in a future release. File an issue at
+https://github.com/googleapis/google-cloud-go if the default deadlines
+cannot work for you.
 
 Do not attempt to control the initial connection (dialing) of a service by setting a
 timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts
@@ -76,6 +85,20 @@ https://godoc.org/google.golang.org/grpc/grpclog for more information.
 For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2".
 
 
+Inspecting errors
+
+Most of the errors returned by the generated clients can be converted into a
+`grpc.Status`. Converting your errors to this type can be a useful to get
+more information about what went wrong while debugging.
+ if err != {
+    if s, ok := status.FromError(err); ok {
+	   log.Println(s.Message())
+	   for _, d := range s.Proto().Details {
+	      log.Println(d)
+	   }
+	}
+ }
+
 Client Stability
 
 Clients in this repository are considered alpha or beta unless otherwise

+ 1 - 1
vendor/cloud.google.com/go/internal/version/version.go

@@ -26,7 +26,7 @@ import (
 
 // Repo is the current version of the client libraries in this
 // repo. It should be a date in YYYYMMDD format.
-const Repo = "20200618"
+const Repo = "20201104"
 
 // Go returns the Go runtime version. The returned string
 // has no whitespace.

+ 60 - 1
vendor/cloud.google.com/go/logging/CHANGES.md

@@ -1,6 +1,65 @@
 # Changes
 
+### [1.4.2](https://www.github.com/googleapis/google-cloud-go/compare/logging/v1.4.1...logging/v1.4.2) (2021-05-20)
+
+
+### Bug Fixes
+
+* **logging:** correctly detect GKE resource ([#4092](https://www.github.com/googleapis/google-cloud-go/issues/4092)) ([a2538e1](https://www.github.com/googleapis/google-cloud-go/commit/a2538e16123c21da62036b56df8c104360f1c2d6))
+
+### [1.4.1](https://www.github.com/googleapis/google-cloud-go/compare/logging/v1.4.0...logging/v1.4.1) (2021-05-03)
+
+
+### Bug Fixes
+
+* **logging:** allow nil or custom zones in resource detection ([#3997](https://www.github.com/googleapis/google-cloud-go/issues/3997)) ([aded90b](https://www.github.com/googleapis/google-cloud-go/commit/aded90b92de3fa3bed079af1aa4879d00572e8ae))
+* **logging:** appengine zone label ([#3998](https://www.github.com/googleapis/google-cloud-go/issues/3998)) ([394a586](https://www.github.com/googleapis/google-cloud-go/commit/394a586bac04953e92a6496a7ca3b61bd64155ab))
+
+## [1.4.0](https://www.github.com/googleapis/google-cloud-go/compare/logging/v1.2.0...logging/v1.4.0) (2021-04-15)
+
+
+### Features
+
+* **logging:** cloud run and functions resource autodetection ([#3909](https://www.github.com/googleapis/google-cloud-go/issues/3909)) ([1204de8](https://www.github.com/googleapis/google-cloud-go/commit/1204de85e58334bf93fecdcb0ab8b581449c2745))
+* **logging:** make toLogEntry function public ([#3863](https://www.github.com/googleapis/google-cloud-go/issues/3863)) ([71828c2](https://www.github.com/googleapis/google-cloud-go/commit/71828c28d424c34da6d0392651739a364cd57e79))
+
+
+### Bug Fixes
+
+* **logging:** Entries has a 24H default filter ([#3120](https://www.github.com/googleapis/google-cloud-go/issues/3120)) ([b32eb82](https://www.github.com/googleapis/google-cloud-go/commit/b32eb822d17838bde91c610a5a9d392d325a592d))
+
+## v1.3.0
+
+- Updates to various dependencies.
+
+## [1.2.0](https://www.github.com/googleapis/google-cloud-go/compare/logging/v1.1.2...v1.2.0) (2021-01-25)
+
+
+### Features
+
+* **logging:** add localIP and Cache fields to HTTPRequest conversion from proto ([#3600](https://www.github.com/googleapis/google-cloud-go/issues/3600)) ([f93027b](https://www.github.com/googleapis/google-cloud-go/commit/f93027b47735e7c181989666e0826bea57ec51e1))
+
+### [1.1.2](https://www.github.com/googleapis/google-cloud-go/compare/logging/v1.1.1...v1.1.2) (2020-11-09)
+
+
+### Bug Fixes
+
+* **logging:** allow X-Cloud-Trace-Context fields to be optional ([#3062](https://www.github.com/googleapis/google-cloud-go/issues/3062)) ([7ff03cf](https://www.github.com/googleapis/google-cloud-go/commit/7ff03cf9a544e753de5b034e18339ecf517d2193))
+* **logging:** do not panic in library code ([#3076](https://www.github.com/googleapis/google-cloud-go/issues/3076)) ([529be97](https://www.github.com/googleapis/google-cloud-go/commit/529be977f766443f49cb8914e17ba07c93841e84)), closes [#1862](https://www.github.com/googleapis/google-cloud-go/issues/1862)
+
+## v1.1.1
+
+- Rebrand "Stackdriver Logging" to "Cloud Logging".
+
+## v1.1.0
+
+- Support unmarshalling stringified Severity.
+- Add exported SetGoogleClientInfo wrappers to manual file.
+- Support no payload.
+- Update "Grouping Logs by Request" docs.
+- Add auto-detection of monitored resources on GAE Standard.
+
 ## v1.0.0
 
 This is the first tag to carve out logging as its own module. See:
-https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.

+ 10 - 7
vendor/cloud.google.com/go/logging/README.md

@@ -1,25 +1,27 @@
-## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging)
+## Cloud Logging [![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/logging.svg)](https://pkg.go.dev/cloud.google.com/go/logging)
 
-- [About Stackdriver Logging](https://cloud.google.com/logging/)
+- [About Cloud Logging](https://cloud.google.com/logging/)
 - [API documentation](https://cloud.google.com/logging/docs)
-- [Go client documentation](https://godoc.org/cloud.google.com/go/logging)
+- [Go client documentation](https://pkg.go.dev/cloud.google.com/go/logging)
 - [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging)
 
 ### Example Usage
 
 First create a `logging.Client` to use throughout your application:
 [snip]:# (logging-1)
+
 ```go
 ctx := context.Background()
 client, err := logging.NewClient(ctx, "my-project")
 if err != nil {
-	// TODO: Handle error.
+   // TODO: Handle error.
 }
 ```
 
 Usually, you'll want to add log entries to a buffer to be periodically flushed
-(automatically and asynchronously) to the Stackdriver Logging service.
+(automatically and asynchronously) to the Cloud Logging service.
 [snip]:# (logging-2)
+
 ```go
 logger := client.Logger("my-log")
 logger.Log(logging.Entry{Payload: "something happened!"})
@@ -27,9 +29,10 @@ logger.Log(logging.Entry{Payload: "something happened!"})
 
 Close your client before your program exits, to flush any buffered log entries.
 [snip]:# (logging-3)
+
 ```go
 err = client.Close()
 if err != nil {
-	// TODO: Handle error.
+   // TODO: Handle error.
 }
-```
+```

+ 623 - 55
vendor/cloud.google.com/go/logging/apiv2/config_client.go

@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC
+// Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Code generated by gapic-generator. DO NOT EDIT.
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
 
 package logging
 
@@ -27,37 +27,69 @@ import (
 	gax "github.com/googleapis/gax-go/v2"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
+	"google.golang.org/api/option/internaloption"
+	gtransport "google.golang.org/api/transport/grpc"
 	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
 )
 
+var newConfigClientHook clientHook
+
 // ConfigCallOptions contains the retry settings for each method of ConfigClient.
 type ConfigCallOptions struct {
-	ListSinks       []gax.CallOption
-	GetSink         []gax.CallOption
-	CreateSink      []gax.CallOption
-	UpdateSink      []gax.CallOption
-	DeleteSink      []gax.CallOption
-	ListExclusions  []gax.CallOption
-	GetExclusion    []gax.CallOption
-	CreateExclusion []gax.CallOption
-	UpdateExclusion []gax.CallOption
-	DeleteExclusion []gax.CallOption
+	ListBuckets        []gax.CallOption
+	GetBucket          []gax.CallOption
+	CreateBucket       []gax.CallOption
+	UpdateBucket       []gax.CallOption
+	DeleteBucket       []gax.CallOption
+	UndeleteBucket     []gax.CallOption
+	ListViews          []gax.CallOption
+	GetView            []gax.CallOption
+	CreateView         []gax.CallOption
+	UpdateView         []gax.CallOption
+	DeleteView         []gax.CallOption
+	ListSinks          []gax.CallOption
+	GetSink            []gax.CallOption
+	CreateSink         []gax.CallOption
+	UpdateSink         []gax.CallOption
+	DeleteSink         []gax.CallOption
+	ListExclusions     []gax.CallOption
+	GetExclusion       []gax.CallOption
+	CreateExclusion    []gax.CallOption
+	UpdateExclusion    []gax.CallOption
+	DeleteExclusion    []gax.CallOption
+	GetCmekSettings    []gax.CallOption
+	UpdateCmekSettings []gax.CallOption
 }
 
 func defaultConfigClientOptions() []option.ClientOption {
 	return []option.ClientOption{
-		option.WithEndpoint("logging.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
+		internaloption.WithDefaultEndpoint("logging.googleapis.com:443"),
+		internaloption.WithDefaultMTLSEndpoint("logging.mtls.googleapis.com:443"),
+		internaloption.WithDefaultAudience("https://logging.googleapis.com/"),
+		internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+		option.WithGRPCDialOption(grpc.WithDisableServiceConfig()),
+		option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+			grpc.MaxCallRecvMsgSize(math.MaxInt32))),
 	}
 }
 
 func defaultConfigCallOptions() *ConfigCallOptions {
-	retry := map[[2]string][]gax.CallOption{
-		{"default", "idempotent"}: {
+	return &ConfigCallOptions{
+		ListBuckets:    []gax.CallOption{},
+		GetBucket:      []gax.CallOption{},
+		CreateBucket:   []gax.CallOption{},
+		UpdateBucket:   []gax.CallOption{},
+		DeleteBucket:   []gax.CallOption{},
+		UndeleteBucket: []gax.CallOption{},
+		ListViews:      []gax.CallOption{},
+		GetView:        []gax.CallOption{},
+		CreateView:     []gax.CallOption{},
+		UpdateView:     []gax.CallOption{},
+		DeleteView:     []gax.CallOption{},
+		ListSinks: []gax.CallOption{
 			gax.WithRetry(func() gax.Retryer {
 				return gax.OnCodes([]codes.Code{
 					codes.DeadlineExceeded,
@@ -66,31 +98,105 @@ func defaultConfigCallOptions() *ConfigCallOptions {
 				}, gax.Backoff{
 					Initial:    100 * time.Millisecond,
 					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
+					Multiplier: 1.30,
 				})
 			}),
 		},
-	}
-	return &ConfigCallOptions{
-		ListSinks:       retry[[2]string{"default", "idempotent"}],
-		GetSink:         retry[[2]string{"default", "idempotent"}],
-		CreateSink:      retry[[2]string{"default", "non_idempotent"}],
-		UpdateSink:      retry[[2]string{"default", "idempotent"}],
-		DeleteSink:      retry[[2]string{"default", "idempotent"}],
-		ListExclusions:  retry[[2]string{"default", "idempotent"}],
-		GetExclusion:    retry[[2]string{"default", "idempotent"}],
-		CreateExclusion: retry[[2]string{"default", "non_idempotent"}],
-		UpdateExclusion: retry[[2]string{"default", "non_idempotent"}],
-		DeleteExclusion: retry[[2]string{"default", "idempotent"}],
+		GetSink: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		CreateSink: []gax.CallOption{},
+		UpdateSink: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		DeleteSink: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		ListExclusions: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		GetExclusion: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		CreateExclusion: []gax.CallOption{},
+		UpdateExclusion: []gax.CallOption{},
+		DeleteExclusion: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		GetCmekSettings:    []gax.CallOption{},
+		UpdateCmekSettings: []gax.CallOption{},
 	}
 }
 
-// ConfigClient is a client for interacting with Stackdriver Logging API.
+// ConfigClient is a client for interacting with Cloud Logging API.
 //
 // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
 type ConfigClient struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
+	// Connection pool of gRPC connections to the service.
+	connPool gtransport.ConnPool
+
+	// flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE
+	disableDeadlines bool
 
 	// The gRPC API client.
 	configClient loggingpb.ConfigServiceV2Client
@@ -104,43 +210,300 @@ type ConfigClient struct {
 
 // NewConfigClient creates a new config service v2 client.
 //
-// Service for configuring sinks used to export log entries out of
-// Logging.
+// Service for configuring sinks used to route log entries.
 func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultConfigClientOptions(), opts...)...)
+	clientOpts := defaultConfigClientOptions()
+
+	if newConfigClientHook != nil {
+		hookOpts, err := newConfigClientHook(ctx, clientHookParams{})
+		if err != nil {
+			return nil, err
+		}
+		clientOpts = append(clientOpts, hookOpts...)
+	}
+
+	disableDeadlines, err := checkDisableDeadlines()
+	if err != nil {
+		return nil, err
+	}
+
+	connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
 	if err != nil {
 		return nil, err
 	}
 	c := &ConfigClient{
-		conn:        conn,
-		CallOptions: defaultConfigCallOptions(),
+		connPool:         connPool,
+		disableDeadlines: disableDeadlines,
+		CallOptions:      defaultConfigCallOptions(),
 
-		configClient: loggingpb.NewConfigServiceV2Client(conn),
+		configClient: loggingpb.NewConfigServiceV2Client(connPool),
 	}
-	c.SetGoogleClientInfo()
+	c.setGoogleClientInfo()
+
 	return c, nil
 }
 
-// Connection returns the client's connection to the API service.
+// Connection returns a connection to the API service.
+//
+// Deprecated.
 func (c *ConfigClient) Connection() *grpc.ClientConn {
-	return c.conn
+	return c.connPool.Conn()
 }
 
 // Close closes the connection to the API service. The user should invoke this when
 // the client is no longer required.
 func (c *ConfigClient) Close() error {
-	return c.conn.Close()
+	return c.connPool.Close()
 }
 
-// SetGoogleClientInfo sets the name and version of the application in
+// setGoogleClientInfo sets the name and version of the application in
 // the `x-goog-api-client` header passed on each request. Intended for
 // use by Google-written clients.
-func (c *ConfigClient) SetGoogleClientInfo(keyval ...string) {
+func (c *ConfigClient) setGoogleClientInfo(keyval ...string) {
 	kv := append([]string{"gl-go", versionGo()}, keyval...)
 	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
 	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
 }
 
+// ListBuckets lists buckets.
+func (c *ConfigClient) ListBuckets(ctx context.Context, req *loggingpb.ListBucketsRequest, opts ...gax.CallOption) *LogBucketIterator {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.ListBuckets[0:len(c.CallOptions.ListBuckets):len(c.CallOptions.ListBuckets)], opts...)
+	it := &LogBucketIterator{}
+	req = proto.Clone(req).(*loggingpb.ListBucketsRequest)
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogBucket, string, error) {
+		var resp *loggingpb.ListBucketsResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.configClient.ListBuckets(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+
+		it.Response = resp
+		return resp.GetBuckets(), resp.GetNextPageToken(), nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
+	return it
+}
+
+// GetBucket gets a bucket.
+func (c *ConfigClient) GetBucket(ctx context.Context, req *loggingpb.GetBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.GetBucket[0:len(c.CallOptions.GetBucket):len(c.CallOptions.GetBucket)], opts...)
+	var resp *loggingpb.LogBucket
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.configClient.GetBucket(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// CreateBucket creates a bucket that can be used to store log entries. Once a bucket has
+// been created, the region cannot be changed.
+func (c *ConfigClient) CreateBucket(ctx context.Context, req *loggingpb.CreateBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.CreateBucket[0:len(c.CallOptions.CreateBucket):len(c.CallOptions.CreateBucket)], opts...)
+	var resp *loggingpb.LogBucket
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.configClient.CreateBucket(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// UpdateBucket updates a bucket. This method replaces the following fields in the
+// existing bucket with values from the new bucket: retention_period
+//
+// If the retention period is decreased and the bucket is locked,
+// FAILED_PRECONDITION will be returned.
+//
+// If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION
+// will be returned.
+//
+// A buckets region may not be modified after it is created.
+func (c *ConfigClient) UpdateBucket(ctx context.Context, req *loggingpb.UpdateBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.UpdateBucket[0:len(c.CallOptions.UpdateBucket):len(c.CallOptions.UpdateBucket)], opts...)
+	var resp *loggingpb.LogBucket
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.configClient.UpdateBucket(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// DeleteBucket deletes a bucket.
+// Moves the bucket to the DELETE_REQUESTED state. After 7 days, the
+// bucket will be purged and all logs in the bucket will be permanently
+// deleted.
+func (c *ConfigClient) DeleteBucket(ctx context.Context, req *loggingpb.DeleteBucketRequest, opts ...gax.CallOption) error {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.DeleteBucket[0:len(c.CallOptions.DeleteBucket):len(c.CallOptions.DeleteBucket)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.configClient.DeleteBucket(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// UndeleteBucket undeletes a bucket. A bucket that has been deleted may be undeleted within
+// the grace period of 7 days.
+func (c *ConfigClient) UndeleteBucket(ctx context.Context, req *loggingpb.UndeleteBucketRequest, opts ...gax.CallOption) error {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.UndeleteBucket[0:len(c.CallOptions.UndeleteBucket):len(c.CallOptions.UndeleteBucket)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.configClient.UndeleteBucket(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// ListViews lists views on a bucket.
+func (c *ConfigClient) ListViews(ctx context.Context, req *loggingpb.ListViewsRequest, opts ...gax.CallOption) *LogViewIterator {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.ListViews[0:len(c.CallOptions.ListViews):len(c.CallOptions.ListViews)], opts...)
+	it := &LogViewIterator{}
+	req = proto.Clone(req).(*loggingpb.ListViewsRequest)
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogView, string, error) {
+		var resp *loggingpb.ListViewsResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.configClient.ListViews(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+
+		it.Response = resp
+		return resp.GetViews(), resp.GetNextPageToken(), nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
+	return it
+}
+
+// GetView gets a view.
+func (c *ConfigClient) GetView(ctx context.Context, req *loggingpb.GetViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.GetView[0:len(c.CallOptions.GetView):len(c.CallOptions.GetView)], opts...)
+	var resp *loggingpb.LogView
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.configClient.GetView(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// CreateView creates a view over logs in a bucket. A bucket may contain a maximum of
+// 50 views.
+func (c *ConfigClient) CreateView(ctx context.Context, req *loggingpb.CreateViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.CreateView[0:len(c.CallOptions.CreateView):len(c.CallOptions.CreateView)], opts...)
+	var resp *loggingpb.LogView
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.configClient.CreateView(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// UpdateView updates a view. This method replaces the following fields in the existing
+// view with values from the new view: filter.
+func (c *ConfigClient) UpdateView(ctx context.Context, req *loggingpb.UpdateViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.UpdateView[0:len(c.CallOptions.UpdateView):len(c.CallOptions.UpdateView)], opts...)
+	var resp *loggingpb.LogView
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.configClient.UpdateView(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// DeleteView deletes a view from a bucket.
+func (c *ConfigClient) DeleteView(ctx context.Context, req *loggingpb.DeleteViewRequest, opts ...gax.CallOption) error {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.DeleteView[0:len(c.CallOptions.DeleteView):len(c.CallOptions.DeleteView)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.configClient.DeleteView(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
 // ListSinks lists sinks.
 func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest, opts ...gax.CallOption) *LogSinkIterator {
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
@@ -164,7 +527,9 @@ func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRe
 		if err != nil {
 			return nil, "", err
 		}
-		return resp.Sinks, resp.NextPageToken, nil
+
+		it.Response = resp
+		return resp.GetSinks(), resp.GetNextPageToken(), nil
 	}
 	fetch := func(pageSize int, pageToken string) (string, error) {
 		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
@@ -175,13 +540,18 @@ func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRe
 		return nextPageToken, nil
 	}
 	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	it.pageInfo.MaxSize = int(req.PageSize)
-	it.pageInfo.Token = req.PageToken
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
 	return it
 }
 
 // GetSink gets a sink.
 func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName())))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
 	opts = append(c.CallOptions.GetSink[0:len(c.CallOptions.GetSink):len(c.CallOptions.GetSink)], opts...)
@@ -197,11 +567,16 @@ func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkReques
 	return resp, nil
 }
 
-// CreateSink creates a sink that exports specified log entries to a destination.  The
-// export of newly-ingested log entries begins immediately, unless the sink's
-// writer_identity is not permitted to write to the destination.  A sink can
+// CreateSink creates a sink that exports specified log entries to a destination. The
+// export of newly-ingested log entries begins immediately, unless the sinks
+// writer_identity is not permitted to write to the destination. A sink can
 // export log entries only from the resource owning the sink.
 func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 120000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
 	opts = append(c.CallOptions.CreateSink[0:len(c.CallOptions.CreateSink):len(c.CallOptions.CreateSink)], opts...)
@@ -217,11 +592,17 @@ func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSink
 	return resp, nil
 }
 
-// UpdateSink updates a sink.  This method replaces the following fields in the existing
+// UpdateSink updates a sink. This method replaces the following fields in the existing
 // sink with values from the new sink: destination, and filter.
+//
 // The updated sink might also have a new writer_identity; see the
 // unique_writer_identity field.
 func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName())))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
 	opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...)
@@ -240,6 +621,11 @@ func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSink
 // DeleteSink deletes a sink. If the sink has a unique writer_identity, then that
 // service account is also deleted.
 func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName())))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
 	opts = append(c.CallOptions.DeleteSink[0:len(c.CallOptions.DeleteSink):len(c.CallOptions.DeleteSink)], opts...)
@@ -274,7 +660,9 @@ func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListEx
 		if err != nil {
 			return nil, "", err
 		}
-		return resp.Exclusions, resp.NextPageToken, nil
+
+		it.Response = resp
+		return resp.GetExclusions(), resp.GetNextPageToken(), nil
 	}
 	fetch := func(pageSize int, pageToken string) (string, error) {
 		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
@@ -285,13 +673,18 @@ func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListEx
 		return nextPageToken, nil
 	}
 	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	it.pageInfo.MaxSize = int(req.PageSize)
-	it.pageInfo.Token = req.PageToken
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
 	return it
 }
 
 // GetExclusion gets the description of an exclusion.
 func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
 	opts = append(c.CallOptions.GetExclusion[0:len(c.CallOptions.GetExclusion):len(c.CallOptions.GetExclusion)], opts...)
@@ -311,6 +704,11 @@ func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclu
 // Only log entries belonging to that resource can be excluded.
 // You can have up to 10 exclusions in a resource.
 func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.CreateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 120000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
 	opts = append(c.CallOptions.CreateExclusion[0:len(c.CallOptions.CreateExclusion):len(c.CallOptions.CreateExclusion)], opts...)
@@ -328,6 +726,11 @@ func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.Creat
 
 // UpdateExclusion changes one or more properties of an existing exclusion.
 func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.UpdateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 120000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
 	opts = append(c.CallOptions.UpdateExclusion[0:len(c.CallOptions.UpdateExclusion):len(c.CallOptions.UpdateExclusion)], opts...)
@@ -345,6 +748,11 @@ func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.Updat
 
 // DeleteExclusion deletes an exclusion.
 func (c *ConfigClient) DeleteExclusion(ctx context.Context, req *loggingpb.DeleteExclusionRequest, opts ...gax.CallOption) error {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
 	opts = append(c.CallOptions.DeleteExclusion[0:len(c.CallOptions.DeleteExclusion):len(c.CallOptions.DeleteExclusion)], opts...)
@@ -356,12 +764,120 @@ func (c *ConfigClient) DeleteExclusion(ctx context.Context, req *loggingpb.Delet
 	return err
 }
 
+// GetCmekSettings gets the Logs Router CMEK settings for the given resource.
+//
+// Note: CMEK for the Logs Router can currently only be configured for GCP
+// organizations. Once configured, it applies to all projects and folders in
+// the GCP organization.
+//
+// See Enabling CMEK for Logs
+// Router (at https://cloud.google.com/logging/docs/routing/managed-encryption)
+// for more information.
+func (c *ConfigClient) GetCmekSettings(ctx context.Context, req *loggingpb.GetCmekSettingsRequest, opts ...gax.CallOption) (*loggingpb.CmekSettings, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.GetCmekSettings[0:len(c.CallOptions.GetCmekSettings):len(c.CallOptions.GetCmekSettings)], opts...)
+	var resp *loggingpb.CmekSettings
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.configClient.GetCmekSettings(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// UpdateCmekSettings updates the Logs Router CMEK settings for the given resource.
+//
+// Note: CMEK for the Logs Router can currently only be configured for GCP
+// organizations. Once configured, it applies to all projects and folders in
+// the GCP organization.
+//
+// UpdateCmekSettings
+// will fail if 1) kms_key_name is invalid, or 2) the associated service
+// account does not have the required
+// roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or
+// 3) access to the key is disabled.
+//
+// See Enabling CMEK for Logs
+// Router (at https://cloud.google.com/logging/docs/routing/managed-encryption)
+// for more information.
+func (c *ConfigClient) UpdateCmekSettings(ctx context.Context, req *loggingpb.UpdateCmekSettingsRequest, opts ...gax.CallOption) (*loggingpb.CmekSettings, error) {
+	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
+	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+	opts = append(c.CallOptions.UpdateCmekSettings[0:len(c.CallOptions.UpdateCmekSettings):len(c.CallOptions.UpdateCmekSettings)], opts...)
+	var resp *loggingpb.CmekSettings
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.configClient.UpdateCmekSettings(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// LogBucketIterator manages a stream of *loggingpb.LogBucket.
+type LogBucketIterator struct {
+	items    []*loggingpb.LogBucket
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// Response is the raw response for the current page.
+	// It must be cast to the RPC response type.
+	// Calling Next() or InternalFetch() updates this value.
+	Response interface{}
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogBucket, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *LogBucketIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *LogBucketIterator) Next() (*loggingpb.LogBucket, error) {
+	var item *loggingpb.LogBucket
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *LogBucketIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *LogBucketIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
+
 // LogExclusionIterator manages a stream of *loggingpb.LogExclusion.
 type LogExclusionIterator struct {
 	items    []*loggingpb.LogExclusion
 	pageInfo *iterator.PageInfo
 	nextFunc func() error
 
+	// Response is the raw response for the current page.
+	// It must be cast to the RPC response type.
+	// Calling Next() or InternalFetch() updates this value.
+	Response interface{}
+
 	// InternalFetch is for use by the Google Cloud Libraries only.
 	// It is not part of the stable interface of this package.
 	//
@@ -404,6 +920,11 @@ type LogSinkIterator struct {
 	pageInfo *iterator.PageInfo
 	nextFunc func() error
 
+	// Response is the raw response for the current page.
+	// It must be cast to the RPC response type.
+	// Calling Next() or InternalFetch() updates this value.
+	Response interface{}
+
 	// InternalFetch is for use by the Google Cloud Libraries only.
 	// It is not part of the stable interface of this package.
 	//
@@ -439,3 +960,50 @@ func (it *LogSinkIterator) takeBuf() interface{} {
 	it.items = nil
 	return b
 }
+
+// LogViewIterator manages a stream of *loggingpb.LogView.
+type LogViewIterator struct {
+	items    []*loggingpb.LogView
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// Response is the raw response for the current page.
+	// It must be cast to the RPC response type.
+	// Calling Next() or InternalFetch() updates this value.
+	Response interface{}
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogView, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *LogViewIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *LogViewIterator) Next() (*loggingpb.LogView, error) {
+	var item *loggingpb.LogView
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *LogViewIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *LogViewIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}

+ 29 - 12
vendor/cloud.google.com/go/logging/apiv2/doc.go

@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC
+// Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,14 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Code generated by gapic-generator. DO NOT EDIT.
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
 
 // Package logging is an auto-generated package for the
-// Stackdriver Logging API.
+// Cloud Logging API.
 //
-//   NOTE: This package is in alpha. It is not stable, and is likely to change.
-//
-// Writes log entries and manages your Logging configuration.
+// Writes log entries and manages your Cloud Logging configuration. The table
+// entries below are presented in alphabetical order, not in order of common
+// use. For explanations of the concepts found in the table entries, read the
+// documentation at https://cloud.google.com/logging/docs.
 //
 // Use of Context
 //
@@ -30,20 +31,28 @@
 // To close the open connection, use the Close() method.
 //
 // For information about setting deadlines, reusing contexts, and more
-// please visit godoc.org/cloud.google.com/go.
-//
-// Use the client at cloud.google.com/go/logging in preference to this.
+// please visit pkg.go.dev/cloud.google.com/go.
 package logging // import "cloud.google.com/go/logging/apiv2"
 
 import (
 	"context"
+	"os"
 	"runtime"
+	"strconv"
 	"strings"
 	"unicode"
 
+	"google.golang.org/api/option"
 	"google.golang.org/grpc/metadata"
 )
 
+// For more information on implementing a client constructor hook, see
+// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
+type clientHookParams struct{}
+type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
+
+const versionClient = "20210518"
+
 func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
 	out, _ := metadata.FromOutgoingContext(ctx)
 	out = out.Copy()
@@ -55,6 +64,16 @@ func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
 	return metadata.NewOutgoingContext(ctx, out)
 }
 
+func checkDisableDeadlines() (bool, error) {
+	raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE")
+	if !ok {
+		return false, nil
+	}
+
+	b, err := strconv.ParseBool(raw)
+	return b, err
+}
+
 // DefaultAuthScopes reports the default set of authentication scopes to use with this package.
 func DefaultAuthScopes() []string {
 	return []string{
@@ -81,7 +100,7 @@ func versionGo() string {
 	}
 
 	notSemverRune := func(r rune) bool {
-		return strings.IndexRune("0123456789.", r) < 0
+		return !strings.ContainsRune("0123456789.", r)
 	}
 
 	if strings.HasPrefix(s, "go1") {
@@ -102,5 +121,3 @@ func versionGo() string {
 	}
 	return "UNKNOWN"
 }
-
-const versionClient = "20190801"

+ 206 - 0
vendor/cloud.google.com/go/logging/apiv2/gapic_metadata.json

@@ -0,0 +1,206 @@
+{
+  "schema":  "1.0",
+  "comment":  "This file maps proto services/RPCs to the corresponding library clients/methods.",
+  "language":  "go",
+  "protoPackage":  "google.logging.v2",
+  "libraryPackage":  "cloud.google.com/go/logging/apiv2",
+  "services":  {
+    "ConfigServiceV2":  {
+      "clients":  {
+        "grpc":  {
+          "libraryClient":  "ConfigClient",
+          "rpcs":  {
+            "CreateBucket":  {
+              "methods":  [
+                "CreateBucket"
+              ]
+            },
+            "CreateExclusion":  {
+              "methods":  [
+                "CreateExclusion"
+              ]
+            },
+            "CreateSink":  {
+              "methods":  [
+                "CreateSink"
+              ]
+            },
+            "CreateView":  {
+              "methods":  [
+                "CreateView"
+              ]
+            },
+            "DeleteBucket":  {
+              "methods":  [
+                "DeleteBucket"
+              ]
+            },
+            "DeleteExclusion":  {
+              "methods":  [
+                "DeleteExclusion"
+              ]
+            },
+            "DeleteSink":  {
+              "methods":  [
+                "DeleteSink"
+              ]
+            },
+            "DeleteView":  {
+              "methods":  [
+                "DeleteView"
+              ]
+            },
+            "GetBucket":  {
+              "methods":  [
+                "GetBucket"
+              ]
+            },
+            "GetCmekSettings":  {
+              "methods":  [
+                "GetCmekSettings"
+              ]
+            },
+            "GetExclusion":  {
+              "methods":  [
+                "GetExclusion"
+              ]
+            },
+            "GetSink":  {
+              "methods":  [
+                "GetSink"
+              ]
+            },
+            "GetView":  {
+              "methods":  [
+                "GetView"
+              ]
+            },
+            "ListBuckets":  {
+              "methods":  [
+                "ListBuckets"
+              ]
+            },
+            "ListExclusions":  {
+              "methods":  [
+                "ListExclusions"
+              ]
+            },
+            "ListSinks":  {
+              "methods":  [
+                "ListSinks"
+              ]
+            },
+            "ListViews":  {
+              "methods":  [
+                "ListViews"
+              ]
+            },
+            "UndeleteBucket":  {
+              "methods":  [
+                "UndeleteBucket"
+              ]
+            },
+            "UpdateBucket":  {
+              "methods":  [
+                "UpdateBucket"
+              ]
+            },
+            "UpdateCmekSettings":  {
+              "methods":  [
+                "UpdateCmekSettings"
+              ]
+            },
+            "UpdateExclusion":  {
+              "methods":  [
+                "UpdateExclusion"
+              ]
+            },
+            "UpdateSink":  {
+              "methods":  [
+                "UpdateSink"
+              ]
+            },
+            "UpdateView":  {
+              "methods":  [
+                "UpdateView"
+              ]
+            }
+          }
+        }
+      }
+    },
+    "LoggingServiceV2":  {
+      "clients":  {
+        "grpc":  {
+          "libraryClient":  "Client",
+          "rpcs":  {
+            "DeleteLog":  {
+              "methods":  [
+                "DeleteLog"
+              ]
+            },
+            "ListLogEntries":  {
+              "methods":  [
+                "ListLogEntries"
+              ]
+            },
+            "ListLogs":  {
+              "methods":  [
+                "ListLogs"
+              ]
+            },
+            "ListMonitoredResourceDescriptors":  {
+              "methods":  [
+                "ListMonitoredResourceDescriptors"
+              ]
+            },
+            "TailLogEntries":  {
+              "methods":  [
+                "TailLogEntries"
+              ]
+            },
+            "WriteLogEntries":  {
+              "methods":  [
+                "WriteLogEntries"
+              ]
+            }
+          }
+        }
+      }
+    },
+    "MetricsServiceV2":  {
+      "clients":  {
+        "grpc":  {
+          "libraryClient":  "MetricsClient",
+          "rpcs":  {
+            "CreateLogMetric":  {
+              "methods":  [
+                "CreateLogMetric"
+              ]
+            },
+            "DeleteLogMetric":  {
+              "methods":  [
+                "DeleteLogMetric"
+              ]
+            },
+            "GetLogMetric":  {
+              "methods":  [
+                "GetLogMetric"
+              ]
+            },
+            "ListLogMetrics":  {
+              "methods":  [
+                "ListLogMetrics"
+              ]
+            },
+            "UpdateLogMetric":  {
+              "methods":  [
+                "UpdateLogMetric"
+              ]
+            }
+          }
+        }
+      }
+    }
+  }
+}

+ 42 - 0
vendor/cloud.google.com/go/logging/apiv2/info.go

@@ -0,0 +1,42 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Also passes any
+// provided key-value pairs. Intended for use by Google-written clients.
+//
+// Internal use only.
+func (c *Client) SetGoogleClientInfo(keyval ...string) {
+	c.setGoogleClientInfo(keyval...)
+}
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Also passes any
+// provided key-value pairs. Intended for use by Google-written clients.
+//
+// Internal use only.
+func (mc *MetricsClient) SetGoogleClientInfo(keyval ...string) {
+	mc.setGoogleClientInfo(keyval...)
+}
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Also passes any
+// provided key-value pairs. Intended for use by Google-written clients.
+//
+// Internal use only.
+func (cc *ConfigClient) SetGoogleClientInfo(keyval ...string) {
+	cc.setGoogleClientInfo(keyval...)
+}

+ 182 - 44
vendor/cloud.google.com/go/logging/apiv2/logging_client.go

@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC
+// Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Code generated by gapic-generator. DO NOT EDIT.
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
 
 package logging
 
@@ -27,7 +27,8 @@ import (
 	gax "github.com/googleapis/gax-go/v2"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
+	"google.golang.org/api/option/internaloption"
+	gtransport "google.golang.org/api/transport/grpc"
 	monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
 	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
 	"google.golang.org/grpc"
@@ -35,6 +36,8 @@ import (
 	"google.golang.org/grpc/metadata"
 )
 
+var newClientHook clientHook
+
 // CallOptions contains the retry settings for each method of Client.
 type CallOptions struct {
 	DeleteLog                        []gax.CallOption
@@ -42,18 +45,24 @@ type CallOptions struct {
 	ListLogEntries                   []gax.CallOption
 	ListMonitoredResourceDescriptors []gax.CallOption
 	ListLogs                         []gax.CallOption
+	TailLogEntries                   []gax.CallOption
 }
 
 func defaultClientOptions() []option.ClientOption {
 	return []option.ClientOption{
-		option.WithEndpoint("logging.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
+		internaloption.WithDefaultEndpoint("logging.googleapis.com:443"),
+		internaloption.WithDefaultMTLSEndpoint("logging.mtls.googleapis.com:443"),
+		internaloption.WithDefaultAudience("https://logging.googleapis.com/"),
+		internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+		option.WithGRPCDialOption(grpc.WithDisableServiceConfig()),
+		option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+			grpc.MaxCallRecvMsgSize(math.MaxInt32))),
 	}
 }
 
 func defaultCallOptions() *CallOptions {
-	retry := map[[2]string][]gax.CallOption{
-		{"default", "idempotent"}: {
+	return &CallOptions{
+		DeleteLog: []gax.CallOption{
 			gax.WithRetry(func() gax.Retryer {
 				return gax.OnCodes([]codes.Code{
 					codes.DeadlineExceeded,
@@ -62,26 +71,87 @@ func defaultCallOptions() *CallOptions {
 				}, gax.Backoff{
 					Initial:    100 * time.Millisecond,
 					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		WriteLogEntries: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		ListLogEntries: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		ListMonitoredResourceDescriptors: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		ListLogs: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		TailLogEntries: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
 				})
 			}),
 		},
-	}
-	return &CallOptions{
-		DeleteLog:                        retry[[2]string{"default", "idempotent"}],
-		WriteLogEntries:                  retry[[2]string{"default", "idempotent"}],
-		ListLogEntries:                   retry[[2]string{"default", "idempotent"}],
-		ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}],
-		ListLogs:                         retry[[2]string{"default", "idempotent"}],
 	}
 }
 
-// Client is a client for interacting with Stackdriver Logging API.
+// Client is a client for interacting with Cloud Logging API.
 //
 // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
 type Client struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
+	// Connection pool of gRPC connections to the service.
+	connPool gtransport.ConnPool
+
+	// flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE
+	disableDeadlines bool
 
 	// The gRPC API client.
 	client loggingpb.LoggingServiceV2Client
@@ -97,45 +167,69 @@ type Client struct {
 //
 // Service for ingesting and querying logs.
 func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
+	clientOpts := defaultClientOptions()
+
+	if newClientHook != nil {
+		hookOpts, err := newClientHook(ctx, clientHookParams{})
+		if err != nil {
+			return nil, err
+		}
+		clientOpts = append(clientOpts, hookOpts...)
+	}
+
+	disableDeadlines, err := checkDisableDeadlines()
+	if err != nil {
+		return nil, err
+	}
+
+	connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
 	if err != nil {
 		return nil, err
 	}
 	c := &Client{
-		conn:        conn,
-		CallOptions: defaultCallOptions(),
+		connPool:         connPool,
+		disableDeadlines: disableDeadlines,
+		CallOptions:      defaultCallOptions(),
 
-		client: loggingpb.NewLoggingServiceV2Client(conn),
+		client: loggingpb.NewLoggingServiceV2Client(connPool),
 	}
-	c.SetGoogleClientInfo()
+	c.setGoogleClientInfo()
+
 	return c, nil
 }
 
-// Connection returns the client's connection to the API service.
+// Connection returns a connection to the API service.
+//
+// Deprecated.
 func (c *Client) Connection() *grpc.ClientConn {
-	return c.conn
+	return c.connPool.Conn()
 }
 
 // Close closes the connection to the API service. The user should invoke this when
 // the client is no longer required.
 func (c *Client) Close() error {
-	return c.conn.Close()
+	return c.connPool.Close()
 }
 
-// SetGoogleClientInfo sets the name and version of the application in
+// setGoogleClientInfo sets the name and version of the application in
 // the `x-goog-api-client` header passed on each request. Intended for
 // use by Google-written clients.
-func (c *Client) SetGoogleClientInfo(keyval ...string) {
+func (c *Client) setGoogleClientInfo(keyval ...string) {
 	kv := append([]string{"gl-go", versionGo()}, keyval...)
 	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
 	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
 }
 
-// DeleteLog deletes all the log entries in a log.
-// The log reappears if it receives new entries.
-// Log entries written shortly before the delete operation might not be
-// deleted.
+// DeleteLog deletes all the log entries in a log. The log reappears if it receives new
+// entries. Log entries written shortly before the delete operation might not
+// be deleted. Entries received after the delete operation with a timestamp
+// before the operation will be deleted.
 func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, opts ...gax.CallOption) error {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "log_name", url.QueryEscape(req.GetLogName())))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
 	opts = append(c.CallOptions.DeleteLog[0:len(c.CallOptions.DeleteLog):len(c.CallOptions.DeleteLog)], opts...)
@@ -155,6 +249,11 @@ func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest,
 // different resources (projects, organizations, billing accounts or
 // folders)
 func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest, opts ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	ctx = insertMetadata(ctx, c.xGoogMetadata)
 	opts = append(c.CallOptions.WriteLogEntries[0:len(c.CallOptions.WriteLogEntries):len(c.CallOptions.WriteLogEntries)], opts...)
 	var resp *loggingpb.WriteLogEntriesResponse
@@ -169,9 +268,10 @@ func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEnt
 	return resp, nil
 }
 
-// ListLogEntries lists log entries.  Use this method to retrieve log entries from
-// Logging.  For ways to export log entries, see
-// Exporting Logs (at /logging/docs/export).
+// ListLogEntries lists log entries.  Use this method to retrieve log entries that originated
+// from a project/folder/organization/billing account.  For ways to export log
+// entries, see Exporting
+// Logs (at https://cloud.google.com/logging/docs/export).
 func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator {
 	ctx = insertMetadata(ctx, c.xGoogMetadata)
 	opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...)
@@ -193,7 +293,9 @@ func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntri
 		if err != nil {
 			return nil, "", err
 		}
-		return resp.Entries, resp.NextPageToken, nil
+
+		it.Response = resp
+		return resp.GetEntries(), resp.GetNextPageToken(), nil
 	}
 	fetch := func(pageSize int, pageToken string) (string, error) {
 		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
@@ -204,8 +306,8 @@ func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntri
 		return nextPageToken, nil
 	}
 	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	it.pageInfo.MaxSize = int(req.PageSize)
-	it.pageInfo.Token = req.PageToken
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
 	return it
 }
 
@@ -231,7 +333,9 @@ func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *logg
 		if err != nil {
 			return nil, "", err
 		}
-		return resp.ResourceDescriptors, resp.NextPageToken, nil
+
+		it.Response = resp
+		return resp.GetResourceDescriptors(), resp.GetNextPageToken(), nil
 	}
 	fetch := func(pageSize int, pageToken string) (string, error) {
 		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
@@ -242,8 +346,8 @@ func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *logg
 		return nextPageToken, nil
 	}
 	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	it.pageInfo.MaxSize = int(req.PageSize)
-	it.pageInfo.Token = req.PageToken
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
 	return it
 }
 
@@ -271,7 +375,9 @@ func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, o
 		if err != nil {
 			return nil, "", err
 		}
-		return resp.LogNames, resp.NextPageToken, nil
+
+		it.Response = resp
+		return resp.GetLogNames(), resp.GetNextPageToken(), nil
 	}
 	fetch := func(pageSize int, pageToken string) (string, error) {
 		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
@@ -282,17 +388,39 @@ func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, o
 		return nextPageToken, nil
 	}
 	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	it.pageInfo.MaxSize = int(req.PageSize)
-	it.pageInfo.Token = req.PageToken
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
 	return it
 }
 
+// TailLogEntries streaming read of log entries as they are ingested. Until the stream is
+// terminated, it will continue reading logs.
+func (c *Client) TailLogEntries(ctx context.Context, opts ...gax.CallOption) (loggingpb.LoggingServiceV2_TailLogEntriesClient, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.TailLogEntries[0:len(c.CallOptions.TailLogEntries):len(c.CallOptions.TailLogEntries)], opts...)
+	var resp loggingpb.LoggingServiceV2_TailLogEntriesClient
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.TailLogEntries(ctx, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
 // LogEntryIterator manages a stream of *loggingpb.LogEntry.
 type LogEntryIterator struct {
 	items    []*loggingpb.LogEntry
 	pageInfo *iterator.PageInfo
 	nextFunc func() error
 
+	// Response is the raw response for the current page.
+	// It must be cast to the RPC response type.
+	// Calling Next() or InternalFetch() updates this value.
+	Response interface{}
+
 	// InternalFetch is for use by the Google Cloud Libraries only.
 	// It is not part of the stable interface of this package.
 	//
@@ -335,6 +463,11 @@ type MonitoredResourceDescriptorIterator struct {
 	pageInfo *iterator.PageInfo
 	nextFunc func() error
 
+	// Response is the raw response for the current page.
+	// It must be cast to the RPC response type.
+	// Calling Next() or InternalFetch() updates this value.
+	Response interface{}
+
 	// InternalFetch is for use by the Google Cloud Libraries only.
 	// It is not part of the stable interface of this package.
 	//
@@ -377,6 +510,11 @@ type StringIterator struct {
 	pageInfo *iterator.PageInfo
 	nextFunc func() error
 
+	// Response is the raw response for the current page.
+	// It must be cast to the RPC response type.
+	// Calling Next() or InternalFetch() updates this value.
+	Response interface{}
+
 	// InternalFetch is for use by the Google Cloud Libraries only.
 	// It is not part of the stable interface of this package.
 	//

+ 121 - 31
vendor/cloud.google.com/go/logging/apiv2/metrics_client.go

@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC
+// Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Code generated by gapic-generator. DO NOT EDIT.
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
 
 package logging
 
@@ -27,13 +27,16 @@ import (
 	gax "github.com/googleapis/gax-go/v2"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
+	"google.golang.org/api/option/internaloption"
+	gtransport "google.golang.org/api/transport/grpc"
 	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
 )
 
+var newMetricsClientHook clientHook
+
 // MetricsCallOptions contains the retry settings for each method of MetricsClient.
 type MetricsCallOptions struct {
 	ListLogMetrics  []gax.CallOption
@@ -45,14 +48,19 @@ type MetricsCallOptions struct {
 
 func defaultMetricsClientOptions() []option.ClientOption {
 	return []option.ClientOption{
-		option.WithEndpoint("logging.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
+		internaloption.WithDefaultEndpoint("logging.googleapis.com:443"),
+		internaloption.WithDefaultMTLSEndpoint("logging.mtls.googleapis.com:443"),
+		internaloption.WithDefaultAudience("https://logging.googleapis.com/"),
+		internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+		option.WithGRPCDialOption(grpc.WithDisableServiceConfig()),
+		option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+			grpc.MaxCallRecvMsgSize(math.MaxInt32))),
 	}
 }
 
 func defaultMetricsCallOptions() *MetricsCallOptions {
-	retry := map[[2]string][]gax.CallOption{
-		{"default", "idempotent"}: {
+	return &MetricsCallOptions{
+		ListLogMetrics: []gax.CallOption{
 			gax.WithRetry(func() gax.Retryer {
 				return gax.OnCodes([]codes.Code{
 					codes.DeadlineExceeded,
@@ -61,26 +69,62 @@ func defaultMetricsCallOptions() *MetricsCallOptions {
 				}, gax.Backoff{
 					Initial:    100 * time.Millisecond,
 					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		GetLogMetric: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		CreateLogMetric: []gax.CallOption{},
+		UpdateLogMetric: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
+				})
+			}),
+		},
+		DeleteLogMetric: []gax.CallOption{
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Internal,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.30,
 				})
 			}),
 		},
-	}
-	return &MetricsCallOptions{
-		ListLogMetrics:  retry[[2]string{"default", "idempotent"}],
-		GetLogMetric:    retry[[2]string{"default", "idempotent"}],
-		CreateLogMetric: retry[[2]string{"default", "non_idempotent"}],
-		UpdateLogMetric: retry[[2]string{"default", "idempotent"}],
-		DeleteLogMetric: retry[[2]string{"default", "idempotent"}],
 	}
 }
 
-// MetricsClient is a client for interacting with Stackdriver Logging API.
+// MetricsClient is a client for interacting with Cloud Logging API.
 //
 // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
 type MetricsClient struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
+	// Connection pool of gRPC connections to the service.
+	connPool gtransport.ConnPool
+
+	// flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE
+	disableDeadlines bool
 
 	// The gRPC API client.
 	metricsClient loggingpb.MetricsServiceV2Client
@@ -96,35 +140,54 @@ type MetricsClient struct {
 //
 // Service for configuring logs-based metrics.
 func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*MetricsClient, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultMetricsClientOptions(), opts...)...)
+	clientOpts := defaultMetricsClientOptions()
+
+	if newMetricsClientHook != nil {
+		hookOpts, err := newMetricsClientHook(ctx, clientHookParams{})
+		if err != nil {
+			return nil, err
+		}
+		clientOpts = append(clientOpts, hookOpts...)
+	}
+
+	disableDeadlines, err := checkDisableDeadlines()
+	if err != nil {
+		return nil, err
+	}
+
+	connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
 	if err != nil {
 		return nil, err
 	}
 	c := &MetricsClient{
-		conn:        conn,
-		CallOptions: defaultMetricsCallOptions(),
+		connPool:         connPool,
+		disableDeadlines: disableDeadlines,
+		CallOptions:      defaultMetricsCallOptions(),
 
-		metricsClient: loggingpb.NewMetricsServiceV2Client(conn),
+		metricsClient: loggingpb.NewMetricsServiceV2Client(connPool),
 	}
-	c.SetGoogleClientInfo()
+	c.setGoogleClientInfo()
+
 	return c, nil
 }
 
-// Connection returns the client's connection to the API service.
+// Connection returns a connection to the API service.
+//
+// Deprecated.
 func (c *MetricsClient) Connection() *grpc.ClientConn {
-	return c.conn
+	return c.connPool.Conn()
 }
 
 // Close closes the connection to the API service. The user should invoke this when
 // the client is no longer required.
 func (c *MetricsClient) Close() error {
-	return c.conn.Close()
+	return c.connPool.Close()
 }
 
-// SetGoogleClientInfo sets the name and version of the application in
+// setGoogleClientInfo sets the name and version of the application in
 // the `x-goog-api-client` header passed on each request. Intended for
 // use by Google-written clients.
-func (c *MetricsClient) SetGoogleClientInfo(keyval ...string) {
+func (c *MetricsClient) setGoogleClientInfo(keyval ...string) {
 	kv := append([]string{"gl-go", versionGo()}, keyval...)
 	kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
 	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
@@ -153,7 +216,9 @@ func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListL
 		if err != nil {
 			return nil, "", err
 		}
-		return resp.Metrics, resp.NextPageToken, nil
+
+		it.Response = resp
+		return resp.GetMetrics(), resp.GetNextPageToken(), nil
 	}
 	fetch := func(pageSize int, pageToken string) (string, error) {
 		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
@@ -164,13 +229,18 @@ func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListL
 		return nextPageToken, nil
 	}
 	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	it.pageInfo.MaxSize = int(req.PageSize)
-	it.pageInfo.Token = req.PageToken
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
 	return it
 }
 
 // GetLogMetric gets a logs-based metric.
 func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName())))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
 	opts = append(c.CallOptions.GetLogMetric[0:len(c.CallOptions.GetLogMetric):len(c.CallOptions.GetLogMetric)], opts...)
@@ -188,6 +258,11 @@ func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogM
 
 // CreateLogMetric creates a logs-based metric.
 func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
 	opts = append(c.CallOptions.CreateLogMetric[0:len(c.CallOptions.CreateLogMetric):len(c.CallOptions.CreateLogMetric)], opts...)
@@ -205,6 +280,11 @@ func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.Crea
 
 // UpdateLogMetric creates or updates a logs-based metric.
 func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName())))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
 	opts = append(c.CallOptions.UpdateLogMetric[0:len(c.CallOptions.UpdateLogMetric):len(c.CallOptions.UpdateLogMetric)], opts...)
@@ -222,6 +302,11 @@ func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.Upda
 
 // DeleteLogMetric deletes a logs-based metric.
 func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest, opts ...gax.CallOption) error {
+	if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
+		cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
+		defer cancel()
+		ctx = cctx
+	}
 	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName())))
 	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
 	opts = append(c.CallOptions.DeleteLogMetric[0:len(c.CallOptions.DeleteLogMetric):len(c.CallOptions.DeleteLogMetric)], opts...)
@@ -239,6 +324,11 @@ type LogMetricIterator struct {
 	pageInfo *iterator.PageInfo
 	nextFunc func() error
 
+	// Response is the raw response for the current page.
+	// It must be cast to the RPC response type.
+	// Calling Next() or InternalFetch() updates this value.
+	Response interface{}
+
 	// InternalFetch is for use by the Google Cloud Libraries only.
 	// It is not part of the stable interface of this package.
 	//

+ 11 - 10
vendor/cloud.google.com/go/logging/doc.go

@@ -13,7 +13,7 @@
 // limitations under the License.
 
 /*
-Package logging contains a Stackdriver Logging client suitable for writing logs.
+Package logging contains a Cloud Logging client suitable for writing logs.
 For reading logs, and working with sinks, metrics and monitored resources,
 see package cloud.google.com/go/logging/logadmin.
 
@@ -23,7 +23,7 @@ See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API
 
 Creating a Client
 
-Use a Client to interact with the Stackdriver Logging API.
+Use a Client to interact with the Cloud Logging API.
 
 	// Create a Client
 	ctx := context.Background()
@@ -36,7 +36,7 @@ Use a Client to interact with the Stackdriver Logging API.
 Basic Usage
 
 For most use cases, you'll want to add log entries to a buffer to be periodically
-flushed (automatically and asynchronously) to the Stackdriver Logging service.
+flushed (automatically and asynchronously) to the Cloud Logging service.
 
 	// Initialize a logger
 	lg := client.Logger("my-log")
@@ -47,7 +47,7 @@ flushed (automatically and asynchronously) to the Stackdriver Logging service.
 
 Closing your Client
 
-You should call Client.Close before your program exits to flush any buffered log entries to the Stackdriver Logging service.
+You should call Client.Close before your program exits to flush any buffered log entries to the Cloud Logging service.
 
 	// Close the client when finished.
 	err = client.Close()
@@ -106,7 +106,7 @@ An Entry may have one of a number of severity levels associated with it.
 
 Viewing Logs
 
-You can view Stackdriver logs for projects at
+You can view Cloud logs for projects at
 https://console.cloud.google.com/logs/viewer. Use the dropdown at the top left. When
 running from a Google Cloud Platform VM, select "GCE VM Instance". Otherwise, select
 "Google Project" and then the project ID. Logs for organizations, folders and billing
@@ -117,13 +117,14 @@ Grouping Logs by Request
 
 To group all the log entries written during a single HTTP request, create two
 Loggers, a "parent" and a "child," with different log IDs. Both should be in the same
-project, and have the same MonitoredResouce type and labels.
+project, and have the same MonitoredResource type and labels.
 
-- Parent entries must have HTTPRequest.Request populated. (Strictly speaking, only the URL is necessary.)
+- Parent entries must have HTTPRequest.Request (strictly speaking, only Method and URL are necessary),
+  and HTTPRequest.Status populated.
 
-- A child entry's timestamp must be within the time interval covered by the parent request (i.e., older
-than parent.Timestamp, and newer than parent.Timestamp - parent.HTTPRequest.Latency, assuming the
-parent timestamp marks the end of the request.
+- A child entry's timestamp must be within the time interval covered by the parent request. (i.e., before
+the parent.Timestamp and after the parent.Timestamp - parent.HTTPRequest.Latency. This assumes the
+parent.Timestamp marks the end of the request.)
 
 - The trace field must be populated in all of the entries and match exactly.
 

+ 81 - 121
vendor/cloud.google.com/go/logging/logging.go

@@ -39,14 +39,12 @@ import (
 	"time"
 	"unicode/utf8"
 
-	"cloud.google.com/go/compute/metadata"
 	"cloud.google.com/go/internal/version"
 	vkit "cloud.google.com/go/logging/apiv2"
 	"cloud.google.com/go/logging/internal"
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/ptypes"
 	structpb "github.com/golang/protobuf/ptypes/struct"
-	tspb "github.com/golang/protobuf/ptypes/timestamp"
 	"google.golang.org/api/option"
 	"google.golang.org/api/support/bundler"
 	mrpb "google.golang.org/genproto/googleapis/api/monitoredres"
@@ -137,11 +135,8 @@ type Client struct {
 // By default NewClient uses WriteScope. To use a different scope, call
 // NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes).
 func NewClient(ctx context.Context, parent string, opts ...option.ClientOption) (*Client, error) {
-	if !strings.ContainsRune(parent, '/') {
-		parent = "projects/" + parent
-	}
+	parent = makeParent(parent)
 	opts = append([]option.ClientOption{
-		option.WithEndpoint(internal.ProdAddr),
 		option.WithScopes(WriteScope),
 	}, opts...)
 	c, err := vkit.NewClient(ctx, opts...)
@@ -173,26 +168,27 @@ func NewClient(ctx context.Context, parent string, opts ...option.ClientOption)
 	return client, nil
 }
 
-var unixZeroTimestamp *tspb.Timestamp
-
-func init() {
-	var err error
-	unixZeroTimestamp, err = ptypes.TimestampProto(time.Unix(0, 0))
-	if err != nil {
-		panic(err)
+func makeParent(parent string) string {
+	if !strings.ContainsRune(parent, '/') {
+		return "projects/" + parent
 	}
+	return parent
 }
 
 // Ping reports whether the client's connection to the logging service and the
 // authentication configuration are valid. To accomplish this, Ping writes a
 // log entry "ping" to a log named "ping".
 func (c *Client) Ping(ctx context.Context) error {
+	unixZeroTimestamp, err := ptypes.TimestampProto(time.Unix(0, 0))
+	if err != nil {
+		return err
+	}
 	ent := &logpb.LogEntry{
 		Payload:   &logpb.LogEntry_TextPayload{TextPayload: "ping"},
 		Timestamp: unixZeroTimestamp, // Identical timestamps and insert IDs are both
 		InsertId:  "ping",            // necessary for the service to dedup these entries.
 	}
-	_, err := c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{
+	_, err = c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{
 		LogName:  internal.LogPath(c.parent, "ping"),
 		Resource: monitoredResource(c.parent),
 		Entries:  []*logpb.LogEntry{ent},
@@ -244,86 +240,6 @@ type LoggerOption interface {
 	set(*Logger)
 }
 
-// CommonResource sets the monitored resource associated with all log entries
-// written from a Logger. If not provided, the resource is automatically
-// detected based on the running environment.  This value can be overridden
-// per-entry by setting an Entry's Resource field.
-func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} }
-
-type commonResource struct{ *mrpb.MonitoredResource }
-
-func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource }
-
-var detectedResource struct {
-	pb   *mrpb.MonitoredResource
-	once sync.Once
-}
-
-func detectResource() *mrpb.MonitoredResource {
-	detectedResource.once.Do(func() {
-		if !metadata.OnGCE() {
-			return
-		}
-		projectID, err := metadata.ProjectID()
-		if err != nil {
-			return
-		}
-		id, err := metadata.InstanceID()
-		if err != nil {
-			return
-		}
-		zone, err := metadata.Zone()
-		if err != nil {
-			return
-		}
-		name, err := metadata.InstanceName()
-		if err != nil {
-			return
-		}
-		detectedResource.pb = &mrpb.MonitoredResource{
-			Type: "gce_instance",
-			Labels: map[string]string{
-				"project_id":    projectID,
-				"instance_id":   id,
-				"instance_name": name,
-				"zone":          zone,
-			},
-		}
-	})
-	return detectedResource.pb
-}
-
-var resourceInfo = map[string]struct{ rtype, label string }{
-	"organizations":   {"organization", "organization_id"},
-	"folders":         {"folder", "folder_id"},
-	"projects":        {"project", "project_id"},
-	"billingAccounts": {"billing_account", "account_id"},
-}
-
-func monitoredResource(parent string) *mrpb.MonitoredResource {
-	parts := strings.SplitN(parent, "/", 2)
-	if len(parts) != 2 {
-		return globalResource(parent)
-	}
-	info, ok := resourceInfo[parts[0]]
-	if !ok {
-		return globalResource(parts[1])
-	}
-	return &mrpb.MonitoredResource{
-		Type:   info.rtype,
-		Labels: map[string]string{info.label: parts[1]},
-	}
-}
-
-func globalResource(projectID string) *mrpb.MonitoredResource {
-	return &mrpb.MonitoredResource{
-		Type: "global",
-		Labels: map[string]string{
-			"project_id": projectID,
-		},
-	}
-}
-
 // CommonLabels are labels that apply to all log entries written from a Logger,
 // so that you don't have to repeat them in each log entry's Labels field. If
 // any of the log entries contains a (key, value) with the same key that is in
@@ -544,6 +460,17 @@ func (v Severity) String() string {
 	return strconv.Itoa(int(v))
 }
 
+// UnmarshalJSON turns a string representation of severity into the type
+// Severity.
+func (v *Severity) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	*v = ParseSeverity(s)
+	return nil
+}
+
 // ParseSeverity returns the Severity whose name equals s, ignoring case. It
 // returns Default if no Severity matches.
 func ParseSeverity(s string) Severity {
@@ -654,14 +581,20 @@ type HTTPRequest struct {
 	// validated with the origin server before being served from cache. This
 	// field is only meaningful if CacheHit is true.
 	CacheValidatedWithOriginServer bool
+
+	// CacheFillBytes is the number of HTTP response bytes inserted into cache. Set only when a cache fill was attempted.
+	CacheFillBytes int64
+
+	// CacheLookup tells whether or not a cache lookup was attempted.
+	CacheLookup bool
 }
 
-func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest {
+func fromHTTPRequest(r *HTTPRequest) (*logtypepb.HttpRequest, error) {
 	if r == nil {
-		return nil
+		return nil, nil
 	}
 	if r.Request == nil {
-		panic("HTTPRequest must have a non-nil Request")
+		return nil, errors.New("logging: HTTPRequest must have a non-nil Request")
 	}
 	u := *r.Request.URL
 	u.Fragment = ""
@@ -677,11 +610,14 @@ func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest {
 		Referer:                        r.Request.Referer(),
 		CacheHit:                       r.CacheHit,
 		CacheValidatedWithOriginServer: r.CacheValidatedWithOriginServer,
+		Protocol:                       r.Request.Proto,
+		CacheFillBytes:                 r.CacheFillBytes,
+		CacheLookup:                    r.CacheLookup,
 	}
 	if r.Latency != 0 {
 		pb.Latency = ptypes.DurationProto(r.Latency)
 	}
-	return pb
+	return pb, nil
 }
 
 // fixUTF8 is a helper that fixes an invalid UTF-8 string by replacing
@@ -761,16 +697,15 @@ func jsonValueToStructValue(v interface{}) *structpb.Value {
 		}
 		return &structpb.Value{Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{Values: vals}}}
 	default:
-		panic(fmt.Sprintf("bad type %T for JSON value", v))
+		return &structpb.Value{Kind: &structpb.Value_NullValue{}}
 	}
 }
 
 // LogSync logs the Entry synchronously without any buffering. Because LogSync is slow
 // and will block, it is intended primarily for debugging or critical errors.
 // Prefer Log for most uses.
-// TODO(jba): come up with a better name (LogNow?) or eliminate.
 func (l *Logger) LogSync(ctx context.Context, e Entry) error {
-	ent, err := l.toLogEntry(e)
+	ent, err := toLogEntryInternal(e, l.client, l.client.parent)
 	if err != nil {
 		return err
 	}
@@ -785,7 +720,7 @@ func (l *Logger) LogSync(ctx context.Context, e Entry) error {
 
 // Log buffers the Entry for output to the logging service. It never blocks.
 func (l *Logger) Log(e Entry) {
-	ent, err := l.toLogEntry(e)
+	ent, err := toLogEntryInternal(e, l.client, l.client.parent)
 	if err != nil {
 		l.client.error(err)
 		return
@@ -832,38 +767,55 @@ func (l *Logger) writeLogEntries(entries []*logpb.LogEntry) {
 // (for example by calling SetFlags or SetPrefix).
 func (l *Logger) StandardLogger(s Severity) *log.Logger { return l.stdLoggers[s] }
 
-var reCloudTraceContext = regexp.MustCompile(`([a-f\d]+)/([a-f\d]+);o=(\d)`)
+var reCloudTraceContext = regexp.MustCompile(
+	// Matches on "TRACE_ID"
+	`([a-f\d]+)?` +
+		// Matches on "/SPAN_ID"
+		`(?:/([a-f\d]+))?` +
+		// Matches on ";o=TRACE_TRUE"
+		`(?:;o=(\d))?`)
 
 func deconstructXCloudTraceContext(s string) (traceID, spanID string, traceSampled bool) {
-	// As per the format described at https://cloud.google.com/trace/docs/troubleshooting#force-trace
+	// As per the format described at https://cloud.google.com/trace/docs/setup#force-trace
 	//    "X-Cloud-Trace-Context: TRACE_ID/SPAN_ID;o=TRACE_TRUE"
 	// for example:
-	//    "X-Cloud-Trace-Context: 105445aa7843bc8bf206b120001000/0;o=1"
+	//    "X-Cloud-Trace-Context: 105445aa7843bc8bf206b120001000/1;o=1"
 	//
 	// We expect:
-	//   * traceID:         "105445aa7843bc8bf206b120001000"
-	//   * spanID:          ""
-	//   * traceSampled:    true
-	matches := reCloudTraceContext.FindAllStringSubmatch(s, -1)
-	if len(matches) != 1 {
-		return
-	}
+	//   * traceID (optional): 			"105445aa7843bc8bf206b120001000"
+	//   * spanID (optional):       	"1"
+	//   * traceSampled (optional): 	true
+	matches := reCloudTraceContext.FindStringSubmatch(s)
 
-	sub := matches[0]
-	if len(sub) != 4 {
-		return
-	}
+	traceID, spanID, traceSampled = matches[1], matches[2], matches[3] == "1"
 
-	traceID, spanID = sub[1], sub[2]
 	if spanID == "0" {
 		spanID = ""
 	}
-	traceSampled = sub[3] == "1"
 
 	return
 }
 
-func (l *Logger) toLogEntry(e Entry) (*logpb.LogEntry, error) {
+// ToLogEntry takes an Entry structure and converts it to the LogEntry proto.
+// A parent can take any of the following forms:
+//    projects/PROJECT_ID
+//    folders/FOLDER_ID
+//    billingAccounts/ACCOUNT_ID
+//    organizations/ORG_ID
+// for backwards compatibility, a string with no '/' is also allowed and is interpreted
+// as a project ID.
+//
+// ToLogEntry is implied when users invoke Logger.Log or Logger.LogSync,
+// but it's exported as a public function here to give users additional flexibility
+// when using the library. Don't call this method manually if Logger.Log or
+// Logger.LogSync are used, it is intended to be used together with direct call
+// to WriteLogEntries method.
+func ToLogEntry(e Entry, parent string) (*logpb.LogEntry, error) {
+	// We have this method to support logging agents that need a bigger flexibility.
+	return toLogEntryInternal(e, nil, makeParent(parent))
+}
+
+func toLogEntryInternal(e Entry, client *Client, parent string) (*logpb.LogEntry, error) {
 	if e.LogName != "" {
 		return nil, errors.New("logging: Entry.LogName should be not be set when writing")
 	}
@@ -882,7 +834,7 @@ func (l *Logger) toLogEntry(e Entry) (*logpb.LogEntry, error) {
 			// https://cloud.google.com/appengine/docs/flexible/go/writing-application-logs.
 			traceID, spanID, traceSampled := deconstructXCloudTraceContext(traceHeader)
 			if traceID != "" {
-				e.Trace = fmt.Sprintf("%s/traces/%s", l.client.parent, traceID)
+				e.Trace = fmt.Sprintf("%s/traces/%s", parent, traceID)
 			}
 			if e.SpanID == "" {
 				e.SpanID = spanID
@@ -894,11 +846,19 @@ func (l *Logger) toLogEntry(e Entry) (*logpb.LogEntry, error) {
 			e.TraceSampled = e.TraceSampled || traceSampled
 		}
 	}
+	req, err := fromHTTPRequest(e.HTTPRequest)
+	if err != nil {
+		if client != nil {
+			client.error(err)
+		} else {
+			return nil, err
+		}
+	}
 	ent := &logpb.LogEntry{
 		Timestamp:      ts,
 		Severity:       logtypepb.LogSeverity(e.Severity),
 		InsertId:       e.InsertID,
-		HttpRequest:    fromHTTPRequest(e.HTTPRequest),
+		HttpRequest:    req,
 		Operation:      e.Operation,
 		Labels:         e.Labels,
 		Trace:          e.Trace,

+ 268 - 0
vendor/cloud.google.com/go/logging/resource.go

@@ -0,0 +1,268 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+	"io/ioutil"
+	"os"
+	"strings"
+	"sync"
+
+	"cloud.google.com/go/compute/metadata"
+	mrpb "google.golang.org/genproto/googleapis/api/monitoredres"
+)
+
+// CommonResource sets the monitored resource associated with all log entries
+// written from a Logger. If not provided, the resource is automatically
+// detected based on the running environment (on GCE, GCR, GCF and GAE Standard only).
+// This value can be overridden per-entry by setting an Entry's Resource field.
+func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} }
+
+type commonResource struct{ *mrpb.MonitoredResource }
+
+func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource }
+
+var detectedResource struct {
+	pb   *mrpb.MonitoredResource
+	once sync.Once
+}
+
+// isAppEngine returns true for both standard and flex
+func isAppEngine() bool {
+	_, service := os.LookupEnv("GAE_SERVICE")
+	_, version := os.LookupEnv("GAE_VERSION")
+	_, instance := os.LookupEnv("GAE_INSTANCE")
+
+	return service && version && instance
+}
+
+func detectAppEngineResource() *mrpb.MonitoredResource {
+	projectID, err := metadata.ProjectID()
+	if err != nil {
+		return nil
+	}
+	if projectID == "" {
+		projectID = os.Getenv("GOOGLE_CLOUD_PROJECT")
+	}
+	zone, err := metadata.Zone()
+	if err != nil {
+		return nil
+	}
+
+	return &mrpb.MonitoredResource{
+		Type: "gae_app",
+		Labels: map[string]string{
+			"project_id":  projectID,
+			"module_id":   os.Getenv("GAE_SERVICE"),
+			"version_id":  os.Getenv("GAE_VERSION"),
+			"instance_id": os.Getenv("GAE_INSTANCE"),
+			"runtime":     os.Getenv("GAE_RUNTIME"),
+			"zone":        zone,
+		},
+	}
+}
+
+func isCloudFunction() bool {
+	// Reserved envvars in older function runtimes, e.g. Node.js 8, Python 3.7 and Go 1.11.
+	_, name := os.LookupEnv("FUNCTION_NAME")
+	_, region := os.LookupEnv("FUNCTION_REGION")
+	_, entry := os.LookupEnv("ENTRY_POINT")
+
+	// Reserved envvars in newer function runtimes.
+	_, target := os.LookupEnv("FUNCTION_TARGET")
+	_, signature := os.LookupEnv("FUNCTION_SIGNATURE_TYPE")
+	_, service := os.LookupEnv("K_SERVICE")
+	return (name && region && entry) || (target && signature && service)
+}
+
+func detectCloudFunction() *mrpb.MonitoredResource {
+	projectID, err := metadata.ProjectID()
+	if err != nil {
+		return nil
+	}
+	zone, err := metadata.Zone()
+	if err != nil {
+		return nil
+	}
+	// Newer functions runtimes store name in K_SERVICE.
+	functionName, exists := os.LookupEnv("K_SERVICE")
+	if !exists {
+		functionName, _ = os.LookupEnv("FUNCTION_NAME")
+	}
+	return &mrpb.MonitoredResource{
+		Type: "cloud_function",
+		Labels: map[string]string{
+			"project_id":    projectID,
+			"region":        regionFromZone(zone),
+			"function_name": functionName,
+		},
+	}
+}
+
+func isCloudRun() bool {
+	_, config := os.LookupEnv("K_CONFIGURATION")
+	_, service := os.LookupEnv("K_SERVICE")
+	_, revision := os.LookupEnv("K_REVISION")
+	return config && service && revision
+}
+
+func detectCloudRunResource() *mrpb.MonitoredResource {
+	projectID, err := metadata.ProjectID()
+	if err != nil {
+		return nil
+	}
+	zone, err := metadata.Zone()
+	if err != nil {
+		return nil
+	}
+	return &mrpb.MonitoredResource{
+		Type: "cloud_run_revision",
+		Labels: map[string]string{
+			"project_id":         projectID,
+			"location":           regionFromZone(zone),
+			"service_name":       os.Getenv("K_SERVICE"),
+			"revision_name":      os.Getenv("K_REVISION"),
+			"configuration_name": os.Getenv("K_CONFIGURATION"),
+		},
+	}
+}
+
+func isKubernetesEngine() bool {
+	clusterName, err := metadata.InstanceAttributeValue("cluster-name")
+	// Note: InstanceAttributeValue can return "", nil
+	if err != nil || clusterName == "" {
+		return false
+	}
+	return true
+}
+
+func detectKubernetesResource() *mrpb.MonitoredResource {
+	projectID, err := metadata.ProjectID()
+	if err != nil {
+		return nil
+	}
+	zone, err := metadata.Zone()
+	if err != nil {
+		return nil
+	}
+	clusterName, err := metadata.InstanceAttributeValue("cluster-name")
+	if err != nil {
+		return nil
+	}
+	namespaceBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
+	namespaceName := ""
+	if err == nil {
+		namespaceName = string(namespaceBytes)
+	}
+	return &mrpb.MonitoredResource{
+		Type: "k8s_container",
+		Labels: map[string]string{
+			"cluster_name":   clusterName,
+			"location":       zone,
+			"project_id":     projectID,
+			"pod_name":       os.Getenv("HOSTNAME"),
+			"namespace_name": namespaceName,
+			// To get the `container_name` label, users need to explicitly provide it.
+			"container_name": os.Getenv("CONTAINER_NAME"),
+		},
+	}
+}
+
+func detectGCEResource() *mrpb.MonitoredResource {
+	projectID, err := metadata.ProjectID()
+	if err != nil {
+		return nil
+	}
+	id, err := metadata.InstanceID()
+	if err != nil {
+		return nil
+	}
+	zone, err := metadata.Zone()
+	if err != nil {
+		return nil
+	}
+	name, err := metadata.InstanceName()
+	if err != nil {
+		return nil
+	}
+	return &mrpb.MonitoredResource{
+		Type: "gce_instance",
+		Labels: map[string]string{
+			"project_id":    projectID,
+			"instance_id":   id,
+			"instance_name": name,
+			"zone":          zone,
+		},
+	}
+}
+
+func detectResource() *mrpb.MonitoredResource {
+	detectedResource.once.Do(func() {
+		switch {
+		// AppEngine, Functions, CloudRun, Kubernetes are detected first,
+		// as metadata.OnGCE() erroneously returns true on these runtimes.
+		case isAppEngine():
+			detectedResource.pb = detectAppEngineResource()
+		case isCloudFunction():
+			detectedResource.pb = detectCloudFunction()
+		case isCloudRun():
+			detectedResource.pb = detectCloudRunResource()
+		case isKubernetesEngine():
+			detectedResource.pb = detectKubernetesResource()
+		case metadata.OnGCE():
+			detectedResource.pb = detectGCEResource()
+		}
+	})
+	return detectedResource.pb
+}
+
+var resourceInfo = map[string]struct{ rtype, label string }{
+	"organizations":   {"organization", "organization_id"},
+	"folders":         {"folder", "folder_id"},
+	"projects":        {"project", "project_id"},
+	"billingAccounts": {"billing_account", "account_id"},
+}
+
+func monitoredResource(parent string) *mrpb.MonitoredResource {
+	parts := strings.SplitN(parent, "/", 2)
+	if len(parts) != 2 {
+		return globalResource(parent)
+	}
+	info, ok := resourceInfo[parts[0]]
+	if !ok {
+		return globalResource(parts[1])
+	}
+	return &mrpb.MonitoredResource{
+		Type:   info.rtype,
+		Labels: map[string]string{info.label: parts[1]},
+	}
+}
+
+func regionFromZone(zone string) string {
+	cutoff := strings.LastIndex(zone, "-")
+	if cutoff > 0 {
+		return zone[:cutoff]
+	}
+	return zone
+}
+
+func globalResource(projectID string) *mrpb.MonitoredResource {
+	return &mrpb.MonitoredResource{
+		Type: "global",
+		Labels: map[string]string{
+			"project_id": projectID,
+		},
+	}
+}

+ 236 - 0
vendor/cloud.google.com/go/testing.md

@@ -0,0 +1,236 @@
+# Testing Code that depends on Go Client Libraries
+
+The Go client libraries generated as a part of `cloud.google.com/go` all take
+the approach of returning concrete types instead of interfaces. That way, new
+fields and methods can be added to the libraries without breaking users. This
+document will go over some patterns that can be used to test code that depends
+on the Go client libraries.
+
+## Testing gRPC services using fakes
+
+*Note*: You can see the full
+[example code using a fake here](https://github.com/googleapis/google-cloud-go/tree/master/internal/examples/fake).
+
+The clients found in `cloud.google.com/go` are gRPC based, with a couple of
+notable exceptions being the [`storage`](https://pkg.go.dev/cloud.google.com/go/storage)
+and [`bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) clients.
+Interactions with gRPC services can be faked by serving up your own in-memory
+server within your test. One benefit of using this approach is that you don’t
+need to define an interface in your runtime code; you can keep using
+concrete struct types. You instead define a fake server in your test code. For
+example, take a look at the following function:
+
+```go
+import (
+        "context"
+        "fmt"
+        "log"
+        "os"
+
+        translate "cloud.google.com/go/translate/apiv3"
+        "github.com/googleapis/gax-go/v2"
+        translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+)
+
+func TranslateTextWithConcreteClient(client *translate.TranslationClient, text string, targetLang string) (string, error) {
+        ctx := context.Background()
+        log.Printf("Translating %q to %q", text, targetLang)
+        req := &translatepb.TranslateTextRequest{
+                Parent:             fmt.Sprintf("projects/%s/locations/global", os.Getenv("GOOGLE_CLOUD_PROJECT")),
+                TargetLanguageCode: "en-US",
+                Contents:           []string{text},
+        }
+        resp, err := client.TranslateText(ctx, req)
+        if err != nil {
+                return "", fmt.Errorf("unable to translate text: %v", err)
+        }
+        translations := resp.GetTranslations()
+        if len(translations) != 1 {
+                return "", fmt.Errorf("expected only one result, got %d", len(translations))
+        }
+        return translations[0].TranslatedText, nil
+}
+```
+
+Here is an example of what a fake server implementation would look like for
+faking the interactions above:
+
+```go
+import (
+        "context"
+
+        translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+)
+
+type fakeTranslationServer struct {
+        translatepb.UnimplementedTranslationServiceServer
+}
+
+func (f *fakeTranslationServer) TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest) (*translatepb.TranslateTextResponse, error) {
+        resp := &translatepb.TranslateTextResponse{
+                Translations: []*translatepb.Translation{
+                        &translatepb.Translation{
+                                TranslatedText: "Hello World",
+                        },
+                },
+        }
+        return resp, nil
+}
+```
+
+All of the generated protobuf code found in [google.golang.org/genproto](https://pkg.go.dev/google.golang.org/genproto)
+contains a similar `package.UnimplementedFooServer` type that is useful for
+creating fakes. By embedding the unimplemented server in the
+`fakeTranslationServer`, the fake will “inherit” all of the RPCs the server
+exposes. Then, by providing our own `fakeTranslationServer.TranslateText`
+method you can “override” the default unimplemented behavior of the one RPC that
+you would like to be faked.
+
+The test itself does require a little bit of setup: start up a `net.Listener`,
+register the server, and tell the client library to call the server:
+
+```go
+import (
+        "context"
+        "net"
+        "testing"
+
+        translate "cloud.google.com/go/translate/apiv3"
+        "google.golang.org/api/option"
+        translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+        "google.golang.org/grpc"
+)
+
+func TestTranslateTextWithConcreteClient(t *testing.T) {
+        ctx := context.Background()
+
+        // Setup the fake server.
+        fakeTranslationServer := &fakeTranslationServer{}
+        l, err := net.Listen("tcp", "localhost:0")
+        if err != nil {
+                t.Fatal(err)
+        }
+        gsrv := grpc.NewServer()
+        translatepb.RegisterTranslationServiceServer(gsrv, fakeTranslationServer)
+        fakeServerAddr := l.Addr().String()
+        go func() {
+                if err := gsrv.Serve(l); err != nil {
+                        panic(err)
+                }
+        }()
+
+        // Create a client.
+        client, err := translate.NewTranslationClient(ctx,
+                option.WithEndpoint(fakeServerAddr),
+                option.WithoutAuthentication(),
+                option.WithGRPCDialOption(grpc.WithInsecure()),
+        )
+        if err != nil {
+                t.Fatal(err)
+        }
+
+        // Run the test.
+        text, err := TranslateTextWithConcreteClient(client, "Hola Mundo", "en-US")
+        if err != nil {
+                t.Fatal(err)
+        }
+        if text != "Hello World" {
+                t.Fatalf("got %q, want Hello World", text)
+        }
+}
+```
+
+## Testing using mocks
+
+*Note*: You can see the full
+[example code using a mock here](https://github.com/googleapis/google-cloud-go/tree/master/internal/examples/mock).
+
+When mocking code you need to work with interfaces. Let’s create an interface
+for the `cloud.google.com/go/translate/apiv3` client used in the
+`TranslateTextWithConcreteClient` function mentioned in the previous section.
+The `translate.Client` has over a dozen methods but this code only uses one of
+them. Here is an interface that satisfies the interactions of the
+`translate.Client` in this function.
+
+```go
+type TranslationClient interface {
+        TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error)
+}
+```
+
+Now that we have an interface that satisfies the method being used we can
+rewrite the function signature to take the interface instead of the concrete
+type.
+
+```go
+func TranslateTextWithInterfaceClient(client TranslationClient, text string, targetLang string) (string, error) {
+// ...
+}
+```
+
+This allows a real `translate.Client` to be passed to the method in production
+and for a mock implementation to be passed in during testing. This pattern can
+be applied to any Go code, not just `cloud.google.com/go`. This is because
+interfaces in Go are implicitly satisfied. Structs in the client libraries can
+implicitly implement interfaces defined in your codebase. Let’s take a look at
+what it might look like to define a lightweight mock for the `TranslationClient`
+interface.
+
+```go
+import (
+        "context"
+        "testing"
+
+        "github.com/googleapis/gax-go/v2"
+        translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+)
+
+type mockClient struct{}
+
+func (*mockClient) TranslateText(_ context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) {
+        resp := &translatepb.TranslateTextResponse{
+                Translations: []*translatepb.Translation{
+                        &translatepb.Translation{
+                                TranslatedText: "Hello World",
+                        },
+                },
+        }
+        return resp, nil
+}
+
+func TestTranslateTextWithAbstractClient(t *testing.T) {
+        client := &mockClient{}
+        text, err := TranslateTextWithInterfaceClient(client, "Hola Mundo", "en-US")
+        if err != nil {
+                t.Fatal(err)
+        }
+        if text != "Hello World" {
+                t.Fatalf("got %q, want Hello World", text)
+        }
+}
+```
+
+If you prefer to not write your own mocks there are mocking frameworks such as
+[golang/mock](https://github.com/golang/mock) which can generate mocks for you
+from an interface. As a word of caution though, try to not
+[overuse mocks](https://testing.googleblog.com/2013/05/testing-on-toilet-dont-overuse-mocks.html).
+
+## Testing using emulators
+
+Some of the client libraries provided in `cloud.google.com/go` support running
+against a service emulator. The concept is similar to that of using fakes,
+mentioned above, but the server is managed for you. You just need to start it up
+and instruct the client library to talk to the emulator by setting a service
+specific emulator environment variable. Current services/environment-variables
+are:
+
+- bigtable: `BIGTABLE_EMULATOR_HOST`
+- datastore: `DATASTORE_EMULATOR_HOST`
+- firestore: `FIRESTORE_EMULATOR_HOST`
+- pubsub: `PUBSUB_EMULATOR_HOST`
+- spanner: `SPANNER_EMULATOR_HOST`
+- storage: `STORAGE_EMULATOR_HOST`
+  - Although the storage client supports an emulator environment variable there is no official emulator provided by gcloud.
+
+For more information on emulators please refer to the
+[gcloud documentation](https://cloud.google.com/sdk/gcloud/reference/beta/emulators).

+ 1 - 2
vendor/github.com/coreos/pkg/LICENSE → vendor/github.com/container-storage-interface/spec/LICENSE

@@ -1,4 +1,4 @@
-Apache License
+                                 Apache License
                            Version 2.0, January 2004
                         http://www.apache.org/licenses/
 
@@ -199,4 +199,3 @@ Apache License
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
-

+ 6280 - 0
vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go

@@ -0,0 +1,6280 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/container-storage-interface/spec/csi.proto
+
+package csi
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	timestamp "github.com/golang/protobuf/ptypes/timestamp"
+	wrappers "github.com/golang/protobuf/ptypes/wrappers"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type PluginCapability_Service_Type int32
+
+const (
+	PluginCapability_Service_UNKNOWN PluginCapability_Service_Type = 0
+	// CONTROLLER_SERVICE indicates that the Plugin provides RPCs for
+	// the ControllerService. Plugins SHOULD provide this capability.
+	// In rare cases certain plugins MAY wish to omit the
+	// ControllerService entirely from their implementation, but such
+	// SHOULD NOT be the common case.
+	// The presence of this capability determines whether the CO will
+	// attempt to invoke the REQUIRED ControllerService RPCs, as well
+	// as specific RPCs as indicated by ControllerGetCapabilities.
+	PluginCapability_Service_CONTROLLER_SERVICE PluginCapability_Service_Type = 1
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for
+	// this plugin MAY NOT be equally accessible by all nodes in the
+	// cluster. The CO MUST use the topology information returned by
+	// CreateVolumeRequest along with the topology information
+	// returned by NodeGetInfo to ensure that a given volume is
+	// accessible from a given node when scheduling workloads.
+	PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS PluginCapability_Service_Type = 2
+)
+
+var PluginCapability_Service_Type_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "CONTROLLER_SERVICE",
+	2: "VOLUME_ACCESSIBILITY_CONSTRAINTS",
+}
+
+var PluginCapability_Service_Type_value = map[string]int32{
+	"UNKNOWN":                          0,
+	"CONTROLLER_SERVICE":               1,
+	"VOLUME_ACCESSIBILITY_CONSTRAINTS": 2,
+}
+
+func (x PluginCapability_Service_Type) String() string {
+	return proto.EnumName(PluginCapability_Service_Type_name, int32(x))
+}
+
+func (PluginCapability_Service_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{4, 0, 0}
+}
+
+type PluginCapability_VolumeExpansion_Type int32
+
+const (
+	PluginCapability_VolumeExpansion_UNKNOWN PluginCapability_VolumeExpansion_Type = 0
+	// ONLINE indicates that volumes may be expanded when published to
+	// a node. When a Plugin implements this capability it MUST
+	// implement either the EXPAND_VOLUME controller capability or the
+	// EXPAND_VOLUME node capability or both. When a plugin supports
+	// ONLINE volume expansion and also has the EXPAND_VOLUME
+	// controller capability then the plugin MUST support expansion of
+	// volumes currently published and available on a node. When a
+	// plugin supports ONLINE volume expansion and also has the
+	// EXPAND_VOLUME node capability then the plugin MAY support
+	// expansion of node-published volume via NodeExpandVolume.
+	//
+	// Example 1: Given a shared filesystem volume (e.g. GlusterFs),
+	//   the Plugin may set the ONLINE volume expansion capability and
+	//   implement ControllerExpandVolume but not NodeExpandVolume.
+	//
+	// Example 2: Given a block storage volume type (e.g. EBS), the
+	//   Plugin may set the ONLINE volume expansion capability and
+	//   implement both ControllerExpandVolume and NodeExpandVolume.
+	//
+	// Example 3: Given a Plugin that supports volume expansion only
+	//   upon a node, the Plugin may set the ONLINE volume
+	//   expansion capability and implement NodeExpandVolume but not
+	//   ControllerExpandVolume.
+	PluginCapability_VolumeExpansion_ONLINE PluginCapability_VolumeExpansion_Type = 1
+	// OFFLINE indicates that volumes currently published and
+	// available on a node SHALL NOT be expanded via
+	// ControllerExpandVolume. When a plugin supports OFFLINE volume
+	// expansion it MUST implement either the EXPAND_VOLUME controller
+	// capability or both the EXPAND_VOLUME controller capability and
+	// the EXPAND_VOLUME node capability.
+	//
+	// Example 1: Given a block storage volume type (e.g. Azure Disk)
+	//   that does not support expansion of "node-attached" (i.e.
+	//   controller-published) volumes, the Plugin may indicate
+	//   OFFLINE volume expansion support and implement both
+	//   ControllerExpandVolume and NodeExpandVolume.
+	PluginCapability_VolumeExpansion_OFFLINE PluginCapability_VolumeExpansion_Type = 2
+)
+
+var PluginCapability_VolumeExpansion_Type_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "ONLINE",
+	2: "OFFLINE",
+}
+
+var PluginCapability_VolumeExpansion_Type_value = map[string]int32{
+	"UNKNOWN": 0,
+	"ONLINE":  1,
+	"OFFLINE": 2,
+}
+
+func (x PluginCapability_VolumeExpansion_Type) String() string {
+	return proto.EnumName(PluginCapability_VolumeExpansion_Type_name, int32(x))
+}
+
+func (PluginCapability_VolumeExpansion_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{4, 1, 0}
+}
+
+type VolumeCapability_AccessMode_Mode int32
+
+const (
+	VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0
+	// Can only be published once as read/write on a single node, at
+	// any given time.
+	VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1
+	// Can only be published once as readonly on a single node, at
+	// any given time.
+	VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2
+	// Can be published as readonly at multiple nodes simultaneously.
+	VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3
+	// Can be published at multiple nodes simultaneously. Only one of
+	// the node can be used as read/write. The rest will be readonly.
+	VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 4
+	// Can be published as read/write at multiple nodes
+	// simultaneously.
+	VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 5
+	// Can only be published once as read/write at a single workload
+	// on a single node, at any given time. SHOULD be used instead of
+	// SINGLE_NODE_WRITER for COs using the experimental
+	// SINGLE_NODE_MULTI_WRITER capability.
+	VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 6
+	// Can be published as read/write at multiple workloads on a
+	// single node simultaneously. SHOULD be used instead of
+	// SINGLE_NODE_WRITER for COs using the experimental
+	// SINGLE_NODE_MULTI_WRITER capability.
+	VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 7
+)
+
+var VolumeCapability_AccessMode_Mode_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "SINGLE_NODE_WRITER",
+	2: "SINGLE_NODE_READER_ONLY",
+	3: "MULTI_NODE_READER_ONLY",
+	4: "MULTI_NODE_SINGLE_WRITER",
+	5: "MULTI_NODE_MULTI_WRITER",
+	6: "SINGLE_NODE_SINGLE_WRITER",
+	7: "SINGLE_NODE_MULTI_WRITER",
+}
+
+var VolumeCapability_AccessMode_Mode_value = map[string]int32{
+	"UNKNOWN":                   0,
+	"SINGLE_NODE_WRITER":        1,
+	"SINGLE_NODE_READER_ONLY":   2,
+	"MULTI_NODE_READER_ONLY":    3,
+	"MULTI_NODE_SINGLE_WRITER":  4,
+	"MULTI_NODE_MULTI_WRITER":   5,
+	"SINGLE_NODE_SINGLE_WRITER": 6,
+	"SINGLE_NODE_MULTI_WRITER":  7,
+}
+
+func (x VolumeCapability_AccessMode_Mode) String() string {
+	return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x))
+}
+
+func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{10, 2, 0}
+}
+
+type ControllerServiceCapability_RPC_Type int32
+
+const (
+	ControllerServiceCapability_RPC_UNKNOWN                  ControllerServiceCapability_RPC_Type = 0
+	ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME     ControllerServiceCapability_RPC_Type = 1
+	ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME ControllerServiceCapability_RPC_Type = 2
+	ControllerServiceCapability_RPC_LIST_VOLUMES             ControllerServiceCapability_RPC_Type = 3
+	ControllerServiceCapability_RPC_GET_CAPACITY             ControllerServiceCapability_RPC_Type = 4
+	// Currently the only way to consume a snapshot is to create
+	// a volume from it. Therefore plugins supporting
+	// CREATE_DELETE_SNAPSHOT MUST support creating volume from
+	// snapshot.
+	ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT ControllerServiceCapability_RPC_Type = 5
+	ControllerServiceCapability_RPC_LIST_SNAPSHOTS         ControllerServiceCapability_RPC_Type = 6
+	// Plugins supporting volume cloning at the storage level MAY
+	// report this capability. The source volume MUST be managed by
+	// the same plugin. Not all volume sources and parameters
+	// combinations MAY work.
+	ControllerServiceCapability_RPC_CLONE_VOLUME ControllerServiceCapability_RPC_Type = 7
+	// Indicates the SP supports ControllerPublishVolume.readonly
+	// field.
+	ControllerServiceCapability_RPC_PUBLISH_READONLY ControllerServiceCapability_RPC_Type = 8
+	// See VolumeExpansion for details.
+	ControllerServiceCapability_RPC_EXPAND_VOLUME ControllerServiceCapability_RPC_Type = 9
+	// Indicates the SP supports the
+	// ListVolumesResponse.entry.published_nodes field
+	ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES ControllerServiceCapability_RPC_Type = 10
+	// Indicates that the Controller service can report volume
+	// conditions.
+	// An SP MAY implement `VolumeCondition` in only the Controller
+	// Plugin, only the Node Plugin, or both.
+	// If `VolumeCondition` is implemented in both the Controller and
+	// Node Plugins, it SHALL report from different perspectives.
+	// If for some reason Controller and Node Plugins report
+	// misaligned volume conditions, CO SHALL assume the worst case
+	// is the truth.
+	// Note that, for alpha, `VolumeCondition` is intended be
+	// informative for humans only, not for automation.
+	ControllerServiceCapability_RPC_VOLUME_CONDITION ControllerServiceCapability_RPC_Type = 11
+	// Indicates the SP supports the ControllerGetVolume RPC.
+	// This enables COs to, for example, fetch per volume
+	// condition after a volume is provisioned.
+	ControllerServiceCapability_RPC_GET_VOLUME ControllerServiceCapability_RPC_Type = 12
+	// Indicates the SP supports the SINGLE_NODE_SINGLE_WRITER and/or
+	// SINGLE_NODE_MULTI_WRITER access modes.
+	// These access modes are intended to replace the
+	// SINGLE_NODE_WRITER access mode to clarify the number of writers
+	// for a volume on a single node. Plugins MUST accept and allow
+	// use of the SINGLE_NODE_WRITER access mode when either
+	// SINGLE_NODE_SINGLE_WRITER and/or SINGLE_NODE_MULTI_WRITER are
+	// supported, in order to permit older COs to continue working.
+	ControllerServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER ControllerServiceCapability_RPC_Type = 13
+)
+
+var ControllerServiceCapability_RPC_Type_name = map[int32]string{
+	0:  "UNKNOWN",
+	1:  "CREATE_DELETE_VOLUME",
+	2:  "PUBLISH_UNPUBLISH_VOLUME",
+	3:  "LIST_VOLUMES",
+	4:  "GET_CAPACITY",
+	5:  "CREATE_DELETE_SNAPSHOT",
+	6:  "LIST_SNAPSHOTS",
+	7:  "CLONE_VOLUME",
+	8:  "PUBLISH_READONLY",
+	9:  "EXPAND_VOLUME",
+	10: "LIST_VOLUMES_PUBLISHED_NODES",
+	11: "VOLUME_CONDITION",
+	12: "GET_VOLUME",
+	13: "SINGLE_NODE_MULTI_WRITER",
+}
+
+var ControllerServiceCapability_RPC_Type_value = map[string]int32{
+	"UNKNOWN":                      0,
+	"CREATE_DELETE_VOLUME":         1,
+	"PUBLISH_UNPUBLISH_VOLUME":     2,
+	"LIST_VOLUMES":                 3,
+	"GET_CAPACITY":                 4,
+	"CREATE_DELETE_SNAPSHOT":       5,
+	"LIST_SNAPSHOTS":               6,
+	"CLONE_VOLUME":                 7,
+	"PUBLISH_READONLY":             8,
+	"EXPAND_VOLUME":                9,
+	"LIST_VOLUMES_PUBLISHED_NODES": 10,
+	"VOLUME_CONDITION":             11,
+	"GET_VOLUME":                   12,
+	"SINGLE_NODE_MULTI_WRITER":     13,
+}
+
+func (x ControllerServiceCapability_RPC_Type) String() string {
+	return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x))
+}
+
+func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{31, 0, 0}
+}
+
+type VolumeUsage_Unit int32
+
+const (
+	VolumeUsage_UNKNOWN VolumeUsage_Unit = 0
+	VolumeUsage_BYTES   VolumeUsage_Unit = 1
+	VolumeUsage_INODES  VolumeUsage_Unit = 2
+)
+
+var VolumeUsage_Unit_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "BYTES",
+	2: "INODES",
+}
+
+var VolumeUsage_Unit_value = map[string]int32{
+	"UNKNOWN": 0,
+	"BYTES":   1,
+	"INODES":  2,
+}
+
+func (x VolumeUsage_Unit) String() string {
+	return proto.EnumName(VolumeUsage_Unit_name, int32(x))
+}
+
+func (VolumeUsage_Unit) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{51, 0}
+}
+
+type NodeServiceCapability_RPC_Type int32
+
+const (
+	NodeServiceCapability_RPC_UNKNOWN              NodeServiceCapability_RPC_Type = 0
+	NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME NodeServiceCapability_RPC_Type = 1
+	// If Plugin implements GET_VOLUME_STATS capability
+	// then it MUST implement NodeGetVolumeStats RPC
+	// call for fetching volume statistics.
+	NodeServiceCapability_RPC_GET_VOLUME_STATS NodeServiceCapability_RPC_Type = 2
+	// See VolumeExpansion for details.
+	NodeServiceCapability_RPC_EXPAND_VOLUME NodeServiceCapability_RPC_Type = 3
+	// Indicates that the Node service can report volume conditions.
+	// An SP MAY implement `VolumeCondition` in only the Node
+	// Plugin, only the Controller Plugin, or both.
+	// If `VolumeCondition` is implemented in both the Node and
+	// Controller Plugins, it SHALL report from different
+	// perspectives.
+	// If for some reason Node and Controller Plugins report
+	// misaligned volume conditions, CO SHALL assume the worst case
+	// is the truth.
+	// Note that, for alpha, `VolumeCondition` is intended to be
+	// informative for humans only, not for automation.
+	NodeServiceCapability_RPC_VOLUME_CONDITION NodeServiceCapability_RPC_Type = 4
+	// Indicates the SP supports the SINGLE_NODE_SINGLE_WRITER and/or
+	// SINGLE_NODE_MULTI_WRITER access modes.
+	// These access modes are intended to replace the
+	// SINGLE_NODE_WRITER access mode to clarify the number of writers
+	// for a volume on a single node. Plugins MUST accept and allow
+	// use of the SINGLE_NODE_WRITER access mode (subject to the
+	// processing rules for NodePublishVolume), when either
+	// SINGLE_NODE_SINGLE_WRITER and/or SINGLE_NODE_MULTI_WRITER are
+	// supported, in order to permit older COs to continue working.
+	NodeServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER NodeServiceCapability_RPC_Type = 5
+	// Indicates that Node service supports mounting volumes
+	// with provided volume group identifier during node stage
+	// or node publish RPC calls.
+	NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP NodeServiceCapability_RPC_Type = 6
+)
+
+var NodeServiceCapability_RPC_Type_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "STAGE_UNSTAGE_VOLUME",
+	2: "GET_VOLUME_STATS",
+	3: "EXPAND_VOLUME",
+	4: "VOLUME_CONDITION",
+	5: "SINGLE_NODE_MULTI_WRITER",
+	6: "VOLUME_MOUNT_GROUP",
+}
+
+var NodeServiceCapability_RPC_Type_value = map[string]int32{
+	"UNKNOWN":                  0,
+	"STAGE_UNSTAGE_VOLUME":     1,
+	"GET_VOLUME_STATS":         2,
+	"EXPAND_VOLUME":            3,
+	"VOLUME_CONDITION":         4,
+	"SINGLE_NODE_MULTI_WRITER": 5,
+	"VOLUME_MOUNT_GROUP":       6,
+}
+
+func (x NodeServiceCapability_RPC_Type) String() string {
+	return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x))
+}
+
+func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{55, 0, 0}
+}
+
+type GetPluginInfoRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GetPluginInfoRequest) Reset()         { *m = GetPluginInfoRequest{} }
+func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPluginInfoRequest) ProtoMessage()    {}
+func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{0}
+}
+
+func (m *GetPluginInfoRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetPluginInfoRequest.Unmarshal(m, b)
+}
+func (m *GetPluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetPluginInfoRequest.Marshal(b, m, deterministic)
+}
+func (m *GetPluginInfoRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetPluginInfoRequest.Merge(m, src)
+}
+func (m *GetPluginInfoRequest) XXX_Size() int {
+	return xxx_messageInfo_GetPluginInfoRequest.Size(m)
+}
+func (m *GetPluginInfoRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetPluginInfoRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetPluginInfoRequest proto.InternalMessageInfo
+
+type GetPluginInfoResponse struct {
+	// The name MUST follow domain name notation format
+	// (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD
+	// include the plugin's host company name and the plugin name,
+	// to minimize the possibility of collisions. It MUST be 63
+	// characters or less, beginning and ending with an alphanumeric
+	// character ([a-z0-9A-Z]) with dashes (-), dots (.), and
+	// alphanumerics between. This field is REQUIRED.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// This field is REQUIRED. Value of this field is opaque to the CO.
+	VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion,proto3" json:"vendor_version,omitempty"`
+	// This field is OPTIONAL. Values are opaque to the CO.
+	Manifest             map[string]string `protobuf:"bytes,3,rep,name=manifest,proto3" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *GetPluginInfoResponse) Reset()         { *m = GetPluginInfoResponse{} }
+func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) }
+func (*GetPluginInfoResponse) ProtoMessage()    {}
+func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{1}
+}
+
+func (m *GetPluginInfoResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetPluginInfoResponse.Unmarshal(m, b)
+}
+func (m *GetPluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetPluginInfoResponse.Marshal(b, m, deterministic)
+}
+func (m *GetPluginInfoResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetPluginInfoResponse.Merge(m, src)
+}
+func (m *GetPluginInfoResponse) XXX_Size() int {
+	return xxx_messageInfo_GetPluginInfoResponse.Size(m)
+}
+func (m *GetPluginInfoResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetPluginInfoResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetPluginInfoResponse proto.InternalMessageInfo
+
+func (m *GetPluginInfoResponse) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *GetPluginInfoResponse) GetVendorVersion() string {
+	if m != nil {
+		return m.VendorVersion
+	}
+	return ""
+}
+
+func (m *GetPluginInfoResponse) GetManifest() map[string]string {
+	if m != nil {
+		return m.Manifest
+	}
+	return nil
+}
+
+type GetPluginCapabilitiesRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GetPluginCapabilitiesRequest) Reset()         { *m = GetPluginCapabilitiesRequest{} }
+func (m *GetPluginCapabilitiesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPluginCapabilitiesRequest) ProtoMessage()    {}
+func (*GetPluginCapabilitiesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{2}
+}
+
+func (m *GetPluginCapabilitiesRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetPluginCapabilitiesRequest.Unmarshal(m, b)
+}
+func (m *GetPluginCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetPluginCapabilitiesRequest.Marshal(b, m, deterministic)
+}
+func (m *GetPluginCapabilitiesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetPluginCapabilitiesRequest.Merge(m, src)
+}
+func (m *GetPluginCapabilitiesRequest) XXX_Size() int {
+	return xxx_messageInfo_GetPluginCapabilitiesRequest.Size(m)
+}
+func (m *GetPluginCapabilitiesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetPluginCapabilitiesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetPluginCapabilitiesRequest proto.InternalMessageInfo
+
+type GetPluginCapabilitiesResponse struct {
+	// All the capabilities that the controller service supports. This
+	// field is OPTIONAL.
+	Capabilities         []*PluginCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *GetPluginCapabilitiesResponse) Reset()         { *m = GetPluginCapabilitiesResponse{} }
+func (m *GetPluginCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetPluginCapabilitiesResponse) ProtoMessage()    {}
+func (*GetPluginCapabilitiesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{3}
+}
+
+func (m *GetPluginCapabilitiesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetPluginCapabilitiesResponse.Unmarshal(m, b)
+}
+func (m *GetPluginCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetPluginCapabilitiesResponse.Marshal(b, m, deterministic)
+}
+func (m *GetPluginCapabilitiesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetPluginCapabilitiesResponse.Merge(m, src)
+}
+func (m *GetPluginCapabilitiesResponse) XXX_Size() int {
+	return xxx_messageInfo_GetPluginCapabilitiesResponse.Size(m)
+}
+func (m *GetPluginCapabilitiesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetPluginCapabilitiesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetPluginCapabilitiesResponse proto.InternalMessageInfo
+
+func (m *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability {
+	if m != nil {
+		return m.Capabilities
+	}
+	return nil
+}
+
+// Specifies a capability of the plugin.
+type PluginCapability struct {
+	// Types that are valid to be assigned to Type:
+	//	*PluginCapability_Service_
+	//	*PluginCapability_VolumeExpansion_
+	Type                 isPluginCapability_Type `protobuf_oneof:"type"`
+	XXX_NoUnkeyedLiteral struct{}                `json:"-"`
+	XXX_unrecognized     []byte                  `json:"-"`
+	XXX_sizecache        int32                   `json:"-"`
+}
+
+func (m *PluginCapability) Reset()         { *m = PluginCapability{} }
+func (m *PluginCapability) String() string { return proto.CompactTextString(m) }
+func (*PluginCapability) ProtoMessage()    {}
+func (*PluginCapability) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{4}
+}
+
+func (m *PluginCapability) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PluginCapability.Unmarshal(m, b)
+}
+func (m *PluginCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PluginCapability.Marshal(b, m, deterministic)
+}
+func (m *PluginCapability) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PluginCapability.Merge(m, src)
+}
+func (m *PluginCapability) XXX_Size() int {
+	return xxx_messageInfo_PluginCapability.Size(m)
+}
+func (m *PluginCapability) XXX_DiscardUnknown() {
+	xxx_messageInfo_PluginCapability.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PluginCapability proto.InternalMessageInfo
+
+type isPluginCapability_Type interface {
+	isPluginCapability_Type()
+}
+
+type PluginCapability_Service_ struct {
+	Service *PluginCapability_Service `protobuf:"bytes,1,opt,name=service,proto3,oneof"`
+}
+
+type PluginCapability_VolumeExpansion_ struct {
+	VolumeExpansion *PluginCapability_VolumeExpansion `protobuf:"bytes,2,opt,name=volume_expansion,json=volumeExpansion,proto3,oneof"`
+}
+
+func (*PluginCapability_Service_) isPluginCapability_Type() {}
+
+func (*PluginCapability_VolumeExpansion_) isPluginCapability_Type() {}
+
+func (m *PluginCapability) GetType() isPluginCapability_Type {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+func (m *PluginCapability) GetService() *PluginCapability_Service {
+	if x, ok := m.GetType().(*PluginCapability_Service_); ok {
+		return x.Service
+	}
+	return nil
+}
+
+func (m *PluginCapability) GetVolumeExpansion() *PluginCapability_VolumeExpansion {
+	if x, ok := m.GetType().(*PluginCapability_VolumeExpansion_); ok {
+		return x.VolumeExpansion
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*PluginCapability) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*PluginCapability_Service_)(nil),
+		(*PluginCapability_VolumeExpansion_)(nil),
+	}
+}
+
+type PluginCapability_Service struct {
+	Type                 PluginCapability_Service_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_Service_Type" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
+	XXX_unrecognized     []byte                        `json:"-"`
+	XXX_sizecache        int32                         `json:"-"`
+}
+
+func (m *PluginCapability_Service) Reset()         { *m = PluginCapability_Service{} }
+func (m *PluginCapability_Service) String() string { return proto.CompactTextString(m) }
+func (*PluginCapability_Service) ProtoMessage()    {}
+func (*PluginCapability_Service) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{4, 0}
+}
+
+func (m *PluginCapability_Service) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PluginCapability_Service.Unmarshal(m, b)
+}
+func (m *PluginCapability_Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PluginCapability_Service.Marshal(b, m, deterministic)
+}
+func (m *PluginCapability_Service) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PluginCapability_Service.Merge(m, src)
+}
+func (m *PluginCapability_Service) XXX_Size() int {
+	return xxx_messageInfo_PluginCapability_Service.Size(m)
+}
+func (m *PluginCapability_Service) XXX_DiscardUnknown() {
+	xxx_messageInfo_PluginCapability_Service.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PluginCapability_Service proto.InternalMessageInfo
+
+func (m *PluginCapability_Service) GetType() PluginCapability_Service_Type {
+	if m != nil {
+		return m.Type
+	}
+	return PluginCapability_Service_UNKNOWN
+}
+
+type PluginCapability_VolumeExpansion struct {
+	Type                 PluginCapability_VolumeExpansion_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_VolumeExpansion_Type" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                              `json:"-"`
+	XXX_unrecognized     []byte                                `json:"-"`
+	XXX_sizecache        int32                                 `json:"-"`
+}
+
+func (m *PluginCapability_VolumeExpansion) Reset()         { *m = PluginCapability_VolumeExpansion{} }
+func (m *PluginCapability_VolumeExpansion) String() string { return proto.CompactTextString(m) }
+func (*PluginCapability_VolumeExpansion) ProtoMessage()    {}
+func (*PluginCapability_VolumeExpansion) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{4, 1}
+}
+
+func (m *PluginCapability_VolumeExpansion) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PluginCapability_VolumeExpansion.Unmarshal(m, b)
+}
+func (m *PluginCapability_VolumeExpansion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PluginCapability_VolumeExpansion.Marshal(b, m, deterministic)
+}
+func (m *PluginCapability_VolumeExpansion) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PluginCapability_VolumeExpansion.Merge(m, src)
+}
+func (m *PluginCapability_VolumeExpansion) XXX_Size() int {
+	return xxx_messageInfo_PluginCapability_VolumeExpansion.Size(m)
+}
+func (m *PluginCapability_VolumeExpansion) XXX_DiscardUnknown() {
+	xxx_messageInfo_PluginCapability_VolumeExpansion.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PluginCapability_VolumeExpansion proto.InternalMessageInfo
+
+func (m *PluginCapability_VolumeExpansion) GetType() PluginCapability_VolumeExpansion_Type {
+	if m != nil {
+		return m.Type
+	}
+	return PluginCapability_VolumeExpansion_UNKNOWN
+}
+
+type ProbeRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ProbeRequest) Reset()         { *m = ProbeRequest{} }
+func (m *ProbeRequest) String() string { return proto.CompactTextString(m) }
+func (*ProbeRequest) ProtoMessage()    {}
+func (*ProbeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{5}
+}
+
+func (m *ProbeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ProbeRequest.Unmarshal(m, b)
+}
+func (m *ProbeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ProbeRequest.Marshal(b, m, deterministic)
+}
+func (m *ProbeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProbeRequest.Merge(m, src)
+}
+func (m *ProbeRequest) XXX_Size() int {
+	return xxx_messageInfo_ProbeRequest.Size(m)
+}
+func (m *ProbeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ProbeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProbeRequest proto.InternalMessageInfo
+
+type ProbeResponse struct {
+	// Readiness allows a plugin to report its initialization status back
+	// to the CO. Initialization for some plugins MAY be time consuming
+	// and it is important for a CO to distinguish between the following
+	// cases:
+	//
+	// 1) The plugin is in an unhealthy state and MAY need restarting. In
+	//    this case a gRPC error code SHALL be returned.
+	// 2) The plugin is still initializing, but is otherwise perfectly
+	//    healthy. In this case a successful response SHALL be returned
+	//    with a readiness value of `false`. Calls to the plugin's
+	//    Controller and/or Node services MAY fail due to an incomplete
+	//    initialization state.
+	// 3) The plugin has finished initializing and is ready to service
+	//    calls to its Controller and/or Node services. A successful
+	//    response is returned with a readiness value of `true`.
+	//
+	// This field is OPTIONAL. If not present, the caller SHALL assume
+	// that the plugin is in a ready state and is accepting calls to its
+	// Controller and/or Node services (according to the plugin's reported
+	// capabilities).
+	Ready                *wrappers.BoolValue `protobuf:"bytes,1,opt,name=ready,proto3" json:"ready,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *ProbeResponse) Reset()         { *m = ProbeResponse{} }
+func (m *ProbeResponse) String() string { return proto.CompactTextString(m) }
+func (*ProbeResponse) ProtoMessage()    {}
+func (*ProbeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{6}
+}
+
+func (m *ProbeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ProbeResponse.Unmarshal(m, b)
+}
+func (m *ProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ProbeResponse.Marshal(b, m, deterministic)
+}
+func (m *ProbeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProbeResponse.Merge(m, src)
+}
+func (m *ProbeResponse) XXX_Size() int {
+	return xxx_messageInfo_ProbeResponse.Size(m)
+}
+func (m *ProbeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ProbeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProbeResponse proto.InternalMessageInfo
+
+func (m *ProbeResponse) GetReady() *wrappers.BoolValue {
+	if m != nil {
+		return m.Ready
+	}
+	return nil
+}
+
+type CreateVolumeRequest struct {
+	// The suggested name for the storage space. This field is REQUIRED.
+	// It serves two purposes:
+	// 1) Idempotency - This name is generated by the CO to achieve
+	//    idempotency.  The Plugin SHOULD ensure that multiple
+	//    `CreateVolume` calls for the same name do not result in more
+	//    than one piece of storage provisioned corresponding to that
+	//    name. If a Plugin is unable to enforce idempotency, the CO's
+	//    error recovery logic could result in multiple (unused) volumes
+	//    being provisioned.
+	//    In the case of error, the CO MUST handle the gRPC error codes
+	//    per the recovery behavior defined in the "CreateVolume Errors"
+	//    section below.
+	//    The CO is responsible for cleaning up volumes it provisioned
+	//    that it no longer needs. If the CO is uncertain whether a volume
+	//    was provisioned or not when a `CreateVolume` call fails, the CO
+	//    MAY call `CreateVolume` again, with the same name, to ensure the
+	//    volume exists and to retrieve the volume's `volume_id` (unless
+	//    otherwise prohibited by "CreateVolume Errors").
+	// 2) Suggested name - Some storage systems allow callers to specify
+	//    an identifier by which to refer to the newly provisioned
+	//    storage. If a storage system supports this, it can optionally
+	//    use this name as the identifier for the new volume.
+	// Any Unicode string that conforms to the length limit is allowed
+	// except those containing the following banned characters:
+	// U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
+	// (These are control characters other than commonly used whitespace.)
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// This field is OPTIONAL. This allows the CO to specify the capacity
+	// requirement of the volume to be provisioned. If not specified, the
+	// Plugin MAY choose an implementation-defined capacity range. If
+	// specified it MUST always be honored, even when creating volumes
+	// from a source; which MAY force some backends to internally extend
+	// the volume after creating it.
+	CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"`
+	// The capabilities that the provisioned volume MUST have. SP MUST
+	// provision a volume that will satisfy ALL of the capabilities
+	// specified in this list. Otherwise SP MUST return the appropriate
+	// gRPC error code.
+	// The Plugin MUST assume that the CO MAY use the provisioned volume
+	// with ANY of the capabilities specified in this list.
+	// For example, a CO MAY specify two volume capabilities: one with
+	// access mode SINGLE_NODE_WRITER and another with access mode
+	// MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the
+	// provisioned volume can be used in either mode.
+	// This also enables the CO to do early validation: If ANY of the
+	// specified volume capabilities are not supported by the SP, the call
+	// MUST return the appropriate gRPC error code.
+	// This field is REQUIRED.
+	VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"`
+	// Plugin specific parameters passed in as opaque key-value pairs.
+	// This field is OPTIONAL. The Plugin is responsible for parsing and
+	// validating these parameters. COs will treat these as opaque.
+	Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Secrets required by plugin to complete volume creation request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// If specified, the new volume will be pre-populated with data from
+	// this source. This field is OPTIONAL.
+	VolumeContentSource *VolumeContentSource `protobuf:"bytes,6,opt,name=volume_content_source,json=volumeContentSource,proto3" json:"volume_content_source,omitempty"`
+	// Specifies where (regions, zones, racks, etc.) the provisioned
+	// volume MUST be accessible from.
+	// An SP SHALL advertise the requirements for topological
+	// accessibility information in documentation. COs SHALL only specify
+	// topological accessibility information supported by the SP.
+	// This field is OPTIONAL.
+	// This field SHALL NOT be specified unless the SP has the
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
+	// If this field is not specified and the SP has the
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY
+	// choose where the provisioned volume is accessible from.
+	AccessibilityRequirements *TopologyRequirement `protobuf:"bytes,7,opt,name=accessibility_requirements,json=accessibilityRequirements,proto3" json:"accessibility_requirements,omitempty"`
+	XXX_NoUnkeyedLiteral      struct{}             `json:"-"`
+	XXX_unrecognized          []byte               `json:"-"`
+	XXX_sizecache             int32                `json:"-"`
+}
+
+func (m *CreateVolumeRequest) Reset()         { *m = CreateVolumeRequest{} }
+func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateVolumeRequest) ProtoMessage()    {}
+func (*CreateVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{7}
+}
+
+func (m *CreateVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CreateVolumeRequest.Unmarshal(m, b)
+}
+func (m *CreateVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CreateVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateVolumeRequest.Merge(m, src)
+}
+func (m *CreateVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_CreateVolumeRequest.Size(m)
+}
+func (m *CreateVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateVolumeRequest proto.InternalMessageInfo
+
+func (m *CreateVolumeRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange {
+	if m != nil {
+		return m.CapacityRange
+	}
+	return nil
+}
+
+func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability {
+	if m != nil {
+		return m.VolumeCapabilities
+	}
+	return nil
+}
+
+func (m *CreateVolumeRequest) GetParameters() map[string]string {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+func (m *CreateVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *CreateVolumeRequest) GetVolumeContentSource() *VolumeContentSource {
+	if m != nil {
+		return m.VolumeContentSource
+	}
+	return nil
+}
+
+func (m *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequirement {
+	if m != nil {
+		return m.AccessibilityRequirements
+	}
+	return nil
+}
+
+// Specifies what source the volume will be created from. One of the
+// type fields MUST be specified.
+type VolumeContentSource struct {
+	// Types that are valid to be assigned to Type:
+	//	*VolumeContentSource_Snapshot
+	//	*VolumeContentSource_Volume
+	Type                 isVolumeContentSource_Type `protobuf_oneof:"type"`
+	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
+	XXX_unrecognized     []byte                     `json:"-"`
+	XXX_sizecache        int32                      `json:"-"`
+}
+
+func (m *VolumeContentSource) Reset()         { *m = VolumeContentSource{} }
+func (m *VolumeContentSource) String() string { return proto.CompactTextString(m) }
+func (*VolumeContentSource) ProtoMessage()    {}
+func (*VolumeContentSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{8}
+}
+
+func (m *VolumeContentSource) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeContentSource.Unmarshal(m, b)
+}
+func (m *VolumeContentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeContentSource.Marshal(b, m, deterministic)
+}
+func (m *VolumeContentSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeContentSource.Merge(m, src)
+}
+func (m *VolumeContentSource) XXX_Size() int {
+	return xxx_messageInfo_VolumeContentSource.Size(m)
+}
+func (m *VolumeContentSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeContentSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeContentSource proto.InternalMessageInfo
+
+type isVolumeContentSource_Type interface {
+	isVolumeContentSource_Type()
+}
+
+type VolumeContentSource_Snapshot struct {
+	Snapshot *VolumeContentSource_SnapshotSource `protobuf:"bytes,1,opt,name=snapshot,proto3,oneof"`
+}
+
+type VolumeContentSource_Volume struct {
+	Volume *VolumeContentSource_VolumeSource `protobuf:"bytes,2,opt,name=volume,proto3,oneof"`
+}
+
+func (*VolumeContentSource_Snapshot) isVolumeContentSource_Type() {}
+
+func (*VolumeContentSource_Volume) isVolumeContentSource_Type() {}
+
+func (m *VolumeContentSource) GetType() isVolumeContentSource_Type {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+func (m *VolumeContentSource) GetSnapshot() *VolumeContentSource_SnapshotSource {
+	if x, ok := m.GetType().(*VolumeContentSource_Snapshot); ok {
+		return x.Snapshot
+	}
+	return nil
+}
+
+func (m *VolumeContentSource) GetVolume() *VolumeContentSource_VolumeSource {
+	if x, ok := m.GetType().(*VolumeContentSource_Volume); ok {
+		return x.Volume
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*VolumeContentSource) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*VolumeContentSource_Snapshot)(nil),
+		(*VolumeContentSource_Volume)(nil),
+	}
+}
+
+type VolumeContentSource_SnapshotSource struct {
+	// Contains identity information for the existing source snapshot.
+	// This field is REQUIRED. Plugin is REQUIRED to support creating
+	// volume from snapshot if it supports the capability
+	// CREATE_DELETE_SNAPSHOT.
+	SnapshotId           string   `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *VolumeContentSource_SnapshotSource) Reset()         { *m = VolumeContentSource_SnapshotSource{} }
+func (m *VolumeContentSource_SnapshotSource) String() string { return proto.CompactTextString(m) }
+func (*VolumeContentSource_SnapshotSource) ProtoMessage()    {}
+func (*VolumeContentSource_SnapshotSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{8, 0}
+}
+
+func (m *VolumeContentSource_SnapshotSource) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeContentSource_SnapshotSource.Unmarshal(m, b)
+}
+func (m *VolumeContentSource_SnapshotSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeContentSource_SnapshotSource.Marshal(b, m, deterministic)
+}
+func (m *VolumeContentSource_SnapshotSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeContentSource_SnapshotSource.Merge(m, src)
+}
+func (m *VolumeContentSource_SnapshotSource) XXX_Size() int {
+	return xxx_messageInfo_VolumeContentSource_SnapshotSource.Size(m)
+}
+func (m *VolumeContentSource_SnapshotSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeContentSource_SnapshotSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeContentSource_SnapshotSource proto.InternalMessageInfo
+
+func (m *VolumeContentSource_SnapshotSource) GetSnapshotId() string {
+	if m != nil {
+		return m.SnapshotId
+	}
+	return ""
+}
+
+type VolumeContentSource_VolumeSource struct {
+	// Contains identity information for the existing source volume.
+	// This field is REQUIRED. Plugins reporting CLONE_VOLUME
+	// capability MUST support creating a volume from another volume.
+	VolumeId             string   `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *VolumeContentSource_VolumeSource) Reset()         { *m = VolumeContentSource_VolumeSource{} }
+func (m *VolumeContentSource_VolumeSource) String() string { return proto.CompactTextString(m) }
+func (*VolumeContentSource_VolumeSource) ProtoMessage()    {}
+func (*VolumeContentSource_VolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{8, 1}
+}
+
+func (m *VolumeContentSource_VolumeSource) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeContentSource_VolumeSource.Unmarshal(m, b)
+}
+func (m *VolumeContentSource_VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeContentSource_VolumeSource.Marshal(b, m, deterministic)
+}
+func (m *VolumeContentSource_VolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeContentSource_VolumeSource.Merge(m, src)
+}
+func (m *VolumeContentSource_VolumeSource) XXX_Size() int {
+	return xxx_messageInfo_VolumeContentSource_VolumeSource.Size(m)
+}
+func (m *VolumeContentSource_VolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeContentSource_VolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeContentSource_VolumeSource proto.InternalMessageInfo
+
+func (m *VolumeContentSource_VolumeSource) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+type CreateVolumeResponse struct {
+	// Contains all attributes of the newly created volume that are
+	// relevant to the CO along with information required by the Plugin
+	// to uniquely identify the volume. This field is REQUIRED.
+	Volume               *Volume  `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *CreateVolumeResponse) Reset()         { *m = CreateVolumeResponse{} }
+func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateVolumeResponse) ProtoMessage()    {}
+func (*CreateVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{9}
+}
+
+func (m *CreateVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CreateVolumeResponse.Unmarshal(m, b)
+}
+func (m *CreateVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CreateVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *CreateVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateVolumeResponse.Merge(m, src)
+}
+func (m *CreateVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_CreateVolumeResponse.Size(m)
+}
+func (m *CreateVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateVolumeResponse proto.InternalMessageInfo
+
+func (m *CreateVolumeResponse) GetVolume() *Volume {
+	if m != nil {
+		return m.Volume
+	}
+	return nil
+}
+
+// Specify a capability of a volume.
+type VolumeCapability struct {
+	// Specifies what API the volume will be accessed using. One of the
+	// following fields MUST be specified.
+	//
+	// Types that are valid to be assigned to AccessType:
+	//	*VolumeCapability_Block
+	//	*VolumeCapability_Mount
+	AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"`
+	// This is a REQUIRED field.
+	AccessMode           *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode,proto3" json:"access_mode,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *VolumeCapability) Reset()         { *m = VolumeCapability{} }
+func (m *VolumeCapability) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability) ProtoMessage()    {}
+func (*VolumeCapability) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{10}
+}
+
+func (m *VolumeCapability) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeCapability.Unmarshal(m, b)
+}
+func (m *VolumeCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeCapability.Marshal(b, m, deterministic)
+}
+func (m *VolumeCapability) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeCapability.Merge(m, src)
+}
+func (m *VolumeCapability) XXX_Size() int {
+	return xxx_messageInfo_VolumeCapability.Size(m)
+}
+func (m *VolumeCapability) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeCapability.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCapability proto.InternalMessageInfo
+
+type isVolumeCapability_AccessType interface {
+	isVolumeCapability_AccessType()
+}
+
+type VolumeCapability_Block struct {
+	Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,proto3,oneof"`
+}
+
+type VolumeCapability_Mount struct {
+	Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,proto3,oneof"`
+}
+
+func (*VolumeCapability_Block) isVolumeCapability_AccessType() {}
+
+func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {}
+
+func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType {
+	if m != nil {
+		return m.AccessType
+	}
+	return nil
+}
+
+func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume {
+	if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok {
+		return x.Block
+	}
+	return nil
+}
+
+func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume {
+	if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok {
+		return x.Mount
+	}
+	return nil
+}
+
+func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode {
+	if m != nil {
+		return m.AccessMode
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*VolumeCapability) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*VolumeCapability_Block)(nil),
+		(*VolumeCapability_Mount)(nil),
+	}
+}
+
+// Indicate that the volume will be accessed via the block device API.
+type VolumeCapability_BlockVolume struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *VolumeCapability_BlockVolume) Reset()         { *m = VolumeCapability_BlockVolume{} }
+func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability_BlockVolume) ProtoMessage()    {}
+func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{10, 0}
+}
+
+func (m *VolumeCapability_BlockVolume) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeCapability_BlockVolume.Unmarshal(m, b)
+}
+func (m *VolumeCapability_BlockVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeCapability_BlockVolume.Marshal(b, m, deterministic)
+}
+func (m *VolumeCapability_BlockVolume) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeCapability_BlockVolume.Merge(m, src)
+}
+func (m *VolumeCapability_BlockVolume) XXX_Size() int {
+	return xxx_messageInfo_VolumeCapability_BlockVolume.Size(m)
+}
+func (m *VolumeCapability_BlockVolume) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeCapability_BlockVolume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCapability_BlockVolume proto.InternalMessageInfo
+
+// Indicate that the volume will be accessed via the filesystem API.
+type VolumeCapability_MountVolume struct {
+	// The filesystem type. This field is OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType,proto3" json:"fs_type,omitempty"`
+	// The mount options that can be used for the volume. This field is
+	// OPTIONAL. `mount_flags` MAY contain sensitive information.
+	// Therefore, the CO and the Plugin MUST NOT leak this information
+	// to untrusted entities. The total size of this repeated field
+	// SHALL NOT exceed 4 KiB.
+	MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags,proto3" json:"mount_flags,omitempty"`
+	// If SP has VOLUME_MOUNT_GROUP node capability and CO provides
+	// this field then SP MUST ensure that the volume_mount_group
+	// parameter is passed as the group identifier to the underlying
+	// operating system mount system call, with the understanding
+	// that the set of available mount call parameters and/or
+	// mount implementations may vary across operating systems.
+	// Additionally, new file and/or directory entries written to
+	// the underlying filesystem SHOULD be permission-labeled in such a
+	// manner, unless otherwise modified by a workload, that they are
+	// both readable and writable by said mount group identifier.
+	// This is an OPTIONAL field.
+	VolumeMountGroup     string   `protobuf:"bytes,3,opt,name=volume_mount_group,json=volumeMountGroup,proto3" json:"volume_mount_group,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *VolumeCapability_MountVolume) Reset()         { *m = VolumeCapability_MountVolume{} }
+func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability_MountVolume) ProtoMessage()    {}
+func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{10, 1}
+}
+
+func (m *VolumeCapability_MountVolume) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeCapability_MountVolume.Unmarshal(m, b)
+}
+func (m *VolumeCapability_MountVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeCapability_MountVolume.Marshal(b, m, deterministic)
+}
+func (m *VolumeCapability_MountVolume) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeCapability_MountVolume.Merge(m, src)
+}
+func (m *VolumeCapability_MountVolume) XXX_Size() int {
+	return xxx_messageInfo_VolumeCapability_MountVolume.Size(m)
+}
+func (m *VolumeCapability_MountVolume) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeCapability_MountVolume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCapability_MountVolume proto.InternalMessageInfo
+
+func (m *VolumeCapability_MountVolume) GetFsType() string {
+	if m != nil {
+		return m.FsType
+	}
+	return ""
+}
+
+func (m *VolumeCapability_MountVolume) GetMountFlags() []string {
+	if m != nil {
+		return m.MountFlags
+	}
+	return nil
+}
+
+func (m *VolumeCapability_MountVolume) GetVolumeMountGroup() string {
+	if m != nil {
+		return m.VolumeMountGroup
+	}
+	return ""
+}
+
+// Specify how a volume can be accessed.
+type VolumeCapability_AccessMode struct {
+	// This field is REQUIRED.
+	Mode                 VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,proto3,enum=csi.v1.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                         `json:"-"`
+	XXX_unrecognized     []byte                           `json:"-"`
+	XXX_sizecache        int32                            `json:"-"`
+}
+
+func (m *VolumeCapability_AccessMode) Reset()         { *m = VolumeCapability_AccessMode{} }
+func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability_AccessMode) ProtoMessage()    {}
+func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{10, 2}
+}
+
+func (m *VolumeCapability_AccessMode) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeCapability_AccessMode.Unmarshal(m, b)
+}
+func (m *VolumeCapability_AccessMode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeCapability_AccessMode.Marshal(b, m, deterministic)
+}
+func (m *VolumeCapability_AccessMode) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeCapability_AccessMode.Merge(m, src)
+}
+func (m *VolumeCapability_AccessMode) XXX_Size() int {
+	return xxx_messageInfo_VolumeCapability_AccessMode.Size(m)
+}
+func (m *VolumeCapability_AccessMode) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeCapability_AccessMode.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCapability_AccessMode proto.InternalMessageInfo
+
+func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode {
+	if m != nil {
+		return m.Mode
+	}
+	return VolumeCapability_AccessMode_UNKNOWN
+}
+
+// The capacity of the storage space in bytes. To specify an exact size,
+// `required_bytes` and `limit_bytes` SHALL be set to the same value. At
+// least one of the these fields MUST be specified.
+type CapacityRange struct {
+	// Volume MUST be at least this big. This field is OPTIONAL.
+	// A value of 0 is equal to an unspecified field value.
+	// The value of this field MUST NOT be negative.
+	RequiredBytes int64 `protobuf:"varint,1,opt,name=required_bytes,json=requiredBytes,proto3" json:"required_bytes,omitempty"`
+	// Volume MUST not be bigger than this. This field is OPTIONAL.
+	// A value of 0 is equal to an unspecified field value.
+	// The value of this field MUST NOT be negative.
+	LimitBytes           int64    `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes,proto3" json:"limit_bytes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *CapacityRange) Reset()         { *m = CapacityRange{} }
+func (m *CapacityRange) String() string { return proto.CompactTextString(m) }
+func (*CapacityRange) ProtoMessage()    {}
+func (*CapacityRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{11}
+}
+
+func (m *CapacityRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CapacityRange.Unmarshal(m, b)
+}
+func (m *CapacityRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CapacityRange.Marshal(b, m, deterministic)
+}
+func (m *CapacityRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CapacityRange.Merge(m, src)
+}
+func (m *CapacityRange) XXX_Size() int {
+	return xxx_messageInfo_CapacityRange.Size(m)
+}
+func (m *CapacityRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_CapacityRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CapacityRange proto.InternalMessageInfo
+
+func (m *CapacityRange) GetRequiredBytes() int64 {
+	if m != nil {
+		return m.RequiredBytes
+	}
+	return 0
+}
+
+func (m *CapacityRange) GetLimitBytes() int64 {
+	if m != nil {
+		return m.LimitBytes
+	}
+	return 0
+}
+
+// Information about a specific volume.
+type Volume struct {
+	// The capacity of the volume in bytes. This field is OPTIONAL. If not
+	// set (value of 0), it indicates that the capacity of the volume is
+	// unknown (e.g., NFS share).
+	// The value of this field MUST NOT be negative.
+	CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"`
+	// The identifier for this volume, generated by the plugin.
+	// This field is REQUIRED.
+	// This field MUST contain enough information to uniquely identify
+	// this specific volume vs all other volumes supported by this plugin.
+	// This field SHALL be used by the CO in subsequent calls to refer to
+	// this volume.
+	// The SP is NOT responsible for global uniqueness of volume_id across
+	// multiple SPs.
+	VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// Opaque static properties of the volume. SP MAY use this field to
+	// ensure subsequent volume validation and publishing calls have
+	// contextual information.
+	// The contents of this field SHALL be opaque to a CO.
+	// The contents of this field SHALL NOT be mutable.
+	// The contents of this field SHALL be safe for the CO to cache.
+	// The contents of this field SHOULD NOT contain sensitive
+	// information.
+	// The contents of this field SHOULD NOT be used for uniquely
+	// identifying a volume. The `volume_id` alone SHOULD be sufficient to
+	// identify the volume.
+	// A volume uniquely identified by `volume_id` SHALL always report the
+	// same volume_context.
+	// This field is OPTIONAL and when present MUST be passed to volume
+	// validation and publishing calls.
+	VolumeContext map[string]string `protobuf:"bytes,3,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// If specified, indicates that the volume is not empty and is
+	// pre-populated with data from the specified source.
+	// This field is OPTIONAL.
+	ContentSource *VolumeContentSource `protobuf:"bytes,4,opt,name=content_source,json=contentSource,proto3" json:"content_source,omitempty"`
+	// Specifies where (regions, zones, racks, etc.) the provisioned
+	// volume is accessible from.
+	// A plugin that returns this field MUST also set the
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
+	// An SP MAY specify multiple topologies to indicate the volume is
+	// accessible from multiple locations.
+	// COs MAY use this information along with the topology information
+	// returned by NodeGetInfo to ensure that a given volume is accessible
+	// from a given node when scheduling workloads.
+	// This field is OPTIONAL. If it is not specified, the CO MAY assume
+	// the volume is equally accessible from all nodes in the cluster and
+	// MAY schedule workloads referencing the volume on any available
+	// node.
+	//
+	// Example 1:
+	//   accessible_topology = {"region": "R1", "zone": "Z2"}
+	// Indicates a volume accessible only from the "region" "R1" and the
+	// "zone" "Z2".
+	//
+	// Example 2:
+	//   accessible_topology =
+	//     {"region": "R1", "zone": "Z2"},
+	//     {"region": "R1", "zone": "Z3"}
+	// Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3"
+	// in the "region" "R1".
+	AccessibleTopology   []*Topology `protobuf:"bytes,5,rep,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *Volume) Reset()         { *m = Volume{} }
+func (m *Volume) String() string { return proto.CompactTextString(m) }
+func (*Volume) ProtoMessage()    {}
+func (*Volume) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{12}
+}
+
+func (m *Volume) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Volume.Unmarshal(m, b)
+}
+func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Volume.Marshal(b, m, deterministic)
+}
+func (m *Volume) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Volume.Merge(m, src)
+}
+func (m *Volume) XXX_Size() int {
+	return xxx_messageInfo_Volume.Size(m)
+}
+func (m *Volume) XXX_DiscardUnknown() {
+	xxx_messageInfo_Volume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Volume proto.InternalMessageInfo
+
+func (m *Volume) GetCapacityBytes() int64 {
+	if m != nil {
+		return m.CapacityBytes
+	}
+	return 0
+}
+
+func (m *Volume) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *Volume) GetVolumeContext() map[string]string {
+	if m != nil {
+		return m.VolumeContext
+	}
+	return nil
+}
+
+func (m *Volume) GetContentSource() *VolumeContentSource {
+	if m != nil {
+		return m.ContentSource
+	}
+	return nil
+}
+
+func (m *Volume) GetAccessibleTopology() []*Topology {
+	if m != nil {
+		return m.AccessibleTopology
+	}
+	return nil
+}
+
+type TopologyRequirement struct {
+	// Specifies the list of topologies the provisioned volume MUST be
+	// accessible from.
+	// This field is OPTIONAL. If TopologyRequirement is specified either
+	// requisite or preferred or both MUST be specified.
+	//
+	// If requisite is specified, the provisioned volume MUST be
+	// accessible from at least one of the requisite topologies.
+	//
+	// Given
+	//   x = number of topologies provisioned volume is accessible from
+	//   n = number of requisite topologies
+	// The CO MUST ensure n >= 1. The SP MUST ensure x >= 1
+	// If x==n, then the SP MUST make the provisioned volume available to
+	// all topologies from the list of requisite topologies. If it is
+	// unable to do so, the SP MUST fail the CreateVolume call.
+	// For example, if a volume should be accessible from a single zone,
+	// and requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2".
+	// Similarly, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and both "zone" "Z2" and "zone" "Z3".
+	//
+	// If x<n, then the SP SHALL choose x unique topologies from the list
+	// of requisite topologies. If it is unable to do so, the SP MUST fail
+	// the CreateVolume call.
+	// For example, if a volume should be accessible from a single zone,
+	// and requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the SP may choose to make the provisioned volume available in
+	// either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1".
+	// Similarly, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"}
+	// then the provisioned volume MUST be accessible from any combination
+	// of two unique topologies: e.g. "R1/Z2" and "R1/Z3", or "R1/Z2" and
+	//  "R1/Z4", or "R1/Z3" and "R1/Z4".
+	//
+	// If x>n, then the SP MUST make the provisioned volume available from
+	// all topologies from the list of requisite topologies and MAY choose
+	// the remaining x-n unique topologies from the list of all possible
+	// topologies. If it is unable to do so, the SP MUST fail the
+	// CreateVolume call.
+	// For example, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2" and the SP may select the second zone
+	// independently, e.g. "R1/Z4".
+	Requisite []*Topology `protobuf:"bytes,1,rep,name=requisite,proto3" json:"requisite,omitempty"`
+	// Specifies the list of topologies the CO would prefer the volume to
+	// be provisioned in.
+	//
+	// This field is OPTIONAL. If TopologyRequirement is specified either
+	// requisite or preferred or both MUST be specified.
+	//
+	// An SP MUST attempt to make the provisioned volume available using
+	// the preferred topologies in order from first to last.
+	//
+	// If requisite is specified, all topologies in preferred list MUST
+	// also be present in the list of requisite topologies.
+	//
+	// If the SP is unable to to make the provisioned volume available
+	// from any of the preferred topologies, the SP MAY choose a topology
+	// from the list of requisite topologies.
+	// If the list of requisite topologies is not specified, then the SP
+	// MAY choose from the list of all possible topologies.
+	// If the list of requisite topologies is specified and the SP is
+	// unable to to make the provisioned volume available from any of the
+	// requisite topologies it MUST fail the CreateVolume call.
+	//
+	// Example 1:
+	// Given a volume should be accessible from a single zone, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z3"}
+	// then the the SP SHOULD first attempt to make the provisioned volume
+	// available from "zone" "Z3" in the "region" "R1" and fall back to
+	// "zone" "Z2" in the "region" "R1" if that is not possible.
+	//
+	// Example 2:
+	// Given a volume should be accessible from a single zone, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z5"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z2"}
+	// then the the SP SHOULD first attempt to make the provisioned volume
+	// accessible from "zone" "Z4" in the "region" "R1" and fall back to
+	// "zone" "Z2" in the "region" "R1" if that is not possible. If that
+	// is not possible, the SP may choose between either the "zone"
+	// "Z3" or "Z5" in the "region" "R1".
+	//
+	// Example 3:
+	// Given a volume should be accessible from TWO zones (because an
+	// opaque parameter in CreateVolumeRequest, for example, specifies
+	// the volume is accessible from two zones, aka synchronously
+	// replicated), and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z5"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z5"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the the SP SHOULD first attempt to make the provisioned volume
+	// accessible from the combination of the two "zones" "Z5" and "Z3" in
+	// the "region" "R1". If that's not possible, it should fall back to
+	// a combination of "Z5" and other possibilities from the list of
+	// requisite. If that's not possible, it should fall back  to a
+	// combination of "Z3" and other possibilities from the list of
+	// requisite. If that's not possible, it should fall back  to a
+	// combination of other possibilities from the list of requisite.
+	Preferred            []*Topology `protobuf:"bytes,2,rep,name=preferred,proto3" json:"preferred,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *TopologyRequirement) Reset()         { *m = TopologyRequirement{} }
+func (m *TopologyRequirement) String() string { return proto.CompactTextString(m) }
+func (*TopologyRequirement) ProtoMessage()    {}
+func (*TopologyRequirement) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{13}
+}
+
+func (m *TopologyRequirement) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TopologyRequirement.Unmarshal(m, b)
+}
+func (m *TopologyRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TopologyRequirement.Marshal(b, m, deterministic)
+}
+func (m *TopologyRequirement) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TopologyRequirement.Merge(m, src)
+}
+func (m *TopologyRequirement) XXX_Size() int {
+	return xxx_messageInfo_TopologyRequirement.Size(m)
+}
+func (m *TopologyRequirement) XXX_DiscardUnknown() {
+	xxx_messageInfo_TopologyRequirement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TopologyRequirement proto.InternalMessageInfo
+
+func (m *TopologyRequirement) GetRequisite() []*Topology {
+	if m != nil {
+		return m.Requisite
+	}
+	return nil
+}
+
+func (m *TopologyRequirement) GetPreferred() []*Topology {
+	if m != nil {
+		return m.Preferred
+	}
+	return nil
+}
+
+// Topology is a map of topological domains to topological segments.
+// A topological domain is a sub-division of a cluster, like "region",
+// "zone", "rack", etc.
+// A topological segment is a specific instance of a topological domain,
+// like "zone3", "rack3", etc.
+// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
+// Valid keys have two segments: an OPTIONAL prefix and name, separated
+// by a slash (/), for example: "com.company.example/zone".
+// The key name segment is REQUIRED. The prefix is OPTIONAL.
+// The key name MUST be 63 characters or less, begin and end with an
+// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
+// underscores (_), dots (.), or alphanumerics in between, for example
+// "zone".
+// The key prefix MUST be 63 characters or less, begin and end with a
+// lower-case alphanumeric character ([a-z0-9]), contain only
+// dashes (-), dots (.), or lower-case alphanumerics in between, and
+// follow domain name notation format
+// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
+// The key prefix SHOULD include the plugin's host company name and/or
+// the plugin name, to minimize the possibility of collisions with keys
+// from other plugins.
+// If a key prefix is specified, it MUST be identical across all
+// topology keys returned by the SP (across all RPCs).
+// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
+// MUST not both exist.
+// Each value (topological segment) MUST contain 1 or more strings.
+// Each string MUST be 63 characters or less and begin and end with an
+// alphanumeric character with '-', '_', '.', or alphanumerics in
+// between.
+type Topology struct {
+	Segments             map[string]string `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *Topology) Reset()         { *m = Topology{} }
+func (m *Topology) String() string { return proto.CompactTextString(m) }
+func (*Topology) ProtoMessage()    {}
+func (*Topology) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{14}
+}
+
+func (m *Topology) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Topology.Unmarshal(m, b)
+}
+func (m *Topology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Topology.Marshal(b, m, deterministic)
+}
+func (m *Topology) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Topology.Merge(m, src)
+}
+func (m *Topology) XXX_Size() int {
+	return xxx_messageInfo_Topology.Size(m)
+}
+func (m *Topology) XXX_DiscardUnknown() {
+	xxx_messageInfo_Topology.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Topology proto.InternalMessageInfo
+
+func (m *Topology) GetSegments() map[string]string {
+	if m != nil {
+		return m.Segments
+	}
+	return nil
+}
+
+type DeleteVolumeRequest struct {
+	// The ID of the volume to be deprovisioned.
+	// This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// Secrets required by plugin to complete volume deletion request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets              map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *DeleteVolumeRequest) Reset()         { *m = DeleteVolumeRequest{} }
+func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteVolumeRequest) ProtoMessage()    {}
+func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{15}
+}
+
+func (m *DeleteVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeleteVolumeRequest.Unmarshal(m, b)
+}
+func (m *DeleteVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeleteVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *DeleteVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteVolumeRequest.Merge(m, src)
+}
+func (m *DeleteVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_DeleteVolumeRequest.Size(m)
+}
+func (m *DeleteVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteVolumeRequest proto.InternalMessageInfo
+
+func (m *DeleteVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *DeleteVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type DeleteVolumeResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeleteVolumeResponse) Reset()         { *m = DeleteVolumeResponse{} }
+func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteVolumeResponse) ProtoMessage()    {}
+func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{16}
+}
+
+func (m *DeleteVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeleteVolumeResponse.Unmarshal(m, b)
+}
+func (m *DeleteVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeleteVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *DeleteVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteVolumeResponse.Merge(m, src)
+}
+func (m *DeleteVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_DeleteVolumeResponse.Size(m)
+}
+func (m *DeleteVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteVolumeResponse proto.InternalMessageInfo
+
+type ControllerPublishVolumeRequest struct {
+	// The ID of the volume to be used on a node.
+	// This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The ID of the node. This field is REQUIRED. The CO SHALL set this
+	// field to match the node ID returned by `NodeGetInfo`.
+	NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+	// Volume capability describing how the CO intends to use this volume.
+	// SP MUST ensure the CO can use the published volume as described.
+	// Otherwise SP MUST return the appropriate gRPC error code.
+	// This is a REQUIRED field.
+	VolumeCapability *VolumeCapability `protobuf:"bytes,3,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"`
+	// Indicates SP MUST publish the volume in readonly mode.
+	// CO MUST set this field to false if SP does not have the
+	// PUBLISH_READONLY controller capability.
+	// This is a REQUIRED field.
+	Readonly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"`
+	// Secrets required by plugin to complete controller publish volume
+	// request. This field is OPTIONAL. Refer to the
+	// `Secrets Requirements` section on how to use this field.
+	Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Volume context as returned by SP in
+	// CreateVolumeResponse.Volume.volume_context.
+	// This field is OPTIONAL and MUST match the volume_context of the
+	// volume identified by `volume_id`.
+	VolumeContext        map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ControllerPublishVolumeRequest) Reset()         { *m = ControllerPublishVolumeRequest{} }
+func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*ControllerPublishVolumeRequest) ProtoMessage()    {}
+func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{17}
+}
+
+func (m *ControllerPublishVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerPublishVolumeRequest.Unmarshal(m, b)
+}
+func (m *ControllerPublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerPublishVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *ControllerPublishVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerPublishVolumeRequest.Merge(m, src)
+}
+func (m *ControllerPublishVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_ControllerPublishVolumeRequest.Size(m)
+}
+func (m *ControllerPublishVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerPublishVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerPublishVolumeRequest proto.InternalMessageInfo
+
+func (m *ControllerPublishVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *ControllerPublishVolumeRequest) GetNodeId() string {
+	if m != nil {
+		return m.NodeId
+	}
+	return ""
+}
+
+func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability {
+	if m != nil {
+		return m.VolumeCapability
+	}
+	return nil
+}
+
+func (m *ControllerPublishVolumeRequest) GetReadonly() bool {
+	if m != nil {
+		return m.Readonly
+	}
+	return false
+}
+
+func (m *ControllerPublishVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *ControllerPublishVolumeRequest) GetVolumeContext() map[string]string {
+	if m != nil {
+		return m.VolumeContext
+	}
+	return nil
+}
+
+type ControllerPublishVolumeResponse struct {
+	// Opaque static publish properties of the volume. SP MAY use this
+	// field to ensure subsequent `NodeStageVolume` or `NodePublishVolume`
+	// calls calls have contextual information.
+	// The contents of this field SHALL be opaque to a CO.
+	// The contents of this field SHALL NOT be mutable.
+	// The contents of this field SHALL be safe for the CO to cache.
+	// The contents of this field SHOULD NOT contain sensitive
+	// information.
+	// The contents of this field SHOULD NOT be used for uniquely
+	// identifying a volume. The `volume_id` alone SHOULD be sufficient to
+	// identify the volume.
+	// This field is OPTIONAL and when present MUST be passed to
+	// subsequent `NodeStageVolume` or `NodePublishVolume` calls
+	PublishContext       map[string]string `protobuf:"bytes,1,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ControllerPublishVolumeResponse) Reset()         { *m = ControllerPublishVolumeResponse{} }
+func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*ControllerPublishVolumeResponse) ProtoMessage()    {}
+func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{18}
+}
+
+func (m *ControllerPublishVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerPublishVolumeResponse.Unmarshal(m, b)
+}
+func (m *ControllerPublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerPublishVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *ControllerPublishVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerPublishVolumeResponse.Merge(m, src)
+}
+func (m *ControllerPublishVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_ControllerPublishVolumeResponse.Size(m)
+}
+func (m *ControllerPublishVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerPublishVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerPublishVolumeResponse proto.InternalMessageInfo
+
+func (m *ControllerPublishVolumeResponse) GetPublishContext() map[string]string {
+	if m != nil {
+		return m.PublishContext
+	}
+	return nil
+}
+
+type ControllerUnpublishVolumeRequest struct {
+	// The ID of the volume. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The ID of the node. This field is OPTIONAL. The CO SHOULD set this
+	// field to match the node ID returned by `NodeGetInfo` or leave it
+	// unset. If the value is set, the SP MUST unpublish the volume from
+	// the specified node. If the value is unset, the SP MUST unpublish
+	// the volume from all nodes it is published to.
+	NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+	// Secrets required by plugin to complete controller unpublish volume
+	// request. This SHOULD be the same secrets passed to the
+	// ControllerPublishVolume call for the specified volume.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets              map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ControllerUnpublishVolumeRequest) Reset()         { *m = ControllerUnpublishVolumeRequest{} }
+func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*ControllerUnpublishVolumeRequest) ProtoMessage()    {}
+func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{19}
+}
+
+func (m *ControllerUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerUnpublishVolumeRequest.Unmarshal(m, b)
+}
+func (m *ControllerUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerUnpublishVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *ControllerUnpublishVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerUnpublishVolumeRequest.Merge(m, src)
+}
+func (m *ControllerUnpublishVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_ControllerUnpublishVolumeRequest.Size(m)
+}
+func (m *ControllerUnpublishVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerUnpublishVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerUnpublishVolumeRequest proto.InternalMessageInfo
+
+func (m *ControllerUnpublishVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *ControllerUnpublishVolumeRequest) GetNodeId() string {
+	if m != nil {
+		return m.NodeId
+	}
+	return ""
+}
+
+func (m *ControllerUnpublishVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type ControllerUnpublishVolumeResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ControllerUnpublishVolumeResponse) Reset()         { *m = ControllerUnpublishVolumeResponse{} }
+func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*ControllerUnpublishVolumeResponse) ProtoMessage()    {}
+func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{20}
+}
+
+func (m *ControllerUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerUnpublishVolumeResponse.Unmarshal(m, b)
+}
+func (m *ControllerUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerUnpublishVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *ControllerUnpublishVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerUnpublishVolumeResponse.Merge(m, src)
+}
+func (m *ControllerUnpublishVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_ControllerUnpublishVolumeResponse.Size(m)
+}
+func (m *ControllerUnpublishVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerUnpublishVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerUnpublishVolumeResponse proto.InternalMessageInfo
+
+type ValidateVolumeCapabilitiesRequest struct {
+	// The ID of the volume to check. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// Volume context as returned by SP in
+	// CreateVolumeResponse.Volume.volume_context.
+	// This field is OPTIONAL and MUST match the volume_context of the
+	// volume identified by `volume_id`.
+	VolumeContext map[string]string `protobuf:"bytes,2,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// The capabilities that the CO wants to check for the volume. This
+	// call SHALL return "confirmed" only if all the volume capabilities
+	// specified below are supported. This field is REQUIRED.
+	VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"`
+	// See CreateVolumeRequest.parameters.
+	// This field is OPTIONAL.
+	Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Secrets required by plugin to complete volume validation request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets              map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ValidateVolumeCapabilitiesRequest) Reset()         { *m = ValidateVolumeCapabilitiesRequest{} }
+func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) }
+func (*ValidateVolumeCapabilitiesRequest) ProtoMessage()    {}
+func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{21}
+}
+
+func (m *ValidateVolumeCapabilitiesRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Unmarshal(m, b)
+}
+func (m *ValidateVolumeCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Marshal(b, m, deterministic)
+}
+func (m *ValidateVolumeCapabilitiesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Merge(m, src)
+}
+func (m *ValidateVolumeCapabilitiesRequest) XXX_Size() int {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Size(m)
+}
+func (m *ValidateVolumeCapabilitiesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ValidateVolumeCapabilitiesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValidateVolumeCapabilitiesRequest proto.InternalMessageInfo
+
+func (m *ValidateVolumeCapabilitiesRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *ValidateVolumeCapabilitiesRequest) GetVolumeContext() map[string]string {
+	if m != nil {
+		return m.VolumeContext
+	}
+	return nil
+}
+
+func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability {
+	if m != nil {
+		return m.VolumeCapabilities
+	}
+	return nil
+}
+
+func (m *ValidateVolumeCapabilitiesRequest) GetParameters() map[string]string {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+func (m *ValidateVolumeCapabilitiesRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type ValidateVolumeCapabilitiesResponse struct {
+	// Confirmed indicates to the CO the set of capabilities that the
+	// plugin has validated. This field SHALL only be set to a non-empty
+	// value for successful validation responses.
+	// For successful validation responses, the CO SHALL compare the
+	// fields of this message to the originally requested capabilities in
+	// order to guard against an older plugin reporting "valid" for newer
+	// capability fields that it does not yet understand.
+	// This field is OPTIONAL.
+	Confirmed *ValidateVolumeCapabilitiesResponse_Confirmed `protobuf:"bytes,1,opt,name=confirmed,proto3" json:"confirmed,omitempty"`
+	// Message to the CO if `confirmed` above is empty. This field is
+	// OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	Message              string   `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ValidateVolumeCapabilitiesResponse) Reset()         { *m = ValidateVolumeCapabilitiesResponse{} }
+func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
+func (*ValidateVolumeCapabilitiesResponse) ProtoMessage()    {}
+func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{22}
+}
+
+func (m *ValidateVolumeCapabilitiesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Unmarshal(m, b)
+}
+func (m *ValidateVolumeCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Marshal(b, m, deterministic)
+}
+func (m *ValidateVolumeCapabilitiesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Merge(m, src)
+}
+func (m *ValidateVolumeCapabilitiesResponse) XXX_Size() int {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Size(m)
+}
+func (m *ValidateVolumeCapabilitiesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ValidateVolumeCapabilitiesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValidateVolumeCapabilitiesResponse proto.InternalMessageInfo
+
+func (m *ValidateVolumeCapabilitiesResponse) GetConfirmed() *ValidateVolumeCapabilitiesResponse_Confirmed {
+	if m != nil {
+		return m.Confirmed
+	}
+	return nil
+}
+
+func (m *ValidateVolumeCapabilitiesResponse) GetMessage() string {
+	if m != nil {
+		return m.Message
+	}
+	return ""
+}
+
+type ValidateVolumeCapabilitiesResponse_Confirmed struct {
+	// Volume context validated by the plugin.
+	// This field is OPTIONAL.
+	VolumeContext map[string]string `protobuf:"bytes,1,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Volume capabilities supported by the plugin.
+	// This field is REQUIRED.
+	VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"`
+	// The volume creation parameters validated by the plugin.
+	// This field is OPTIONAL.
+	Parameters           map[string]string `protobuf:"bytes,3,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) Reset() {
+	*m = ValidateVolumeCapabilitiesResponse_Confirmed{}
+}
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) String() string {
+	return proto.CompactTextString(m)
+}
+func (*ValidateVolumeCapabilitiesResponse_Confirmed) ProtoMessage() {}
+func (*ValidateVolumeCapabilitiesResponse_Confirmed) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{22, 0}
+}
+
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Unmarshal(m, b)
+}
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Marshal(b, m, deterministic)
+}
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Merge(m, src)
+}
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Size() int {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Size(m)
+}
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_DiscardUnknown() {
+	xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed proto.InternalMessageInfo
+
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeContext() map[string]string {
+	if m != nil {
+		return m.VolumeContext
+	}
+	return nil
+}
+
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeCapabilities() []*VolumeCapability {
+	if m != nil {
+		return m.VolumeCapabilities
+	}
+	return nil
+}
+
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetParameters() map[string]string {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+type ListVolumesRequest struct {
+	// If specified (non-zero value), the Plugin MUST NOT return more
+	// entries than this number in the response. If the actual number of
+	// entries is more than this number, the Plugin MUST set `next_token`
+	// in the response which can be used to get the next page of entries
+	// in the subsequent `ListVolumes` call. This field is OPTIONAL. If
+	// not specified (zero value), it means there is no restriction on the
+	// number of entries that can be returned.
+	// The value of this field MUST NOT be negative.
+	MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"`
+	// A token to specify where to start paginating. Set this field to
+	// `next_token` returned by a previous `ListVolumes` call to get the
+	// next page of entries. This field is OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	StartingToken        string   `protobuf:"bytes,2,opt,name=starting_token,json=startingToken,proto3" json:"starting_token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ListVolumesRequest) Reset()         { *m = ListVolumesRequest{} }
+func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListVolumesRequest) ProtoMessage()    {}
+func (*ListVolumesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{23}
+}
+
+func (m *ListVolumesRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListVolumesRequest.Unmarshal(m, b)
+}
+func (m *ListVolumesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListVolumesRequest.Marshal(b, m, deterministic)
+}
+func (m *ListVolumesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListVolumesRequest.Merge(m, src)
+}
+func (m *ListVolumesRequest) XXX_Size() int {
+	return xxx_messageInfo_ListVolumesRequest.Size(m)
+}
+func (m *ListVolumesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListVolumesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListVolumesRequest proto.InternalMessageInfo
+
+func (m *ListVolumesRequest) GetMaxEntries() int32 {
+	if m != nil {
+		return m.MaxEntries
+	}
+	return 0
+}
+
+func (m *ListVolumesRequest) GetStartingToken() string {
+	if m != nil {
+		return m.StartingToken
+	}
+	return ""
+}
+
+type ListVolumesResponse struct {
+	Entries []*ListVolumesResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+	// This token allows you to get the next page of entries for
+	// `ListVolumes` request. If the number of entries is larger than
+	// `max_entries`, use the `next_token` as a value for the
+	// `starting_token` field in the next `ListVolumes` request. This
+	// field is OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	NextToken            string   `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ListVolumesResponse) Reset()         { *m = ListVolumesResponse{} }
+func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListVolumesResponse) ProtoMessage()    {}
+func (*ListVolumesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{24}
+}
+
+func (m *ListVolumesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListVolumesResponse.Unmarshal(m, b)
+}
+func (m *ListVolumesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListVolumesResponse.Marshal(b, m, deterministic)
+}
+func (m *ListVolumesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListVolumesResponse.Merge(m, src)
+}
+func (m *ListVolumesResponse) XXX_Size() int {
+	return xxx_messageInfo_ListVolumesResponse.Size(m)
+}
+func (m *ListVolumesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListVolumesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListVolumesResponse proto.InternalMessageInfo
+
+func (m *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry {
+	if m != nil {
+		return m.Entries
+	}
+	return nil
+}
+
+func (m *ListVolumesResponse) GetNextToken() string {
+	if m != nil {
+		return m.NextToken
+	}
+	return ""
+}
+
+type ListVolumesResponse_VolumeStatus struct {
+	// A list of all `node_id` of nodes that the volume in this entry
+	// is controller published on.
+	// This field is OPTIONAL. If it is not specified and the SP has
+	// the LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO
+	// MAY assume the volume is not controller published to any nodes.
+	// If the field is not specified and the SP does not have the
+	// LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO MUST
+	// not interpret this field.
+	// published_node_ids MAY include nodes not published to or
+	// reported by the SP. The CO MUST be resilient to that.
+	PublishedNodeIds []string `protobuf:"bytes,1,rep,name=published_node_ids,json=publishedNodeIds,proto3" json:"published_node_ids,omitempty"`
+	// Information about the current condition of the volume.
+	// This field is OPTIONAL.
+	// This field MUST be specified if the
+	// VOLUME_CONDITION controller capability is supported.
+	VolumeCondition      *VolumeCondition `protobuf:"bytes,2,opt,name=volume_condition,json=volumeCondition,proto3" json:"volume_condition,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *ListVolumesResponse_VolumeStatus) Reset()         { *m = ListVolumesResponse_VolumeStatus{} }
+func (m *ListVolumesResponse_VolumeStatus) String() string { return proto.CompactTextString(m) }
+func (*ListVolumesResponse_VolumeStatus) ProtoMessage()    {}
+func (*ListVolumesResponse_VolumeStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{24, 0}
+}
+
+func (m *ListVolumesResponse_VolumeStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Unmarshal(m, b)
+}
+func (m *ListVolumesResponse_VolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Marshal(b, m, deterministic)
+}
+func (m *ListVolumesResponse_VolumeStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListVolumesResponse_VolumeStatus.Merge(m, src)
+}
+func (m *ListVolumesResponse_VolumeStatus) XXX_Size() int {
+	return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Size(m)
+}
+func (m *ListVolumesResponse_VolumeStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListVolumesResponse_VolumeStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListVolumesResponse_VolumeStatus proto.InternalMessageInfo
+
+func (m *ListVolumesResponse_VolumeStatus) GetPublishedNodeIds() []string {
+	if m != nil {
+		return m.PublishedNodeIds
+	}
+	return nil
+}
+
+func (m *ListVolumesResponse_VolumeStatus) GetVolumeCondition() *VolumeCondition {
+	if m != nil {
+		return m.VolumeCondition
+	}
+	return nil
+}
+
+type ListVolumesResponse_Entry struct {
+	// This field is REQUIRED
+	Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"`
+	// This field is OPTIONAL. This field MUST be specified if the
+	// LIST_VOLUMES_PUBLISHED_NODES controller capability is
+	// supported.
+	Status               *ListVolumesResponse_VolumeStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                          `json:"-"`
+	XXX_unrecognized     []byte                            `json:"-"`
+	XXX_sizecache        int32                             `json:"-"`
+}
+
+func (m *ListVolumesResponse_Entry) Reset()         { *m = ListVolumesResponse_Entry{} }
+func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) }
+func (*ListVolumesResponse_Entry) ProtoMessage()    {}
+func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{24, 1}
+}
+
+func (m *ListVolumesResponse_Entry) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListVolumesResponse_Entry.Unmarshal(m, b)
+}
+func (m *ListVolumesResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListVolumesResponse_Entry.Marshal(b, m, deterministic)
+}
+func (m *ListVolumesResponse_Entry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListVolumesResponse_Entry.Merge(m, src)
+}
+func (m *ListVolumesResponse_Entry) XXX_Size() int {
+	return xxx_messageInfo_ListVolumesResponse_Entry.Size(m)
+}
+func (m *ListVolumesResponse_Entry) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListVolumesResponse_Entry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListVolumesResponse_Entry proto.InternalMessageInfo
+
+func (m *ListVolumesResponse_Entry) GetVolume() *Volume {
+	if m != nil {
+		return m.Volume
+	}
+	return nil
+}
+
+func (m *ListVolumesResponse_Entry) GetStatus() *ListVolumesResponse_VolumeStatus {
+	if m != nil {
+		return m.Status
+	}
+	return nil
+}
+
+type ControllerGetVolumeRequest struct {
+	// The ID of the volume to fetch current volume information for.
+	// This field is REQUIRED.
+	VolumeId             string   `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ControllerGetVolumeRequest) Reset()         { *m = ControllerGetVolumeRequest{} }
+func (m *ControllerGetVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*ControllerGetVolumeRequest) ProtoMessage()    {}
+func (*ControllerGetVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{25}
+}
+
+func (m *ControllerGetVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerGetVolumeRequest.Unmarshal(m, b)
+}
+func (m *ControllerGetVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerGetVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *ControllerGetVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerGetVolumeRequest.Merge(m, src)
+}
+func (m *ControllerGetVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_ControllerGetVolumeRequest.Size(m)
+}
+func (m *ControllerGetVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerGetVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerGetVolumeRequest proto.InternalMessageInfo
+
+func (m *ControllerGetVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+type ControllerGetVolumeResponse struct {
+	// This field is REQUIRED
+	Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"`
+	// This field is REQUIRED.
+	Status               *ControllerGetVolumeResponse_VolumeStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                                  `json:"-"`
+	XXX_unrecognized     []byte                                    `json:"-"`
+	XXX_sizecache        int32                                     `json:"-"`
+}
+
+func (m *ControllerGetVolumeResponse) Reset()         { *m = ControllerGetVolumeResponse{} }
+func (m *ControllerGetVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*ControllerGetVolumeResponse) ProtoMessage()    {}
+func (*ControllerGetVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{26}
+}
+
+func (m *ControllerGetVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerGetVolumeResponse.Unmarshal(m, b)
+}
+func (m *ControllerGetVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerGetVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *ControllerGetVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerGetVolumeResponse.Merge(m, src)
+}
+func (m *ControllerGetVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_ControllerGetVolumeResponse.Size(m)
+}
+func (m *ControllerGetVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerGetVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerGetVolumeResponse proto.InternalMessageInfo
+
+func (m *ControllerGetVolumeResponse) GetVolume() *Volume {
+	if m != nil {
+		return m.Volume
+	}
+	return nil
+}
+
+func (m *ControllerGetVolumeResponse) GetStatus() *ControllerGetVolumeResponse_VolumeStatus {
+	if m != nil {
+		return m.Status
+	}
+	return nil
+}
+
+type ControllerGetVolumeResponse_VolumeStatus struct {
+	// A list of all the `node_id` of nodes that this volume is
+	// controller published on.
+	// This field is OPTIONAL.
+	// This field MUST be specified if the PUBLISH_UNPUBLISH_VOLUME
+	// controller capability is supported.
+	// published_node_ids MAY include nodes not published to or
+	// reported by the SP. The CO MUST be resilient to that.
+	PublishedNodeIds []string `protobuf:"bytes,1,rep,name=published_node_ids,json=publishedNodeIds,proto3" json:"published_node_ids,omitempty"`
+	// Information about the current condition of the volume.
+	// This field is OPTIONAL.
+	// This field MUST be specified if the
+	// VOLUME_CONDITION controller capability is supported.
+	VolumeCondition      *VolumeCondition `protobuf:"bytes,2,opt,name=volume_condition,json=volumeCondition,proto3" json:"volume_condition,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *ControllerGetVolumeResponse_VolumeStatus) Reset() {
+	*m = ControllerGetVolumeResponse_VolumeStatus{}
+}
+func (m *ControllerGetVolumeResponse_VolumeStatus) String() string { return proto.CompactTextString(m) }
+func (*ControllerGetVolumeResponse_VolumeStatus) ProtoMessage()    {}
+func (*ControllerGetVolumeResponse_VolumeStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{26, 0}
+}
+
+func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.Unmarshal(m, b)
+}
+func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.Marshal(b, m, deterministic)
+}
+func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.Merge(m, src)
+}
+func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_Size() int {
+	return xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.Size(m)
+}
+func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus proto.InternalMessageInfo
+
+func (m *ControllerGetVolumeResponse_VolumeStatus) GetPublishedNodeIds() []string {
+	if m != nil {
+		return m.PublishedNodeIds
+	}
+	return nil
+}
+
+func (m *ControllerGetVolumeResponse_VolumeStatus) GetVolumeCondition() *VolumeCondition {
+	if m != nil {
+		return m.VolumeCondition
+	}
+	return nil
+}
+
+type GetCapacityRequest struct {
+	// If specified, the Plugin SHALL report the capacity of the storage
+	// that can be used to provision volumes that satisfy ALL of the
+	// specified `volume_capabilities`. These are the same
+	// `volume_capabilities` the CO will use in `CreateVolumeRequest`.
+	// This field is OPTIONAL.
+	VolumeCapabilities []*VolumeCapability `protobuf:"bytes,1,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"`
+	// If specified, the Plugin SHALL report the capacity of the storage
+	// that can be used to provision volumes with the given Plugin
+	// specific `parameters`. These are the same `parameters` the CO will
+	// use in `CreateVolumeRequest`. This field is OPTIONAL.
+	Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// If specified, the Plugin SHALL report the capacity of the storage
+	// that can be used to provision volumes that in the specified
+	// `accessible_topology`. This is the same as the
+	// `accessible_topology` the CO returns in a `CreateVolumeResponse`.
+	// This field is OPTIONAL. This field SHALL NOT be set unless the
+	// plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability.
+	AccessibleTopology   *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *GetCapacityRequest) Reset()         { *m = GetCapacityRequest{} }
+func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) }
+func (*GetCapacityRequest) ProtoMessage()    {}
+func (*GetCapacityRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{27}
+}
+
+func (m *GetCapacityRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetCapacityRequest.Unmarshal(m, b)
+}
+func (m *GetCapacityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetCapacityRequest.Marshal(b, m, deterministic)
+}
+func (m *GetCapacityRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetCapacityRequest.Merge(m, src)
+}
+func (m *GetCapacityRequest) XXX_Size() int {
+	return xxx_messageInfo_GetCapacityRequest.Size(m)
+}
+func (m *GetCapacityRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetCapacityRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetCapacityRequest proto.InternalMessageInfo
+
+func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability {
+	if m != nil {
+		return m.VolumeCapabilities
+	}
+	return nil
+}
+
+func (m *GetCapacityRequest) GetParameters() map[string]string {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+func (m *GetCapacityRequest) GetAccessibleTopology() *Topology {
+	if m != nil {
+		return m.AccessibleTopology
+	}
+	return nil
+}
+
+type GetCapacityResponse struct {
+	// The available capacity, in bytes, of the storage that can be used
+	// to provision volumes. If `volume_capabilities` or `parameters` is
+	// specified in the request, the Plugin SHALL take those into
+	// consideration when calculating the available capacity of the
+	// storage. This field is REQUIRED.
+	// The value of this field MUST NOT be negative.
+	AvailableCapacity int64 `protobuf:"varint,1,opt,name=available_capacity,json=availableCapacity,proto3" json:"available_capacity,omitempty"`
+	// The largest size that may be used in a
+	// CreateVolumeRequest.capacity_range.required_bytes field
+	// to create a volume with the same parameters as those in
+	// GetCapacityRequest.
+	//
+	// If `volume_capabilities` or `parameters` is
+	// specified in the request, the Plugin SHALL take those into
+	// consideration when calculating the minimum volume size of the
+	// storage.
+	//
+	// This field is OPTIONAL. MUST NOT be negative.
+	// The Plugin SHOULD provide a value for this field if it has
+	// a maximum size for individual volumes and leave it unset
+	// otherwise. COs MAY use it to make decision about
+	// where to create volumes.
+	MaximumVolumeSize *wrappers.Int64Value `protobuf:"bytes,2,opt,name=maximum_volume_size,json=maximumVolumeSize,proto3" json:"maximum_volume_size,omitempty"`
+	// The smallest size that may be used in a
+	// CreateVolumeRequest.capacity_range.limit_bytes field
+	// to create a volume with the same parameters as those in
+	// GetCapacityRequest.
+	//
+	// If `volume_capabilities` or `parameters` is
+	// specified in the request, the Plugin SHALL take those into
+	// consideration when calculating the maximum volume size of the
+	// storage.
+	//
+	// This field is OPTIONAL. MUST NOT be negative.
+	// The Plugin SHOULD provide a value for this field if it has
+	// a minimum size for individual volumes and leave it unset
+	// otherwise. COs MAY use it to make decision about
+	// where to create volumes.
+	MinimumVolumeSize    *wrappers.Int64Value `protobuf:"bytes,3,opt,name=minimum_volume_size,json=minimumVolumeSize,proto3" json:"minimum_volume_size,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *GetCapacityResponse) Reset()         { *m = GetCapacityResponse{} }
+func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) }
+func (*GetCapacityResponse) ProtoMessage()    {}
+func (*GetCapacityResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{28}
+}
+
+func (m *GetCapacityResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetCapacityResponse.Unmarshal(m, b)
+}
+func (m *GetCapacityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetCapacityResponse.Marshal(b, m, deterministic)
+}
+func (m *GetCapacityResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetCapacityResponse.Merge(m, src)
+}
+func (m *GetCapacityResponse) XXX_Size() int {
+	return xxx_messageInfo_GetCapacityResponse.Size(m)
+}
+func (m *GetCapacityResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetCapacityResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetCapacityResponse proto.InternalMessageInfo
+
+func (m *GetCapacityResponse) GetAvailableCapacity() int64 {
+	if m != nil {
+		return m.AvailableCapacity
+	}
+	return 0
+}
+
+func (m *GetCapacityResponse) GetMaximumVolumeSize() *wrappers.Int64Value {
+	if m != nil {
+		return m.MaximumVolumeSize
+	}
+	return nil
+}
+
+func (m *GetCapacityResponse) GetMinimumVolumeSize() *wrappers.Int64Value {
+	if m != nil {
+		return m.MinimumVolumeSize
+	}
+	return nil
+}
+
+type ControllerGetCapabilitiesRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ControllerGetCapabilitiesRequest) Reset()         { *m = ControllerGetCapabilitiesRequest{} }
+func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) }
+func (*ControllerGetCapabilitiesRequest) ProtoMessage()    {}
+func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{29}
+}
+
+func (m *ControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerGetCapabilitiesRequest.Unmarshal(m, b)
+}
+func (m *ControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerGetCapabilitiesRequest.Marshal(b, m, deterministic)
+}
+func (m *ControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerGetCapabilitiesRequest.Merge(m, src)
+}
+func (m *ControllerGetCapabilitiesRequest) XXX_Size() int {
+	return xxx_messageInfo_ControllerGetCapabilitiesRequest.Size(m)
+}
+func (m *ControllerGetCapabilitiesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerGetCapabilitiesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerGetCapabilitiesRequest proto.InternalMessageInfo
+
+type ControllerGetCapabilitiesResponse struct {
+	// All the capabilities that the controller service supports. This
+	// field is OPTIONAL.
+	Capabilities         []*ControllerServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
+	XXX_unrecognized     []byte                         `json:"-"`
+	XXX_sizecache        int32                          `json:"-"`
+}
+
+func (m *ControllerGetCapabilitiesResponse) Reset()         { *m = ControllerGetCapabilitiesResponse{} }
+func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
+func (*ControllerGetCapabilitiesResponse) ProtoMessage()    {}
+func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{30}
+}
+
+func (m *ControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerGetCapabilitiesResponse.Unmarshal(m, b)
+}
+func (m *ControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerGetCapabilitiesResponse.Marshal(b, m, deterministic)
+}
+func (m *ControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerGetCapabilitiesResponse.Merge(m, src)
+}
+func (m *ControllerGetCapabilitiesResponse) XXX_Size() int {
+	return xxx_messageInfo_ControllerGetCapabilitiesResponse.Size(m)
+}
+func (m *ControllerGetCapabilitiesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerGetCapabilitiesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerGetCapabilitiesResponse proto.InternalMessageInfo
+
+func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability {
+	if m != nil {
+		return m.Capabilities
+	}
+	return nil
+}
+
+// Specifies a capability of the controller service.
+type ControllerServiceCapability struct {
+	// Types that are valid to be assigned to Type:
+	//	*ControllerServiceCapability_Rpc
+	Type                 isControllerServiceCapability_Type `protobuf_oneof:"type"`
+	XXX_NoUnkeyedLiteral struct{}                           `json:"-"`
+	XXX_unrecognized     []byte                             `json:"-"`
+	XXX_sizecache        int32                              `json:"-"`
+}
+
+func (m *ControllerServiceCapability) Reset()         { *m = ControllerServiceCapability{} }
+func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) }
+func (*ControllerServiceCapability) ProtoMessage()    {}
+func (*ControllerServiceCapability) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{31}
+}
+
+func (m *ControllerServiceCapability) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerServiceCapability.Unmarshal(m, b)
+}
+func (m *ControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerServiceCapability.Marshal(b, m, deterministic)
+}
+func (m *ControllerServiceCapability) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerServiceCapability.Merge(m, src)
+}
+func (m *ControllerServiceCapability) XXX_Size() int {
+	return xxx_messageInfo_ControllerServiceCapability.Size(m)
+}
+func (m *ControllerServiceCapability) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerServiceCapability.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerServiceCapability proto.InternalMessageInfo
+
+type isControllerServiceCapability_Type interface {
+	isControllerServiceCapability_Type()
+}
+
+type ControllerServiceCapability_Rpc struct {
+	Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"`
+}
+
+func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {}
+
+func (m *ControllerServiceCapability) GetType() isControllerServiceCapability_Type {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC {
+	if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok {
+		return x.Rpc
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*ControllerServiceCapability) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*ControllerServiceCapability_Rpc)(nil),
+	}
+}
+
+type ControllerServiceCapability_RPC struct {
+	Type                 ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.ControllerServiceCapability_RPC_Type" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                             `json:"-"`
+	XXX_unrecognized     []byte                               `json:"-"`
+	XXX_sizecache        int32                                `json:"-"`
+}
+
+func (m *ControllerServiceCapability_RPC) Reset()         { *m = ControllerServiceCapability_RPC{} }
+func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) }
+func (*ControllerServiceCapability_RPC) ProtoMessage()    {}
+func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{31, 0}
+}
+
+func (m *ControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerServiceCapability_RPC.Unmarshal(m, b)
+}
+func (m *ControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerServiceCapability_RPC.Marshal(b, m, deterministic)
+}
+func (m *ControllerServiceCapability_RPC) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerServiceCapability_RPC.Merge(m, src)
+}
+func (m *ControllerServiceCapability_RPC) XXX_Size() int {
+	return xxx_messageInfo_ControllerServiceCapability_RPC.Size(m)
+}
+func (m *ControllerServiceCapability_RPC) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerServiceCapability_RPC.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerServiceCapability_RPC proto.InternalMessageInfo
+
+func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type {
+	if m != nil {
+		return m.Type
+	}
+	return ControllerServiceCapability_RPC_UNKNOWN
+}
+
+type CreateSnapshotRequest struct {
+	// The ID of the source volume to be snapshotted.
+	// This field is REQUIRED.
+	SourceVolumeId string `protobuf:"bytes,1,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"`
+	// The suggested name for the snapshot. This field is REQUIRED for
+	// idempotency.
+	// Any Unicode string that conforms to the length limit is allowed
+	// except those containing the following banned characters:
+	// U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
+	// (These are control characters other than commonly used whitespace.)
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// Secrets required by plugin to complete snapshot creation request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Plugin specific parameters passed in as opaque key-value pairs.
+	// This field is OPTIONAL. The Plugin is responsible for parsing and
+	// validating these parameters. COs will treat these as opaque.
+	// Use cases for opaque parameters:
+	// - Specify a policy to automatically clean up the snapshot.
+	// - Specify an expiration date for the snapshot.
+	// - Specify whether the snapshot is readonly or read/write.
+	// - Specify if the snapshot should be replicated to some place.
+	// - Specify primary or secondary for replication systems that
+	//   support snapshotting only on primary.
+	Parameters           map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *CreateSnapshotRequest) Reset()         { *m = CreateSnapshotRequest{} }
+func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateSnapshotRequest) ProtoMessage()    {}
+func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{32}
+}
+
+func (m *CreateSnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CreateSnapshotRequest.Unmarshal(m, b)
+}
+func (m *CreateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CreateSnapshotRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateSnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateSnapshotRequest.Merge(m, src)
+}
+func (m *CreateSnapshotRequest) XXX_Size() int {
+	return xxx_messageInfo_CreateSnapshotRequest.Size(m)
+}
+func (m *CreateSnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateSnapshotRequest proto.InternalMessageInfo
+
+func (m *CreateSnapshotRequest) GetSourceVolumeId() string {
+	if m != nil {
+		return m.SourceVolumeId
+	}
+	return ""
+}
+
+func (m *CreateSnapshotRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *CreateSnapshotRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *CreateSnapshotRequest) GetParameters() map[string]string {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+type CreateSnapshotResponse struct {
+	// Contains all attributes of the newly created snapshot that are
+	// relevant to the CO along with information required by the Plugin
+	// to uniquely identify the snapshot. This field is REQUIRED.
+	Snapshot             *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *CreateSnapshotResponse) Reset()         { *m = CreateSnapshotResponse{} }
+func (m *CreateSnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateSnapshotResponse) ProtoMessage()    {}
+func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{33}
+}
+
+func (m *CreateSnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CreateSnapshotResponse.Unmarshal(m, b)
+}
+func (m *CreateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CreateSnapshotResponse.Marshal(b, m, deterministic)
+}
+func (m *CreateSnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateSnapshotResponse.Merge(m, src)
+}
+func (m *CreateSnapshotResponse) XXX_Size() int {
+	return xxx_messageInfo_CreateSnapshotResponse.Size(m)
+}
+func (m *CreateSnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateSnapshotResponse proto.InternalMessageInfo
+
+func (m *CreateSnapshotResponse) GetSnapshot() *Snapshot {
+	if m != nil {
+		return m.Snapshot
+	}
+	return nil
+}
+
+// Information about a specific snapshot.
+type Snapshot struct {
+	// This is the complete size of the snapshot in bytes. The purpose of
+	// this field is to give CO guidance on how much space is needed to
+	// create a volume from this snapshot. The size of the volume MUST NOT
+	// be less than the size of the source snapshot. This field is
+	// OPTIONAL. If this field is not set, it indicates that this size is
+	// unknown. The value of this field MUST NOT be negative and a size of
+	// zero means it is unspecified.
+	SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"`
+	// The identifier for this snapshot, generated by the plugin.
+	// This field is REQUIRED.
+	// This field MUST contain enough information to uniquely identify
+	// this specific snapshot vs all other snapshots supported by this
+	// plugin.
+	// This field SHALL be used by the CO in subsequent calls to refer to
+	// this snapshot.
+	// The SP is NOT responsible for global uniqueness of snapshot_id
+	// across multiple SPs.
+	SnapshotId string `protobuf:"bytes,2,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
+	// Identity information for the source volume. Note that creating a
+	// snapshot from a snapshot is not supported here so the source has to
+	// be a volume. This field is REQUIRED.
+	SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"`
+	// Timestamp when the point-in-time snapshot is taken on the storage
+	// system. This field is REQUIRED.
+	CreationTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"`
+	// Indicates if a snapshot is ready to use as a
+	// `volume_content_source` in a `CreateVolumeRequest`. The default
+	// value is false. This field is REQUIRED.
+	ReadyToUse           bool     `protobuf:"varint,5,opt,name=ready_to_use,json=readyToUse,proto3" json:"ready_to_use,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Snapshot) Reset()         { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage()    {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{34}
+}
+
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Snapshot.Unmarshal(m, b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+}
+func (m *Snapshot) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Snapshot.Merge(m, src)
+}
+func (m *Snapshot) XXX_Size() int {
+	return xxx_messageInfo_Snapshot.Size(m)
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+	xxx_messageInfo_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
+
+func (m *Snapshot) GetSizeBytes() int64 {
+	if m != nil {
+		return m.SizeBytes
+	}
+	return 0
+}
+
+func (m *Snapshot) GetSnapshotId() string {
+	if m != nil {
+		return m.SnapshotId
+	}
+	return ""
+}
+
+func (m *Snapshot) GetSourceVolumeId() string {
+	if m != nil {
+		return m.SourceVolumeId
+	}
+	return ""
+}
+
+func (m *Snapshot) GetCreationTime() *timestamp.Timestamp {
+	if m != nil {
+		return m.CreationTime
+	}
+	return nil
+}
+
+func (m *Snapshot) GetReadyToUse() bool {
+	if m != nil {
+		return m.ReadyToUse
+	}
+	return false
+}
+
+type DeleteSnapshotRequest struct {
+	// The ID of the snapshot to be deleted.
+	// This field is REQUIRED.
+	SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
+	// Secrets required by plugin to complete snapshot deletion request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets              map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *DeleteSnapshotRequest) Reset()         { *m = DeleteSnapshotRequest{} }
+func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteSnapshotRequest) ProtoMessage()    {}
+func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{35}
+}
+
+func (m *DeleteSnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeleteSnapshotRequest.Unmarshal(m, b)
+}
+func (m *DeleteSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeleteSnapshotRequest.Marshal(b, m, deterministic)
+}
+func (m *DeleteSnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteSnapshotRequest.Merge(m, src)
+}
+func (m *DeleteSnapshotRequest) XXX_Size() int {
+	return xxx_messageInfo_DeleteSnapshotRequest.Size(m)
+}
+func (m *DeleteSnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteSnapshotRequest proto.InternalMessageInfo
+
+func (m *DeleteSnapshotRequest) GetSnapshotId() string {
+	if m != nil {
+		return m.SnapshotId
+	}
+	return ""
+}
+
+func (m *DeleteSnapshotRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type DeleteSnapshotResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeleteSnapshotResponse) Reset()         { *m = DeleteSnapshotResponse{} }
+func (m *DeleteSnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteSnapshotResponse) ProtoMessage()    {}
+func (*DeleteSnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{36}
+}
+
+func (m *DeleteSnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeleteSnapshotResponse.Unmarshal(m, b)
+}
+func (m *DeleteSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeleteSnapshotResponse.Marshal(b, m, deterministic)
+}
+func (m *DeleteSnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteSnapshotResponse.Merge(m, src)
+}
+func (m *DeleteSnapshotResponse) XXX_Size() int {
+	return xxx_messageInfo_DeleteSnapshotResponse.Size(m)
+}
+func (m *DeleteSnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteSnapshotResponse proto.InternalMessageInfo
+
+// List all snapshots on the storage system regardless of how they were
+// created.
+type ListSnapshotsRequest struct {
+	// If specified (non-zero value), the Plugin MUST NOT return more
+	// entries than this number in the response. If the actual number of
+	// entries is more than this number, the Plugin MUST set `next_token`
+	// in the response which can be used to get the next page of entries
+	// in the subsequent `ListSnapshots` call. This field is OPTIONAL. If
+	// not specified (zero value), it means there is no restriction on the
+	// number of entries that can be returned.
+	// The value of this field MUST NOT be negative.
+	MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"`
+	// A token to specify where to start paginating. Set this field to
+	// `next_token` returned by a previous `ListSnapshots` call to get the
+	// next page of entries. This field is OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken,proto3" json:"starting_token,omitempty"`
+	// Identity information for the source volume. This field is OPTIONAL.
+	// It can be used to list snapshots by volume.
+	SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"`
+	// Identity information for a specific snapshot. This field is
+	// OPTIONAL. It can be used to list only a specific snapshot.
+	// ListSnapshots will return with current snapshot information
+	// and will not block if the snapshot is being processed after
+	// it is cut.
+	SnapshotId string `protobuf:"bytes,4,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
+	// Secrets required by plugin to complete ListSnapshot request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets              map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ListSnapshotsRequest) Reset()         { *m = ListSnapshotsRequest{} }
+func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListSnapshotsRequest) ProtoMessage()    {}
+func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{37}
+}
+
+func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListSnapshotsRequest.Unmarshal(m, b)
+}
+func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListSnapshotsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListSnapshotsRequest.Merge(m, src)
+}
+func (m *ListSnapshotsRequest) XXX_Size() int {
+	return xxx_messageInfo_ListSnapshotsRequest.Size(m)
+}
+func (m *ListSnapshotsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo
+
+func (m *ListSnapshotsRequest) GetMaxEntries() int32 {
+	if m != nil {
+		return m.MaxEntries
+	}
+	return 0
+}
+
+func (m *ListSnapshotsRequest) GetStartingToken() string {
+	if m != nil {
+		return m.StartingToken
+	}
+	return ""
+}
+
+func (m *ListSnapshotsRequest) GetSourceVolumeId() string {
+	if m != nil {
+		return m.SourceVolumeId
+	}
+	return ""
+}
+
+func (m *ListSnapshotsRequest) GetSnapshotId() string {
+	if m != nil {
+		return m.SnapshotId
+	}
+	return ""
+}
+
+func (m *ListSnapshotsRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type ListSnapshotsResponse struct {
+	Entries []*ListSnapshotsResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+	// This token allows you to get the next page of entries for
+	// `ListSnapshots` request. If the number of entries is larger than
+	// `max_entries`, use the `next_token` as a value for the
+	// `starting_token` field in the next `ListSnapshots` request. This
+	// field is OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	NextToken            string   `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ListSnapshotsResponse) Reset()         { *m = ListSnapshotsResponse{} }
+func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListSnapshotsResponse) ProtoMessage()    {}
+func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{38}
+}
+
+func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListSnapshotsResponse.Unmarshal(m, b)
+}
+func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic)
+}
+func (m *ListSnapshotsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListSnapshotsResponse.Merge(m, src)
+}
+func (m *ListSnapshotsResponse) XXX_Size() int {
+	return xxx_messageInfo_ListSnapshotsResponse.Size(m)
+}
+func (m *ListSnapshotsResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo
+
+func (m *ListSnapshotsResponse) GetEntries() []*ListSnapshotsResponse_Entry {
+	if m != nil {
+		return m.Entries
+	}
+	return nil
+}
+
+func (m *ListSnapshotsResponse) GetNextToken() string {
+	if m != nil {
+		return m.NextToken
+	}
+	return ""
+}
+
+type ListSnapshotsResponse_Entry struct {
+	Snapshot             *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *ListSnapshotsResponse_Entry) Reset()         { *m = ListSnapshotsResponse_Entry{} }
+func (m *ListSnapshotsResponse_Entry) String() string { return proto.CompactTextString(m) }
+func (*ListSnapshotsResponse_Entry) ProtoMessage()    {}
+func (*ListSnapshotsResponse_Entry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{38, 0}
+}
+
+func (m *ListSnapshotsResponse_Entry) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListSnapshotsResponse_Entry.Unmarshal(m, b)
+}
+func (m *ListSnapshotsResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListSnapshotsResponse_Entry.Marshal(b, m, deterministic)
+}
+func (m *ListSnapshotsResponse_Entry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListSnapshotsResponse_Entry.Merge(m, src)
+}
+func (m *ListSnapshotsResponse_Entry) XXX_Size() int {
+	return xxx_messageInfo_ListSnapshotsResponse_Entry.Size(m)
+}
+func (m *ListSnapshotsResponse_Entry) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListSnapshotsResponse_Entry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListSnapshotsResponse_Entry proto.InternalMessageInfo
+
+func (m *ListSnapshotsResponse_Entry) GetSnapshot() *Snapshot {
+	if m != nil {
+		return m.Snapshot
+	}
+	return nil
+}
+
+type ControllerExpandVolumeRequest struct {
+	// The ID of the volume to expand. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// This allows CO to specify the capacity requirements of the volume
+	// after expansion. This field is REQUIRED.
+	CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"`
+	// Secrets required by the plugin for expanding the volume.
+	// This field is OPTIONAL.
+	Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Volume capability describing how the CO intends to use this volume.
+	// This allows SP to determine if volume is being used as a block
+	// device or mounted file system. For example - if volume is
+	// being used as a block device - the SP MAY set
+	// node_expansion_required to false in ControllerExpandVolumeResponse
+	// to skip invocation of NodeExpandVolume on the node by the CO.
+	// This is an OPTIONAL field.
+	VolumeCapability     *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ControllerExpandVolumeRequest) Reset()         { *m = ControllerExpandVolumeRequest{} }
+func (m *ControllerExpandVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*ControllerExpandVolumeRequest) ProtoMessage()    {}
+func (*ControllerExpandVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{39}
+}
+
+func (m *ControllerExpandVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerExpandVolumeRequest.Unmarshal(m, b)
+}
+func (m *ControllerExpandVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerExpandVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *ControllerExpandVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerExpandVolumeRequest.Merge(m, src)
+}
+func (m *ControllerExpandVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_ControllerExpandVolumeRequest.Size(m)
+}
+func (m *ControllerExpandVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerExpandVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerExpandVolumeRequest proto.InternalMessageInfo
+
+func (m *ControllerExpandVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *ControllerExpandVolumeRequest) GetCapacityRange() *CapacityRange {
+	if m != nil {
+		return m.CapacityRange
+	}
+	return nil
+}
+
+func (m *ControllerExpandVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *ControllerExpandVolumeRequest) GetVolumeCapability() *VolumeCapability {
+	if m != nil {
+		return m.VolumeCapability
+	}
+	return nil
+}
+
+type ControllerExpandVolumeResponse struct {
+	// Capacity of volume after expansion. This field is REQUIRED.
+	CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"`
+	// Whether node expansion is required for the volume. When true
+	// the CO MUST make NodeExpandVolume RPC call on the node. This field
+	// is REQUIRED.
+	NodeExpansionRequired bool     `protobuf:"varint,2,opt,name=node_expansion_required,json=nodeExpansionRequired,proto3" json:"node_expansion_required,omitempty"`
+	XXX_NoUnkeyedLiteral  struct{} `json:"-"`
+	XXX_unrecognized      []byte   `json:"-"`
+	XXX_sizecache         int32    `json:"-"`
+}
+
+func (m *ControllerExpandVolumeResponse) Reset()         { *m = ControllerExpandVolumeResponse{} }
+func (m *ControllerExpandVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*ControllerExpandVolumeResponse) ProtoMessage()    {}
+func (*ControllerExpandVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{40}
+}
+
+func (m *ControllerExpandVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerExpandVolumeResponse.Unmarshal(m, b)
+}
+func (m *ControllerExpandVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerExpandVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *ControllerExpandVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerExpandVolumeResponse.Merge(m, src)
+}
+func (m *ControllerExpandVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_ControllerExpandVolumeResponse.Size(m)
+}
+func (m *ControllerExpandVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerExpandVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerExpandVolumeResponse proto.InternalMessageInfo
+
+func (m *ControllerExpandVolumeResponse) GetCapacityBytes() int64 {
+	if m != nil {
+		return m.CapacityBytes
+	}
+	return 0
+}
+
+func (m *ControllerExpandVolumeResponse) GetNodeExpansionRequired() bool {
+	if m != nil {
+		return m.NodeExpansionRequired
+	}
+	return false
+}
+
+type NodeStageVolumeRequest struct {
+	// The ID of the volume to publish. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The CO SHALL set this field to the value returned by
+	// `ControllerPublishVolume` if the corresponding Controller Plugin
+	// has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
+	// left unset if the corresponding Controller Plugin does not have
+	// this capability. This is an OPTIONAL field.
+	PublishContext map[string]string `protobuf:"bytes,2,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// The path to which the volume MAY be staged. It MUST be an
+	// absolute path in the root filesystem of the process serving this
+	// request, and MUST be a directory. The CO SHALL ensure that there
+	// is only one `staging_target_path` per volume. The CO SHALL ensure
+	// that the path is directory and that the process serving the
+	// request has `read` and `write` permission to that directory. The
+	// CO SHALL be responsible for creating the directory if it does not
+	// exist.
+	// This is a REQUIRED field.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"`
+	// Volume capability describing how the CO intends to use this volume.
+	// SP MUST ensure the CO can use the staged volume as described.
+	// Otherwise SP MUST return the appropriate gRPC error code.
+	// This is a REQUIRED field.
+	VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"`
+	// Secrets required by plugin to complete node stage volume request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Volume context as returned by SP in
+	// CreateVolumeResponse.Volume.volume_context.
+	// This field is OPTIONAL and MUST match the volume_context of the
+	// volume identified by `volume_id`.
+	VolumeContext        map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *NodeStageVolumeRequest) Reset()         { *m = NodeStageVolumeRequest{} }
+func (m *NodeStageVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeStageVolumeRequest) ProtoMessage()    {}
+func (*NodeStageVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{41}
+}
+
+func (m *NodeStageVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeStageVolumeRequest.Unmarshal(m, b)
+}
+func (m *NodeStageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeStageVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeStageVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeStageVolumeRequest.Merge(m, src)
+}
+func (m *NodeStageVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeStageVolumeRequest.Size(m)
+}
+func (m *NodeStageVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeStageVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeStageVolumeRequest proto.InternalMessageInfo
+
+func (m *NodeStageVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *NodeStageVolumeRequest) GetPublishContext() map[string]string {
+	if m != nil {
+		return m.PublishContext
+	}
+	return nil
+}
+
+func (m *NodeStageVolumeRequest) GetStagingTargetPath() string {
+	if m != nil {
+		return m.StagingTargetPath
+	}
+	return ""
+}
+
+func (m *NodeStageVolumeRequest) GetVolumeCapability() *VolumeCapability {
+	if m != nil {
+		return m.VolumeCapability
+	}
+	return nil
+}
+
+func (m *NodeStageVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *NodeStageVolumeRequest) GetVolumeContext() map[string]string {
+	if m != nil {
+		return m.VolumeContext
+	}
+	return nil
+}
+
+type NodeStageVolumeResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeStageVolumeResponse) Reset()         { *m = NodeStageVolumeResponse{} }
+func (m *NodeStageVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeStageVolumeResponse) ProtoMessage()    {}
+func (*NodeStageVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{42}
+}
+
+func (m *NodeStageVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeStageVolumeResponse.Unmarshal(m, b)
+}
+func (m *NodeStageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeStageVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeStageVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeStageVolumeResponse.Merge(m, src)
+}
+func (m *NodeStageVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeStageVolumeResponse.Size(m)
+}
+func (m *NodeStageVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeStageVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeStageVolumeResponse proto.InternalMessageInfo
+
+type NodeUnstageVolumeRequest struct {
+	// The ID of the volume. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The path at which the volume was staged. It MUST be an absolute
+	// path in the root filesystem of the process serving this request.
+	// This is a REQUIRED field.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	StagingTargetPath    string   `protobuf:"bytes,2,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeUnstageVolumeRequest) Reset()         { *m = NodeUnstageVolumeRequest{} }
+func (m *NodeUnstageVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeUnstageVolumeRequest) ProtoMessage()    {}
+func (*NodeUnstageVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{43}
+}
+
+func (m *NodeUnstageVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeUnstageVolumeRequest.Unmarshal(m, b)
+}
+func (m *NodeUnstageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeUnstageVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeUnstageVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeUnstageVolumeRequest.Merge(m, src)
+}
+func (m *NodeUnstageVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeUnstageVolumeRequest.Size(m)
+}
+func (m *NodeUnstageVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeUnstageVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeUnstageVolumeRequest proto.InternalMessageInfo
+
+func (m *NodeUnstageVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *NodeUnstageVolumeRequest) GetStagingTargetPath() string {
+	if m != nil {
+		return m.StagingTargetPath
+	}
+	return ""
+}
+
+type NodeUnstageVolumeResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeUnstageVolumeResponse) Reset()         { *m = NodeUnstageVolumeResponse{} }
+func (m *NodeUnstageVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeUnstageVolumeResponse) ProtoMessage()    {}
+func (*NodeUnstageVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{44}
+}
+
+func (m *NodeUnstageVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeUnstageVolumeResponse.Unmarshal(m, b)
+}
+func (m *NodeUnstageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeUnstageVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeUnstageVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeUnstageVolumeResponse.Merge(m, src)
+}
+func (m *NodeUnstageVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeUnstageVolumeResponse.Size(m)
+}
+func (m *NodeUnstageVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeUnstageVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeUnstageVolumeResponse proto.InternalMessageInfo
+
+type NodePublishVolumeRequest struct {
+	// The ID of the volume to publish. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The CO SHALL set this field to the value returned by
+	// `ControllerPublishVolume` if the corresponding Controller Plugin
+	// has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
+	// left unset if the corresponding Controller Plugin does not have
+	// this capability. This is an OPTIONAL field.
+	PublishContext map[string]string `protobuf:"bytes,2,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// The path to which the volume was staged by `NodeStageVolume`.
+	// It MUST be an absolute path in the root filesystem of the process
+	// serving this request.
+	// It MUST be set if the Node Plugin implements the
+	// `STAGE_UNSTAGE_VOLUME` node capability.
+	// This is an OPTIONAL field.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"`
+	// The path to which the volume will be published. It MUST be an
+	// absolute path in the root filesystem of the process serving this
+	// request. The CO SHALL ensure uniqueness of target_path per volume.
+	// The CO SHALL ensure that the parent directory of this path exists
+	// and that the process serving the request has `read` and `write`
+	// permissions to that parent directory.
+	// For volumes with an access type of block, the SP SHALL place the
+	// block device at target_path.
+	// For volumes with an access type of mount, the SP SHALL place the
+	// mounted directory at target_path.
+	// Creation of target_path is the responsibility of the SP.
+	// This is a REQUIRED field.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath,proto3" json:"target_path,omitempty"`
+	// Volume capability describing how the CO intends to use this volume.
+	// SP MUST ensure the CO can use the published volume as described.
+	// Otherwise SP MUST return the appropriate gRPC error code.
+	// This is a REQUIRED field.
+	VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"`
+	// Indicates SP MUST publish the volume in readonly mode.
+	// This field is REQUIRED.
+	Readonly bool `protobuf:"varint,6,opt,name=readonly,proto3" json:"readonly,omitempty"`
+	// Secrets required by plugin to complete node publish volume request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets map[string]string `protobuf:"bytes,7,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Volume context as returned by SP in
+	// CreateVolumeResponse.Volume.volume_context.
+	// This field is OPTIONAL and MUST match the volume_context of the
+	// volume identified by `volume_id`.
+	VolumeContext        map[string]string `protobuf:"bytes,8,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *NodePublishVolumeRequest) Reset()         { *m = NodePublishVolumeRequest{} }
+func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*NodePublishVolumeRequest) ProtoMessage()    {}
+func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{45}
+}
+
+func (m *NodePublishVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodePublishVolumeRequest.Unmarshal(m, b)
+}
+func (m *NodePublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodePublishVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *NodePublishVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodePublishVolumeRequest.Merge(m, src)
+}
+func (m *NodePublishVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_NodePublishVolumeRequest.Size(m)
+}
+func (m *NodePublishVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodePublishVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodePublishVolumeRequest proto.InternalMessageInfo
+
+func (m *NodePublishVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *NodePublishVolumeRequest) GetPublishContext() map[string]string {
+	if m != nil {
+		return m.PublishContext
+	}
+	return nil
+}
+
+func (m *NodePublishVolumeRequest) GetStagingTargetPath() string {
+	if m != nil {
+		return m.StagingTargetPath
+	}
+	return ""
+}
+
+func (m *NodePublishVolumeRequest) GetTargetPath() string {
+	if m != nil {
+		return m.TargetPath
+	}
+	return ""
+}
+
+func (m *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability {
+	if m != nil {
+		return m.VolumeCapability
+	}
+	return nil
+}
+
+func (m *NodePublishVolumeRequest) GetReadonly() bool {
+	if m != nil {
+		return m.Readonly
+	}
+	return false
+}
+
+func (m *NodePublishVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *NodePublishVolumeRequest) GetVolumeContext() map[string]string {
+	if m != nil {
+		return m.VolumeContext
+	}
+	return nil
+}
+
+type NodePublishVolumeResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodePublishVolumeResponse) Reset()         { *m = NodePublishVolumeResponse{} }
+func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*NodePublishVolumeResponse) ProtoMessage()    {}
+func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{46}
+}
+
+func (m *NodePublishVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodePublishVolumeResponse.Unmarshal(m, b)
+}
+func (m *NodePublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodePublishVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *NodePublishVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodePublishVolumeResponse.Merge(m, src)
+}
+func (m *NodePublishVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_NodePublishVolumeResponse.Size(m)
+}
+func (m *NodePublishVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodePublishVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodePublishVolumeResponse proto.InternalMessageInfo
+
+type NodeUnpublishVolumeRequest struct {
+	// The ID of the volume. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The path at which the volume was published. It MUST be an absolute
+	// path in the root filesystem of the process serving this request.
+	// The SP MUST delete the file or directory it created at this path.
+	// This is a REQUIRED field.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	TargetPath           string   `protobuf:"bytes,2,opt,name=target_path,json=targetPath,proto3" json:"target_path,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeUnpublishVolumeRequest) Reset()         { *m = NodeUnpublishVolumeRequest{} }
+func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeUnpublishVolumeRequest) ProtoMessage()    {}
+func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{47}
+}
+
+func (m *NodeUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeUnpublishVolumeRequest.Unmarshal(m, b)
+}
+func (m *NodeUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeUnpublishVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeUnpublishVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeUnpublishVolumeRequest.Merge(m, src)
+}
+func (m *NodeUnpublishVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeUnpublishVolumeRequest.Size(m)
+}
+func (m *NodeUnpublishVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeUnpublishVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeUnpublishVolumeRequest proto.InternalMessageInfo
+
+func (m *NodeUnpublishVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *NodeUnpublishVolumeRequest) GetTargetPath() string {
+	if m != nil {
+		return m.TargetPath
+	}
+	return ""
+}
+
+type NodeUnpublishVolumeResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeUnpublishVolumeResponse) Reset()         { *m = NodeUnpublishVolumeResponse{} }
+func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeUnpublishVolumeResponse) ProtoMessage()    {}
+func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{48}
+}
+
+func (m *NodeUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeUnpublishVolumeResponse.Unmarshal(m, b)
+}
+func (m *NodeUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeUnpublishVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeUnpublishVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeUnpublishVolumeResponse.Merge(m, src)
+}
+func (m *NodeUnpublishVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeUnpublishVolumeResponse.Size(m)
+}
+func (m *NodeUnpublishVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeUnpublishVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeUnpublishVolumeResponse proto.InternalMessageInfo
+
+type NodeGetVolumeStatsRequest struct {
+	// The ID of the volume. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// It can be any valid path where volume was previously
+	// staged or published.
+	// It MUST be an absolute path in the root filesystem of
+	// the process serving this request.
+	// This is a REQUIRED field.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	VolumePath string `protobuf:"bytes,2,opt,name=volume_path,json=volumePath,proto3" json:"volume_path,omitempty"`
+	// The path where the volume is staged, if the plugin has the
+	// STAGE_UNSTAGE_VOLUME capability, otherwise empty.
+	// If not empty, it MUST be an absolute path in the root
+	// filesystem of the process serving this request.
+	// This field is OPTIONAL.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	StagingTargetPath    string   `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeGetVolumeStatsRequest) Reset()         { *m = NodeGetVolumeStatsRequest{} }
+func (m *NodeGetVolumeStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeGetVolumeStatsRequest) ProtoMessage()    {}
+func (*NodeGetVolumeStatsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{49}
+}
+
+func (m *NodeGetVolumeStatsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeGetVolumeStatsRequest.Unmarshal(m, b)
+}
+func (m *NodeGetVolumeStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeGetVolumeStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeGetVolumeStatsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeGetVolumeStatsRequest.Merge(m, src)
+}
+func (m *NodeGetVolumeStatsRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeGetVolumeStatsRequest.Size(m)
+}
+func (m *NodeGetVolumeStatsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeGetVolumeStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeGetVolumeStatsRequest proto.InternalMessageInfo
+
+func (m *NodeGetVolumeStatsRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *NodeGetVolumeStatsRequest) GetVolumePath() string {
+	if m != nil {
+		return m.VolumePath
+	}
+	return ""
+}
+
+func (m *NodeGetVolumeStatsRequest) GetStagingTargetPath() string {
+	if m != nil {
+		return m.StagingTargetPath
+	}
+	return ""
+}
+
+type NodeGetVolumeStatsResponse struct {
+	// This field is OPTIONAL.
+	Usage []*VolumeUsage `protobuf:"bytes,1,rep,name=usage,proto3" json:"usage,omitempty"`
+	// Information about the current condition of the volume.
+	// This field is OPTIONAL.
+	// This field MUST be specified if the VOLUME_CONDITION node
+	// capability is supported.
+	VolumeCondition      *VolumeCondition `protobuf:"bytes,2,opt,name=volume_condition,json=volumeCondition,proto3" json:"volume_condition,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *NodeGetVolumeStatsResponse) Reset()         { *m = NodeGetVolumeStatsResponse{} }
+func (m *NodeGetVolumeStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeGetVolumeStatsResponse) ProtoMessage()    {}
+func (*NodeGetVolumeStatsResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{50}
+}
+
+func (m *NodeGetVolumeStatsResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeGetVolumeStatsResponse.Unmarshal(m, b)
+}
+func (m *NodeGetVolumeStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeGetVolumeStatsResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeGetVolumeStatsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeGetVolumeStatsResponse.Merge(m, src)
+}
+func (m *NodeGetVolumeStatsResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeGetVolumeStatsResponse.Size(m)
+}
+func (m *NodeGetVolumeStatsResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeGetVolumeStatsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeGetVolumeStatsResponse proto.InternalMessageInfo
+
+func (m *NodeGetVolumeStatsResponse) GetUsage() []*VolumeUsage {
+	if m != nil {
+		return m.Usage
+	}
+	return nil
+}
+
+func (m *NodeGetVolumeStatsResponse) GetVolumeCondition() *VolumeCondition {
+	if m != nil {
+		return m.VolumeCondition
+	}
+	return nil
+}
+
+type VolumeUsage struct {
+	// The available capacity in specified Unit. This field is OPTIONAL.
+	// The value of this field MUST NOT be negative.
+	Available int64 `protobuf:"varint,1,opt,name=available,proto3" json:"available,omitempty"`
+	// The total capacity in specified Unit. This field is REQUIRED.
+	// The value of this field MUST NOT be negative.
+	Total int64 `protobuf:"varint,2,opt,name=total,proto3" json:"total,omitempty"`
+	// The used capacity in specified Unit. This field is OPTIONAL.
+	// The value of this field MUST NOT be negative.
+	Used int64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"`
+	// Units by which values are measured. This field is REQUIRED.
+	Unit                 VolumeUsage_Unit `protobuf:"varint,4,opt,name=unit,proto3,enum=csi.v1.VolumeUsage_Unit" json:"unit,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *VolumeUsage) Reset()         { *m = VolumeUsage{} }
+func (m *VolumeUsage) String() string { return proto.CompactTextString(m) }
+func (*VolumeUsage) ProtoMessage()    {}
+func (*VolumeUsage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{51}
+}
+
+func (m *VolumeUsage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeUsage.Unmarshal(m, b)
+}
+func (m *VolumeUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeUsage.Marshal(b, m, deterministic)
+}
+func (m *VolumeUsage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeUsage.Merge(m, src)
+}
+func (m *VolumeUsage) XXX_Size() int {
+	return xxx_messageInfo_VolumeUsage.Size(m)
+}
+func (m *VolumeUsage) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeUsage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeUsage proto.InternalMessageInfo
+
+func (m *VolumeUsage) GetAvailable() int64 {
+	if m != nil {
+		return m.Available
+	}
+	return 0
+}
+
+func (m *VolumeUsage) GetTotal() int64 {
+	if m != nil {
+		return m.Total
+	}
+	return 0
+}
+
+func (m *VolumeUsage) GetUsed() int64 {
+	if m != nil {
+		return m.Used
+	}
+	return 0
+}
+
+func (m *VolumeUsage) GetUnit() VolumeUsage_Unit {
+	if m != nil {
+		return m.Unit
+	}
+	return VolumeUsage_UNKNOWN
+}
+
+// VolumeCondition represents the current condition of a volume.
+type VolumeCondition struct {
+	// Normal volumes are available for use and operating optimally.
+	// An abnormal volume does not meet these criteria.
+	// This field is REQUIRED.
+	Abnormal bool `protobuf:"varint,1,opt,name=abnormal,proto3" json:"abnormal,omitempty"`
+	// The message describing the condition of the volume.
+	// This field is REQUIRED.
+	Message              string   `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *VolumeCondition) Reset()         { *m = VolumeCondition{} }
+func (m *VolumeCondition) String() string { return proto.CompactTextString(m) }
+func (*VolumeCondition) ProtoMessage()    {}
+func (*VolumeCondition) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{52}
+}
+
+func (m *VolumeCondition) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeCondition.Unmarshal(m, b)
+}
+func (m *VolumeCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeCondition.Marshal(b, m, deterministic)
+}
+func (m *VolumeCondition) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeCondition.Merge(m, src)
+}
+func (m *VolumeCondition) XXX_Size() int {
+	return xxx_messageInfo_VolumeCondition.Size(m)
+}
+func (m *VolumeCondition) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCondition proto.InternalMessageInfo
+
+func (m *VolumeCondition) GetAbnormal() bool {
+	if m != nil {
+		return m.Abnormal
+	}
+	return false
+}
+
+func (m *VolumeCondition) GetMessage() string {
+	if m != nil {
+		return m.Message
+	}
+	return ""
+}
+
+type NodeGetCapabilitiesRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeGetCapabilitiesRequest) Reset()         { *m = NodeGetCapabilitiesRequest{} }
+func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeGetCapabilitiesRequest) ProtoMessage()    {}
+func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{53}
+}
+
+func (m *NodeGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeGetCapabilitiesRequest.Unmarshal(m, b)
+}
+func (m *NodeGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeGetCapabilitiesRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeGetCapabilitiesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeGetCapabilitiesRequest.Merge(m, src)
+}
+func (m *NodeGetCapabilitiesRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeGetCapabilitiesRequest.Size(m)
+}
+func (m *NodeGetCapabilitiesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeGetCapabilitiesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeGetCapabilitiesRequest proto.InternalMessageInfo
+
+type NodeGetCapabilitiesResponse struct {
+	// All the capabilities that the node service supports. This field
+	// is OPTIONAL.
+	Capabilities         []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *NodeGetCapabilitiesResponse) Reset()         { *m = NodeGetCapabilitiesResponse{} }
+func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeGetCapabilitiesResponse) ProtoMessage()    {}
+func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{54}
+}
+
+func (m *NodeGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeGetCapabilitiesResponse.Unmarshal(m, b)
+}
+func (m *NodeGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeGetCapabilitiesResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeGetCapabilitiesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeGetCapabilitiesResponse.Merge(m, src)
+}
+func (m *NodeGetCapabilitiesResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeGetCapabilitiesResponse.Size(m)
+}
+func (m *NodeGetCapabilitiesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeGetCapabilitiesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeGetCapabilitiesResponse proto.InternalMessageInfo
+
+func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability {
+	if m != nil {
+		return m.Capabilities
+	}
+	return nil
+}
+
+// Specifies a capability of the node service.
+type NodeServiceCapability struct {
+	// Types that are valid to be assigned to Type:
+	//	*NodeServiceCapability_Rpc
+	Type                 isNodeServiceCapability_Type `protobuf_oneof:"type"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *NodeServiceCapability) Reset()         { *m = NodeServiceCapability{} }
+func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) }
+func (*NodeServiceCapability) ProtoMessage()    {}
+func (*NodeServiceCapability) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{55}
+}
+
+func (m *NodeServiceCapability) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeServiceCapability.Unmarshal(m, b)
+}
+func (m *NodeServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeServiceCapability.Marshal(b, m, deterministic)
+}
+func (m *NodeServiceCapability) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeServiceCapability.Merge(m, src)
+}
+func (m *NodeServiceCapability) XXX_Size() int {
+	return xxx_messageInfo_NodeServiceCapability.Size(m)
+}
+func (m *NodeServiceCapability) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeServiceCapability.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeServiceCapability proto.InternalMessageInfo
+
+type isNodeServiceCapability_Type interface {
+	isNodeServiceCapability_Type()
+}
+
+type NodeServiceCapability_Rpc struct {
+	Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"`
+}
+
+func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {}
+
+func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC {
+	if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok {
+		return x.Rpc
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*NodeServiceCapability) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*NodeServiceCapability_Rpc)(nil),
+	}
+}
+
+type NodeServiceCapability_RPC struct {
+	Type                 NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.NodeServiceCapability_RPC_Type" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
+	XXX_unrecognized     []byte                         `json:"-"`
+	XXX_sizecache        int32                          `json:"-"`
+}
+
+func (m *NodeServiceCapability_RPC) Reset()         { *m = NodeServiceCapability_RPC{} }
+func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) }
+func (*NodeServiceCapability_RPC) ProtoMessage()    {}
+func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{55, 0}
+}
+
+func (m *NodeServiceCapability_RPC) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeServiceCapability_RPC.Unmarshal(m, b)
+}
+func (m *NodeServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeServiceCapability_RPC.Marshal(b, m, deterministic)
+}
+func (m *NodeServiceCapability_RPC) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeServiceCapability_RPC.Merge(m, src)
+}
+func (m *NodeServiceCapability_RPC) XXX_Size() int {
+	return xxx_messageInfo_NodeServiceCapability_RPC.Size(m)
+}
+func (m *NodeServiceCapability_RPC) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeServiceCapability_RPC.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeServiceCapability_RPC proto.InternalMessageInfo
+
+func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type {
+	if m != nil {
+		return m.Type
+	}
+	return NodeServiceCapability_RPC_UNKNOWN
+}
+
+type NodeGetInfoRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeGetInfoRequest) Reset()         { *m = NodeGetInfoRequest{} }
+func (m *NodeGetInfoRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeGetInfoRequest) ProtoMessage()    {}
+func (*NodeGetInfoRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{56}
+}
+
+func (m *NodeGetInfoRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeGetInfoRequest.Unmarshal(m, b)
+}
+func (m *NodeGetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeGetInfoRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeGetInfoRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeGetInfoRequest.Merge(m, src)
+}
+func (m *NodeGetInfoRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeGetInfoRequest.Size(m)
+}
+func (m *NodeGetInfoRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeGetInfoRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeGetInfoRequest proto.InternalMessageInfo
+
+type NodeGetInfoResponse struct {
+	// The identifier of the node as understood by the SP.
+	// This field is REQUIRED.
+	// This field MUST contain enough information to uniquely identify
+	// this specific node vs all other nodes supported by this plugin.
+	// This field SHALL be used by the CO in subsequent calls, including
+	// `ControllerPublishVolume`, to refer to this node.
+	// The SP is NOT responsible for global uniqueness of node_id across
+	// multiple SPs.
+	// This field overrides the general CSI size limit.
+	// The size of this field SHALL NOT exceed 256 bytes. The general
+	// CSI size limit, 128 byte, is RECOMMENDED for best backwards
+	// compatibility.
+	NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+	// Maximum number of volumes that controller can publish to the node.
+	// If value is not set or zero CO SHALL decide how many volumes of
+	// this type can be published by the controller to the node. The
+	// plugin MUST NOT set negative values here.
+	// This field is OPTIONAL.
+	MaxVolumesPerNode int64 `protobuf:"varint,2,opt,name=max_volumes_per_node,json=maxVolumesPerNode,proto3" json:"max_volumes_per_node,omitempty"`
+	// Specifies where (regions, zones, racks, etc.) the node is
+	// accessible from.
+	// A plugin that returns this field MUST also set the
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
+	// COs MAY use this information along with the topology information
+	// returned in CreateVolumeResponse to ensure that a given volume is
+	// accessible from a given node when scheduling workloads.
+	// This field is OPTIONAL. If it is not specified, the CO MAY assume
+	// the node is not subject to any topological constraint, and MAY
+	// schedule workloads that reference any volume V, such that there are
+	// no topological constraints declared for V.
+	//
+	// Example 1:
+	//   accessible_topology =
+	//     {"region": "R1", "zone": "Z2"}
+	// Indicates the node exists within the "region" "R1" and the "zone"
+	// "Z2".
+	AccessibleTopology   *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *NodeGetInfoResponse) Reset()         { *m = NodeGetInfoResponse{} }
+func (m *NodeGetInfoResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeGetInfoResponse) ProtoMessage()    {}
+func (*NodeGetInfoResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{57}
+}
+
+func (m *NodeGetInfoResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeGetInfoResponse.Unmarshal(m, b)
+}
+func (m *NodeGetInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeGetInfoResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeGetInfoResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeGetInfoResponse.Merge(m, src)
+}
+func (m *NodeGetInfoResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeGetInfoResponse.Size(m)
+}
+func (m *NodeGetInfoResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeGetInfoResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeGetInfoResponse proto.InternalMessageInfo
+
+func (m *NodeGetInfoResponse) GetNodeId() string {
+	if m != nil {
+		return m.NodeId
+	}
+	return ""
+}
+
+func (m *NodeGetInfoResponse) GetMaxVolumesPerNode() int64 {
+	if m != nil {
+		return m.MaxVolumesPerNode
+	}
+	return 0
+}
+
+func (m *NodeGetInfoResponse) GetAccessibleTopology() *Topology {
+	if m != nil {
+		return m.AccessibleTopology
+	}
+	return nil
+}
+
+type NodeExpandVolumeRequest struct {
+	// The ID of the volume. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The path on which volume is available. This field is REQUIRED.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	VolumePath string `protobuf:"bytes,2,opt,name=volume_path,json=volumePath,proto3" json:"volume_path,omitempty"`
+	// This allows CO to specify the capacity requirements of the volume
+	// after expansion. If capacity_range is omitted then a plugin MAY
+	// inspect the file system of the volume to determine the maximum
+	// capacity to which the volume can be expanded. In such cases a
+	// plugin MAY expand the volume to its maximum capacity.
+	// This field is OPTIONAL.
+	CapacityRange *CapacityRange `protobuf:"bytes,3,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"`
+	// The path where the volume is staged, if the plugin has the
+	// STAGE_UNSTAGE_VOLUME capability, otherwise empty.
+	// If not empty, it MUST be an absolute path in the root
+	// filesystem of the process serving this request.
+	// This field is OPTIONAL.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	StagingTargetPath string `protobuf:"bytes,4,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"`
+	// Volume capability describing how the CO intends to use this volume.
+	// This allows SP to determine if volume is being used as a block
+	// device or mounted file system. For example - if volume is being
+	// used as a block device the SP MAY choose to skip expanding the
+	// filesystem in NodeExpandVolume implementation but still perform
+	// rest of the housekeeping needed for expanding the volume. If
+	// volume_capability is omitted the SP MAY determine
+	// access_type from given volume_path for the volume and perform
+	// node expansion. This is an OPTIONAL field.
+	VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"`
+	// Secrets required by plugin to complete node expand volume request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets              map[string]string `protobuf:"bytes,6,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *NodeExpandVolumeRequest) Reset()         { *m = NodeExpandVolumeRequest{} }
+func (m *NodeExpandVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeExpandVolumeRequest) ProtoMessage()    {}
+func (*NodeExpandVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{58}
+}
+
+func (m *NodeExpandVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeExpandVolumeRequest.Unmarshal(m, b)
+}
+func (m *NodeExpandVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeExpandVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeExpandVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeExpandVolumeRequest.Merge(m, src)
+}
+func (m *NodeExpandVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeExpandVolumeRequest.Size(m)
+}
+func (m *NodeExpandVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeExpandVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeExpandVolumeRequest proto.InternalMessageInfo
+
+func (m *NodeExpandVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *NodeExpandVolumeRequest) GetVolumePath() string {
+	if m != nil {
+		return m.VolumePath
+	}
+	return ""
+}
+
+func (m *NodeExpandVolumeRequest) GetCapacityRange() *CapacityRange {
+	if m != nil {
+		return m.CapacityRange
+	}
+	return nil
+}
+
+func (m *NodeExpandVolumeRequest) GetStagingTargetPath() string {
+	if m != nil {
+		return m.StagingTargetPath
+	}
+	return ""
+}
+
+func (m *NodeExpandVolumeRequest) GetVolumeCapability() *VolumeCapability {
+	if m != nil {
+		return m.VolumeCapability
+	}
+	return nil
+}
+
+func (m *NodeExpandVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type NodeExpandVolumeResponse struct {
+	// The capacity of the volume in bytes. This field is OPTIONAL.
+	CapacityBytes        int64    `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeExpandVolumeResponse) Reset()         { *m = NodeExpandVolumeResponse{} }
+func (m *NodeExpandVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeExpandVolumeResponse) ProtoMessage()    {}
+func (*NodeExpandVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{59}
+}
+
+func (m *NodeExpandVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeExpandVolumeResponse.Unmarshal(m, b)
+}
+func (m *NodeExpandVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeExpandVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeExpandVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeExpandVolumeResponse.Merge(m, src)
+}
+func (m *NodeExpandVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeExpandVolumeResponse.Size(m)
+}
+func (m *NodeExpandVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeExpandVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeExpandVolumeResponse proto.InternalMessageInfo
+
+func (m *NodeExpandVolumeResponse) GetCapacityBytes() int64 {
+	if m != nil {
+		return m.CapacityBytes
+	}
+	return 0
+}
+
+var E_AlphaEnum = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.EnumOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1060,
+	Name:          "csi.v1.alpha_enum",
+	Tag:           "varint,1060,opt,name=alpha_enum",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+var E_AlphaEnumValue = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.EnumValueOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1060,
+	Name:          "csi.v1.alpha_enum_value",
+	Tag:           "varint,1060,opt,name=alpha_enum_value",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+var E_CsiSecret = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1059,
+	Name:          "csi.v1.csi_secret",
+	Tag:           "varint,1059,opt,name=csi_secret",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+var E_AlphaField = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1060,
+	Name:          "csi.v1.alpha_field",
+	Tag:           "varint,1060,opt,name=alpha_field",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+var E_AlphaMessage = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1060,
+	Name:          "csi.v1.alpha_message",
+	Tag:           "varint,1060,opt,name=alpha_message",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+var E_AlphaMethod = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MethodOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1060,
+	Name:          "csi.v1.alpha_method",
+	Tag:           "varint,1060,opt,name=alpha_method",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+var E_AlphaService = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.ServiceOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1060,
+	Name:          "csi.v1.alpha_service",
+	Tag:           "varint,1060,opt,name=alpha_service",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+func init() {
+	proto.RegisterEnum("csi.v1.PluginCapability_Service_Type", PluginCapability_Service_Type_name, PluginCapability_Service_Type_value)
+	proto.RegisterEnum("csi.v1.PluginCapability_VolumeExpansion_Type", PluginCapability_VolumeExpansion_Type_name, PluginCapability_VolumeExpansion_Type_value)
+	proto.RegisterEnum("csi.v1.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value)
+	proto.RegisterEnum("csi.v1.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value)
+	proto.RegisterEnum("csi.v1.VolumeUsage_Unit", VolumeUsage_Unit_name, VolumeUsage_Unit_value)
+	proto.RegisterEnum("csi.v1.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value)
+	proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.v1.GetPluginInfoRequest")
+	proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v1.GetPluginInfoResponse")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetPluginInfoResponse.ManifestEntry")
+	proto.RegisterType((*GetPluginCapabilitiesRequest)(nil), "csi.v1.GetPluginCapabilitiesRequest")
+	proto.RegisterType((*GetPluginCapabilitiesResponse)(nil), "csi.v1.GetPluginCapabilitiesResponse")
+	proto.RegisterType((*PluginCapability)(nil), "csi.v1.PluginCapability")
+	proto.RegisterType((*PluginCapability_Service)(nil), "csi.v1.PluginCapability.Service")
+	proto.RegisterType((*PluginCapability_VolumeExpansion)(nil), "csi.v1.PluginCapability.VolumeExpansion")
+	proto.RegisterType((*ProbeRequest)(nil), "csi.v1.ProbeRequest")
+	proto.RegisterType((*ProbeResponse)(nil), "csi.v1.ProbeResponse")
+	proto.RegisterType((*CreateVolumeRequest)(nil), "csi.v1.CreateVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeRequest.ParametersEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeRequest.SecretsEntry")
+	proto.RegisterType((*VolumeContentSource)(nil), "csi.v1.VolumeContentSource")
+	proto.RegisterType((*VolumeContentSource_SnapshotSource)(nil), "csi.v1.VolumeContentSource.SnapshotSource")
+	proto.RegisterType((*VolumeContentSource_VolumeSource)(nil), "csi.v1.VolumeContentSource.VolumeSource")
+	proto.RegisterType((*CreateVolumeResponse)(nil), "csi.v1.CreateVolumeResponse")
+	proto.RegisterType((*VolumeCapability)(nil), "csi.v1.VolumeCapability")
+	proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.v1.VolumeCapability.BlockVolume")
+	proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.v1.VolumeCapability.MountVolume")
+	proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.v1.VolumeCapability.AccessMode")
+	proto.RegisterType((*CapacityRange)(nil), "csi.v1.CapacityRange")
+	proto.RegisterType((*Volume)(nil), "csi.v1.Volume")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.Volume.VolumeContextEntry")
+	proto.RegisterType((*TopologyRequirement)(nil), "csi.v1.TopologyRequirement")
+	proto.RegisterType((*Topology)(nil), "csi.v1.Topology")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.Topology.SegmentsEntry")
+	proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.v1.DeleteVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteVolumeRequest.SecretsEntry")
+	proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.v1.DeleteVolumeResponse")
+	proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.v1.ControllerPublishVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeRequest.SecretsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry")
+	proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.v1.ControllerPublishVolumeResponse")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeResponse.PublishContextEntry")
+	proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.v1.ControllerUnpublishVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerUnpublishVolumeRequest.SecretsEntry")
+	proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.v1.ControllerUnpublishVolumeResponse")
+	proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry")
+	proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse")
+	proto.RegisterType((*ValidateVolumeCapabilitiesResponse_Confirmed)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry")
+	proto.RegisterType((*ListVolumesRequest)(nil), "csi.v1.ListVolumesRequest")
+	proto.RegisterType((*ListVolumesResponse)(nil), "csi.v1.ListVolumesResponse")
+	proto.RegisterType((*ListVolumesResponse_VolumeStatus)(nil), "csi.v1.ListVolumesResponse.VolumeStatus")
+	proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.v1.ListVolumesResponse.Entry")
+	proto.RegisterType((*ControllerGetVolumeRequest)(nil), "csi.v1.ControllerGetVolumeRequest")
+	proto.RegisterType((*ControllerGetVolumeResponse)(nil), "csi.v1.ControllerGetVolumeResponse")
+	proto.RegisterType((*ControllerGetVolumeResponse_VolumeStatus)(nil), "csi.v1.ControllerGetVolumeResponse.VolumeStatus")
+	proto.RegisterType((*GetCapacityRequest)(nil), "csi.v1.GetCapacityRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetCapacityRequest.ParametersEntry")
+	proto.RegisterType((*GetCapacityResponse)(nil), "csi.v1.GetCapacityResponse")
+	proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.v1.ControllerGetCapabilitiesRequest")
+	proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), "csi.v1.ControllerGetCapabilitiesResponse")
+	proto.RegisterType((*ControllerServiceCapability)(nil), "csi.v1.ControllerServiceCapability")
+	proto.RegisterType((*ControllerServiceCapability_RPC)(nil), "csi.v1.ControllerServiceCapability.RPC")
+	proto.RegisterType((*CreateSnapshotRequest)(nil), "csi.v1.CreateSnapshotRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateSnapshotRequest.ParametersEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateSnapshotRequest.SecretsEntry")
+	proto.RegisterType((*CreateSnapshotResponse)(nil), "csi.v1.CreateSnapshotResponse")
+	proto.RegisterType((*Snapshot)(nil), "csi.v1.Snapshot")
+	proto.RegisterType((*DeleteSnapshotRequest)(nil), "csi.v1.DeleteSnapshotRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteSnapshotRequest.SecretsEntry")
+	proto.RegisterType((*DeleteSnapshotResponse)(nil), "csi.v1.DeleteSnapshotResponse")
+	proto.RegisterType((*ListSnapshotsRequest)(nil), "csi.v1.ListSnapshotsRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ListSnapshotsRequest.SecretsEntry")
+	proto.RegisterType((*ListSnapshotsResponse)(nil), "csi.v1.ListSnapshotsResponse")
+	proto.RegisterType((*ListSnapshotsResponse_Entry)(nil), "csi.v1.ListSnapshotsResponse.Entry")
+	proto.RegisterType((*ControllerExpandVolumeRequest)(nil), "csi.v1.ControllerExpandVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerExpandVolumeRequest.SecretsEntry")
+	proto.RegisterType((*ControllerExpandVolumeResponse)(nil), "csi.v1.ControllerExpandVolumeResponse")
+	proto.RegisterType((*NodeStageVolumeRequest)(nil), "csi.v1.NodeStageVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.PublishContextEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.SecretsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.VolumeContextEntry")
+	proto.RegisterType((*NodeStageVolumeResponse)(nil), "csi.v1.NodeStageVolumeResponse")
+	proto.RegisterType((*NodeUnstageVolumeRequest)(nil), "csi.v1.NodeUnstageVolumeRequest")
+	proto.RegisterType((*NodeUnstageVolumeResponse)(nil), "csi.v1.NodeUnstageVolumeResponse")
+	proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.v1.NodePublishVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.PublishContextEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.SecretsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.VolumeContextEntry")
+	proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.v1.NodePublishVolumeResponse")
+	proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.v1.NodeUnpublishVolumeRequest")
+	proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.v1.NodeUnpublishVolumeResponse")
+	proto.RegisterType((*NodeGetVolumeStatsRequest)(nil), "csi.v1.NodeGetVolumeStatsRequest")
+	proto.RegisterType((*NodeGetVolumeStatsResponse)(nil), "csi.v1.NodeGetVolumeStatsResponse")
+	proto.RegisterType((*VolumeUsage)(nil), "csi.v1.VolumeUsage")
+	proto.RegisterType((*VolumeCondition)(nil), "csi.v1.VolumeCondition")
+	proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.v1.NodeGetCapabilitiesRequest")
+	proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.v1.NodeGetCapabilitiesResponse")
+	proto.RegisterType((*NodeServiceCapability)(nil), "csi.v1.NodeServiceCapability")
+	proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.v1.NodeServiceCapability.RPC")
+	proto.RegisterType((*NodeGetInfoRequest)(nil), "csi.v1.NodeGetInfoRequest")
+	proto.RegisterType((*NodeGetInfoResponse)(nil), "csi.v1.NodeGetInfoResponse")
+	proto.RegisterType((*NodeExpandVolumeRequest)(nil), "csi.v1.NodeExpandVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeExpandVolumeRequest.SecretsEntry")
+	proto.RegisterType((*NodeExpandVolumeResponse)(nil), "csi.v1.NodeExpandVolumeResponse")
+	proto.RegisterExtension(E_AlphaEnum)
+	proto.RegisterExtension(E_AlphaEnumValue)
+	proto.RegisterExtension(E_CsiSecret)
+	proto.RegisterExtension(E_AlphaField)
+	proto.RegisterExtension(E_AlphaMessage)
+	proto.RegisterExtension(E_AlphaMethod)
+	proto.RegisterExtension(E_AlphaService)
+}
+
+func init() {
+	proto.RegisterFile("github.com/container-storage-interface/spec/csi.proto", fileDescriptor_9cdb00adce470e01)
+}
+
+var fileDescriptor_9cdb00adce470e01 = []byte{
+	// 3797 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x3b, 0x4b, 0x6c, 0x1b, 0x49,
+	0x76, 0x6a, 0xfe, 0x24, 0x3d, 0x4a, 0x32, 0x5d, 0xfa, 0x98, 0x6e, 0x49, 0x96, 0xdc, 0x1e, 0x7b,
+	0x65, 0x8f, 0x4d, 0xaf, 0xb5, 0x63, 0x23, 0x23, 0x7b, 0x76, 0x87, 0xa4, 0x68, 0x89, 0x6b, 0x8a,
+	0xd4, 0x34, 0x29, 0x7b, 0xed, 0x64, 0xd0, 0xd3, 0x22, 0x4b, 0x74, 0x63, 0xc8, 0x6e, 0x4e, 0x77,
+	0x53, 0x91, 0xe6, 0x92, 0x20, 0x41, 0x0e, 0x41, 0x2e, 0xb9, 0xed, 0xe4, 0xb6, 0x48, 0xf6, 0x98,
+	0xc5, 0x22, 0x08, 0x82, 0x1c, 0x03, 0xe4, 0x18, 0x20, 0x9b, 0xdc, 0x12, 0xe4, 0xb2, 0xb7, 0x20,
+	0x58, 0x24, 0xc0, 0x5c, 0x72, 0xc9, 0x21, 0x08, 0xba, 0xaa, 0xba, 0xd9, 0x5f, 0x7e, 0x2c, 0x19,
+	0x73, 0xc8, 0x49, 0xec, 0x57, 0xef, 0xbd, 0x7a, 0x55, 0xf5, 0xde, 0xab, 0xf7, 0x29, 0xc1, 0xe3,
+	0xb6, 0x62, 0xbe, 0xed, 0x1f, 0xe7, 0x9a, 0x5a, 0xf7, 0x61, 0x53, 0x53, 0x4d, 0x59, 0x51, 0xb1,
+	0xfe, 0xc0, 0x30, 0x35, 0x5d, 0x6e, 0xe3, 0x07, 0x8a, 0x6a, 0x62, 0xfd, 0x44, 0x6e, 0xe2, 0x87,
+	0x46, 0x0f, 0x37, 0x1f, 0x36, 0x0d, 0x25, 0xd7, 0xd3, 0x35, 0x53, 0x43, 0x29, 0xeb, 0xe7, 0xe9,
+	0x23, 0x7e, 0xb3, 0xad, 0x69, 0xed, 0x0e, 0x7e, 0x48, 0xa0, 0xc7, 0xfd, 0x93, 0x87, 0x2d, 0x6c,
+	0x34, 0x75, 0xa5, 0x67, 0x6a, 0x3a, 0xc5, 0xe4, 0x37, 0xfc, 0x18, 0xa6, 0xd2, 0xc5, 0x86, 0x29,
+	0x77, 0x7b, 0x0c, 0xe1, 0x86, 0x1f, 0xe1, 0x77, 0x75, 0xb9, 0xd7, 0xc3, 0xba, 0x41, 0xc7, 0x85,
+	0x15, 0x58, 0xda, 0xc3, 0xe6, 0x61, 0xa7, 0xdf, 0x56, 0xd4, 0xb2, 0x7a, 0xa2, 0x89, 0xf8, 0xab,
+	0x3e, 0x36, 0x4c, 0xe1, 0x5f, 0x39, 0x58, 0xf6, 0x0d, 0x18, 0x3d, 0x4d, 0x35, 0x30, 0x42, 0x90,
+	0x50, 0xe5, 0x2e, 0xce, 0x72, 0x9b, 0xdc, 0xd6, 0xac, 0x48, 0x7e, 0xa3, 0xdb, 0xb0, 0x70, 0x8a,
+	0xd5, 0x96, 0xa6, 0x4b, 0xa7, 0x58, 0x37, 0x14, 0x4d, 0xcd, 0xc6, 0xc8, 0xe8, 0x3c, 0x85, 0xbe,
+	0xa4, 0x40, 0xb4, 0x07, 0x33, 0x5d, 0x59, 0x55, 0x4e, 0xb0, 0x61, 0x66, 0xe3, 0x9b, 0xf1, 0xad,
+	0xf4, 0xf6, 0x87, 0x39, 0xba, 0xd4, 0x5c, 0xe8, 0x5c, 0xb9, 0x03, 0x86, 0x5d, 0x52, 0x4d, 0xfd,
+	0x5c, 0x74, 0x88, 0xf9, 0xa7, 0x30, 0xef, 0x19, 0x42, 0x19, 0x88, 0x7f, 0x89, 0xcf, 0x99, 0x4c,
+	0xd6, 0x4f, 0xb4, 0x04, 0xc9, 0x53, 0xb9, 0xd3, 0xc7, 0x4c, 0x12, 0xfa, 0xb1, 0x13, 0xfb, 0x2d,
+	0x4e, 0xb8, 0x01, 0x6b, 0xce, 0x6c, 0x45, 0xb9, 0x27, 0x1f, 0x2b, 0x1d, 0xc5, 0x54, 0xb0, 0x61,
+	0x2f, 0xfd, 0x73, 0x58, 0x8f, 0x18, 0x67, 0x3b, 0xf0, 0x0c, 0xe6, 0x9a, 0x2e, 0x78, 0x96, 0x23,
+	0x4b, 0xc9, 0xda, 0x4b, 0xf1, 0x51, 0x9e, 0x8b, 0x1e, 0x6c, 0xe1, 0x57, 0x71, 0xc8, 0xf8, 0x51,
+	0xd0, 0x33, 0x98, 0x36, 0xb0, 0x7e, 0xaa, 0x34, 0xe9, 0xbe, 0xa6, 0xb7, 0x37, 0xa3, 0xb8, 0xe5,
+	0xea, 0x14, 0x6f, 0x7f, 0x4a, 0xb4, 0x49, 0xd0, 0x11, 0x64, 0x4e, 0xb5, 0x4e, 0xbf, 0x8b, 0x25,
+	0x7c, 0xd6, 0x93, 0x55, 0xe7, 0x00, 0xd2, 0xdb, 0x5b, 0x91, 0x6c, 0x5e, 0x12, 0x82, 0x92, 0x8d,
+	0xbf, 0x3f, 0x25, 0x5e, 0x39, 0xf5, 0x82, 0xf8, 0x9f, 0x72, 0x30, 0xcd, 0x66, 0x43, 0x1f, 0x43,
+	0xc2, 0x3c, 0xef, 0x51, 0xe9, 0x16, 0xb6, 0x6f, 0x8f, 0x92, 0x2e, 0xd7, 0x38, 0xef, 0x61, 0x91,
+	0x90, 0x08, 0x9f, 0x41, 0xc2, 0xfa, 0x42, 0x69, 0x98, 0x3e, 0xaa, 0xbe, 0xa8, 0xd6, 0x5e, 0x55,
+	0x33, 0x53, 0x68, 0x05, 0x50, 0xb1, 0x56, 0x6d, 0x88, 0xb5, 0x4a, 0xa5, 0x24, 0x4a, 0xf5, 0x92,
+	0xf8, 0xb2, 0x5c, 0x2c, 0x65, 0x38, 0xf4, 0x01, 0x6c, 0xbe, 0xac, 0x55, 0x8e, 0x0e, 0x4a, 0x52,
+	0xbe, 0x58, 0x2c, 0xd5, 0xeb, 0xe5, 0x42, 0xb9, 0x52, 0x6e, 0xbc, 0x96, 0x8a, 0xb5, 0x6a, 0xbd,
+	0x21, 0xe6, 0xcb, 0xd5, 0x46, 0x3d, 0x13, 0xe3, 0xff, 0x80, 0x83, 0x2b, 0xbe, 0x05, 0xa0, 0xbc,
+	0x47, 0xc2, 0x07, 0xe3, 0x2e, 0xdc, 0x2d, 0xe9, 0xfd, 0x30, 0x49, 0x01, 0x52, 0xb5, 0x6a, 0xa5,
+	0x5c, 0xb5, 0xa4, 0x4b, 0xc3, 0x74, 0xed, 0xf9, 0x73, 0xf2, 0x11, 0x2b, 0xa4, 0xe8, 0x84, 0xc2,
+	0x02, 0xcc, 0x1d, 0xea, 0xda, 0x31, 0xb6, 0xf5, 0x27, 0x0f, 0xf3, 0xec, 0x9b, 0xe9, 0xcb, 0xf7,
+	0x21, 0xa9, 0x63, 0xb9, 0x75, 0xce, 0x8e, 0x96, 0xcf, 0x51, 0x9b, 0xcc, 0xd9, 0x36, 0x99, 0x2b,
+	0x68, 0x5a, 0xe7, 0xa5, 0xa5, 0x9f, 0x22, 0x45, 0x14, 0xbe, 0x4d, 0xc0, 0x62, 0x51, 0xc7, 0xb2,
+	0x89, 0xa9, 0xb4, 0x8c, 0x75, 0xa8, 0xed, 0x3d, 0x83, 0x05, 0x4b, 0xbf, 0x9a, 0x8a, 0x79, 0x2e,
+	0xe9, 0xb2, 0xda, 0xc6, 0xec, 0xe8, 0x97, 0xed, 0x1d, 0x28, 0xb2, 0x51, 0xd1, 0x1a, 0x14, 0xe7,
+	0x9b, 0xee, 0x4f, 0x54, 0x86, 0x45, 0xa6, 0x3a, 0x1e, 0x95, 0x8e, 0x7b, 0x55, 0x9a, 0x4a, 0xe1,
+	0x52, 0x69, 0x74, 0xea, 0x85, 0x28, 0xd8, 0x40, 0x2f, 0x00, 0x7a, 0xb2, 0x2e, 0x77, 0xb1, 0x89,
+	0x75, 0x23, 0x9b, 0xf0, 0xda, 0x77, 0xc8, 0x6a, 0x72, 0x87, 0x0e, 0x36, 0xb5, 0x6f, 0x17, 0x39,
+	0xda, 0xb3, 0x0c, 0xa2, 0xa9, 0x63, 0xd3, 0xc8, 0x26, 0x09, 0xa7, 0xad, 0x61, 0x9c, 0xea, 0x14,
+	0x95, 0xb0, 0x29, 0xc4, 0xbf, 0x29, 0x70, 0xa2, 0x4d, 0x8d, 0x6a, 0xb0, 0x6c, 0x2f, 0x50, 0x53,
+	0x4d, 0xac, 0x9a, 0x92, 0xa1, 0xf5, 0xf5, 0x26, 0xce, 0xa6, 0xc8, 0x2e, 0xad, 0xfa, 0x96, 0x48,
+	0x71, 0xea, 0x04, 0x45, 0x64, 0x5b, 0xe3, 0x01, 0xa2, 0x37, 0xc0, 0xcb, 0xcd, 0x26, 0x36, 0x0c,
+	0x85, 0xee, 0x85, 0xa4, 0xe3, 0xaf, 0xfa, 0x8a, 0x8e, 0xbb, 0x58, 0x35, 0x8d, 0xec, 0xb4, 0x97,
+	0x6b, 0x43, 0xeb, 0x69, 0x1d, 0xad, 0x7d, 0x2e, 0x0e, 0x70, 0xc4, 0xeb, 0x1e, 0x72, 0xd7, 0x88,
+	0xc1, 0x7f, 0x02, 0x57, 0x7c, 0x9b, 0x32, 0x89, 0x67, 0xe3, 0x77, 0x60, 0xce, 0xbd, 0x13, 0x13,
+	0x79, 0xc5, 0x3f, 0x89, 0xc1, 0x62, 0xc8, 0x1e, 0xa0, 0x7d, 0x98, 0x31, 0x54, 0xb9, 0x67, 0xbc,
+	0xd5, 0x4c, 0xa6, 0xbf, 0xf7, 0x86, 0x6c, 0x59, 0xae, 0xce, 0x70, 0xe9, 0xe7, 0xfe, 0x94, 0xe8,
+	0x50, 0xa3, 0x02, 0xa4, 0xe8, 0x7e, 0xfa, 0x7d, 0x53, 0x18, 0x1f, 0x0a, 0x73, 0xb8, 0x30, 0x4a,
+	0xfe, 0x11, 0x2c, 0x78, 0x67, 0x40, 0x1b, 0x90, 0xb6, 0x67, 0x90, 0x94, 0x16, 0x5b, 0x2b, 0xd8,
+	0xa0, 0x72, 0x8b, 0xff, 0x10, 0xe6, 0xdc, 0xcc, 0xd0, 0x2a, 0xcc, 0x32, 0x85, 0x70, 0xd0, 0x67,
+	0x28, 0xa0, 0xdc, 0x72, 0x6c, 0xfa, 0x87, 0xb0, 0xe4, 0xd5, 0x33, 0x66, 0xca, 0x77, 0x9c, 0x35,
+	0xd0, 0xbd, 0x58, 0xf0, 0xae, 0xc1, 0x96, 0x53, 0xf8, 0x79, 0x12, 0x32, 0x7e, 0xa3, 0x41, 0xcf,
+	0x20, 0x79, 0xdc, 0xd1, 0x9a, 0x5f, 0x32, 0xda, 0x0f, 0xa2, 0xac, 0x2b, 0x57, 0xb0, 0xb0, 0x28,
+	0x74, 0x7f, 0x4a, 0xa4, 0x44, 0x16, 0x75, 0x57, 0xeb, 0xab, 0x26, 0xdb, 0xbd, 0x68, 0xea, 0x03,
+	0x0b, 0x6b, 0x40, 0x4d, 0x88, 0xd0, 0x2e, 0xa4, 0xa9, 0xda, 0x49, 0x5d, 0xad, 0x85, 0xb3, 0x71,
+	0xc2, 0xe3, 0x56, 0x24, 0x8f, 0x3c, 0xc1, 0x3d, 0xd0, 0x5a, 0x58, 0x04, 0xd9, 0xf9, 0xcd, 0xcf,
+	0x43, 0xda, 0x25, 0x1b, 0xff, 0x35, 0xa4, 0x5d, 0x93, 0xa1, 0x6b, 0x30, 0x7d, 0x62, 0x48, 0x8e,
+	0x13, 0x9e, 0x15, 0x53, 0x27, 0x06, 0xf1, 0xa7, 0x1b, 0x90, 0x26, 0x52, 0x48, 0x27, 0x1d, 0xb9,
+	0x6d, 0x64, 0x63, 0x9b, 0x71, 0xeb, 0x8c, 0x08, 0xe8, 0xb9, 0x05, 0x41, 0x8f, 0x80, 0x39, 0x14,
+	0x89, 0xe2, 0xb5, 0x75, 0xad, 0xdf, 0x23, 0x42, 0xce, 0x16, 0xe2, 0x3f, 0x2b, 0x70, 0x22, 0xbb,
+	0xdf, 0xc8, 0x6c, 0x7b, 0xd6, 0x20, 0xff, 0xd7, 0x31, 0x80, 0x81, 0x94, 0xe8, 0x19, 0x24, 0xc8,
+	0xc2, 0xa8, 0xf7, 0xdf, 0x1a, 0x63, 0x61, 0x39, 0xb2, 0x3a, 0x42, 0x25, 0xfc, 0x3b, 0x07, 0x09,
+	0xc2, 0xc6, 0x7f, 0x47, 0xd5, 0xcb, 0xd5, 0xbd, 0x4a, 0x49, 0xaa, 0xd6, 0x76, 0x4b, 0xd2, 0x2b,
+	0xb1, 0xdc, 0x28, 0x89, 0x19, 0x0e, 0xad, 0xc2, 0x35, 0x37, 0x5c, 0x2c, 0xe5, 0x77, 0x4b, 0xa2,
+	0x54, 0xab, 0x56, 0x5e, 0x67, 0x62, 0x88, 0x87, 0x95, 0x83, 0xa3, 0x4a, 0xa3, 0x1c, 0x1c, 0x8b,
+	0xa3, 0x35, 0xc8, 0xba, 0xc6, 0x18, 0x0f, 0xc6, 0x36, 0x61, 0xb1, 0x75, 0x8d, 0xd2, 0x9f, 0x6c,
+	0x30, 0x89, 0x04, 0xb8, 0xee, 0x9e, 0xd3, 0x4b, 0x9b, 0xe2, 0xad, 0x4d, 0x42, 0x37, 0x21, 0xeb,
+	0xc6, 0xf1, 0x70, 0x98, 0x26, 0x28, 0x85, 0x79, 0x47, 0x0d, 0x88, 0x9a, 0xbf, 0x82, 0x79, 0xcf,
+	0xed, 0x60, 0x05, 0x72, 0xcc, 0x9d, 0xb5, 0xa4, 0xe3, 0x73, 0x93, 0x04, 0x37, 0xdc, 0x56, 0x5c,
+	0x9c, 0xb7, 0xa1, 0x05, 0x0b, 0x68, 0x1d, 0x68, 0x47, 0xe9, 0x2a, 0x26, 0xc3, 0x89, 0x11, 0x1c,
+	0x20, 0x20, 0x82, 0x20, 0xfc, 0x3a, 0x06, 0x29, 0xa6, 0x15, 0xb7, 0x5d, 0xf7, 0x93, 0x87, 0xa5,
+	0x0d, 0xa5, 0x2c, 0x3d, 0x66, 0x19, 0xf3, 0x9a, 0x25, 0xda, 0x87, 0x05, 0xb7, 0x13, 0x3f, 0xb3,
+	0xc3, 0xc7, 0x9b, 0xde, 0x73, 0x76, 0x7b, 0x92, 0x33, 0x16, 0x34, 0xce, 0x9f, 0xba, 0x61, 0xa8,
+	0x00, 0x0b, 0xbe, 0x7b, 0x20, 0x31, 0xfa, 0x1e, 0x98, 0x6f, 0x7a, 0x5c, 0x62, 0x1e, 0x16, 0x6d,
+	0x17, 0xde, 0xc1, 0x92, 0xc9, 0x5c, 0x3c, 0xbb, 0xa7, 0x32, 0x01, 0xd7, 0x8f, 0x06, 0xc8, 0x36,
+	0x8c, 0xff, 0x14, 0x50, 0x50, 0xd6, 0x89, 0xfc, 0x75, 0x1f, 0x16, 0x43, 0x2e, 0x17, 0x94, 0x83,
+	0x59, 0x72, 0x54, 0x86, 0x62, 0x62, 0x16, 0x98, 0x06, 0x25, 0x1a, 0xa0, 0x58, 0xf8, 0x3d, 0x1d,
+	0x9f, 0x60, 0x5d, 0xc7, 0x2d, 0x62, 0x98, 0xa1, 0xf8, 0x0e, 0x8a, 0xf0, 0x87, 0x1c, 0xcc, 0xd8,
+	0x70, 0xb4, 0x03, 0x33, 0x06, 0x6e, 0xd3, 0x8b, 0x8f, 0xce, 0x75, 0xc3, 0x4f, 0x9b, 0xab, 0x33,
+	0x04, 0x16, 0xc2, 0xdb, 0xf8, 0x56, 0x08, 0xef, 0x19, 0x9a, 0x68, 0xf1, 0x7f, 0xcb, 0xc1, 0xe2,
+	0x2e, 0xee, 0x60, 0x7f, 0x7c, 0x34, 0xcc, 0xb7, 0xbb, 0x43, 0x8a, 0x98, 0x37, 0xa4, 0x08, 0x61,
+	0x35, 0x24, 0xa4, 0xb8, 0xd0, 0x35, 0xbb, 0x02, 0x4b, 0xde, 0xd9, 0xe8, 0xc5, 0x22, 0xfc, 0x57,
+	0x1c, 0x6e, 0x58, 0xba, 0xa0, 0x6b, 0x9d, 0x0e, 0xd6, 0x0f, 0xfb, 0xc7, 0x1d, 0xc5, 0x78, 0x3b,
+	0xc1, 0xe2, 0xae, 0xc1, 0xb4, 0xaa, 0xb5, 0x5c, 0xc6, 0x93, 0xb2, 0x3e, 0xcb, 0x2d, 0x54, 0x82,
+	0xab, 0xfe, 0x00, 0xef, 0x9c, 0xb9, 0xff, 0xe8, 0xf0, 0x2e, 0x73, 0xea, 0xbf, 0xbb, 0x78, 0x98,
+	0xb1, 0x42, 0x53, 0x4d, 0xed, 0x9c, 0x13, 0x8b, 0x99, 0x11, 0x9d, 0x6f, 0x24, 0xfa, 0x63, 0xb5,
+	0x1f, 0x38, 0xb1, 0xda, 0xd0, 0x15, 0x0d, 0x0b, 0xdb, 0xbe, 0x08, 0x58, 0x7c, 0x8a, 0xb0, 0xfe,
+	0x78, 0x4c, 0xd6, 0x23, 0x3d, 0xc1, 0x45, 0x4e, 0xf1, 0x12, 0xcc, 0xf7, 0x1f, 0x38, 0xd8, 0x88,
+	0x5c, 0x02, 0x0b, 0x36, 0x5a, 0x70, 0xa5, 0x47, 0x07, 0x9c, 0x4d, 0xa0, 0x56, 0xf6, 0x74, 0xe4,
+	0x26, 0xb0, 0xfc, 0x99, 0x41, 0x3d, 0xdb, 0xb0, 0xd0, 0xf3, 0x00, 0xf9, 0x3c, 0x2c, 0x86, 0xa0,
+	0x4d, 0xb4, 0x98, 0xdf, 0x70, 0xb0, 0x39, 0x10, 0xe5, 0x48, 0xed, 0x5d, 0x9e, 0xfa, 0x36, 0x06,
+	0xba, 0x45, 0x5d, 0xfe, 0xe3, 0xe0, 0xda, 0xc3, 0x27, 0x7c, 0x5f, 0x16, 0x7c, 0x0b, 0x6e, 0x0e,
+	0x99, 0x9a, 0x99, 0xf3, 0xaf, 0x13, 0x70, 0xf3, 0xa5, 0xdc, 0x51, 0x5a, 0x4e, 0x08, 0x19, 0x52,
+	0x69, 0x18, 0xbe, 0x25, 0xcd, 0x80, 0x05, 0x50, 0xaf, 0xf5, 0xcc, 0xb1, 0xda, 0x51, 0xfc, 0xc7,
+	0xb8, 0x0e, 0x2f, 0x31, 0xfd, 0x7b, 0x1d, 0x92, 0xfe, 0x7d, 0x3c, 0xbe, 0xac, 0xc3, 0x92, 0xc1,
+	0x23, 0xbf, 0x83, 0x79, 0x32, 0x3e, 0xdf, 0x21, 0x5a, 0x70, 0x61, 0x2b, 0xfe, 0x2e, 0xf3, 0xb5,
+	0xbf, 0x4f, 0x80, 0x30, 0x6c, 0xf5, 0xcc, 0x87, 0x88, 0x30, 0xdb, 0xd4, 0xd4, 0x13, 0x45, 0xef,
+	0xe2, 0x16, 0xcb, 0x3b, 0x3e, 0x1a, 0x67, 0xf3, 0x98, 0x03, 0x29, 0xda, 0xb4, 0xe2, 0x80, 0x0d,
+	0xca, 0xc2, 0x74, 0x17, 0x1b, 0x86, 0xdc, 0xb6, 0xc5, 0xb2, 0x3f, 0xf9, 0x5f, 0xc4, 0x61, 0xd6,
+	0x21, 0x41, 0x6a, 0x40, 0x83, 0xa9, 0xfb, 0xda, 0x7b, 0x17, 0x01, 0xde, 0x5d, 0x99, 0x63, 0xef,
+	0xa0, 0xcc, 0x2d, 0x8f, 0x32, 0x53, 0x73, 0xd8, 0x7d, 0x27, 0xb1, 0x87, 0xe8, 0xf5, 0x77, 0xae,
+	0x80, 0xc2, 0xef, 0x00, 0xaa, 0x28, 0x06, 0xcb, 0xdf, 0x1c, 0xb7, 0x64, 0xa5, 0x6b, 0xf2, 0x99,
+	0x84, 0x55, 0x53, 0x57, 0x58, 0xb8, 0x9e, 0x14, 0xa1, 0x2b, 0x9f, 0x95, 0x28, 0xc4, 0x0a, 0xe9,
+	0x0d, 0x53, 0xd6, 0x4d, 0x45, 0x6d, 0x4b, 0xa6, 0xf6, 0x25, 0x76, 0xca, 0xbd, 0x36, 0xb4, 0x61,
+	0x01, 0x85, 0xff, 0x8c, 0xc1, 0xa2, 0x87, 0x3d, 0xd3, 0xc9, 0xa7, 0x30, 0x3d, 0xe0, 0xed, 0x09,
+	0xe3, 0x43, 0xb0, 0x73, 0x74, 0xdb, 0x6c, 0x0a, 0xb4, 0x0e, 0xa0, 0xe2, 0x33, 0xd3, 0x33, 0xef,
+	0xac, 0x05, 0x21, 0x73, 0xf2, 0x7f, 0xc4, 0x39, 0xe9, 0xbe, 0x29, 0x9b, 0x7d, 0x03, 0xdd, 0x07,
+	0xc4, 0x5c, 0x34, 0x6e, 0x49, 0xec, 0x8e, 0xa1, 0xf3, 0xce, 0x8a, 0x19, 0x67, 0xa4, 0x4a, 0x6e,
+	0x1b, 0x03, 0xed, 0x39, 0x95, 0xd4, 0xa6, 0xa6, 0xb6, 0x14, 0x73, 0x50, 0x49, 0xbd, 0x16, 0x48,
+	0x10, 0xe8, 0x30, 0xcd, 0x4f, 0xaf, 0x9c, 0x7a, 0xa1, 0xfc, 0x57, 0x90, 0xa4, 0xc7, 0x31, 0x66,
+	0xc5, 0x00, 0x7d, 0x0a, 0x29, 0x83, 0x48, 0xec, 0xaf, 0x8e, 0x84, 0xed, 0x89, 0x7b, 0x85, 0x22,
+	0xa3, 0x13, 0x7e, 0x08, 0xfc, 0xe0, 0x62, 0xda, 0xc3, 0xe6, 0xf8, 0xd7, 0xef, 0x8e, 0xb5, 0x06,
+	0xe1, 0xa7, 0x31, 0x58, 0x0d, 0x65, 0x30, 0x59, 0xed, 0x03, 0xed, 0xfb, 0x56, 0xf2, 0xfd, 0xe0,
+	0x8d, 0x1d, 0x60, 0x1e, 0xba, 0x22, 0xfe, 0xf7, 0x2f, 0x76, 0x98, 0x85, 0x89, 0x0f, 0x33, 0x70,
+	0x8e, 0x74, 0x67, 0x7e, 0x11, 0x03, 0xb4, 0x87, 0x4d, 0x27, 0x55, 0x66, 0x5b, 0x1a, 0xe1, 0x6f,
+	0xb8, 0x77, 0xf0, 0x37, 0x3f, 0xf6, 0xf8, 0x1b, 0xea, 0xb1, 0xee, 0xb9, 0x7a, 0x23, 0xbe, 0xa9,
+	0x87, 0xde, 0x96, 0x11, 0xe9, 0x29, 0x8d, 0xf9, 0xc7, 0x4b, 0x4f, 0x2f, 0xe8, 0x56, 0xfe, 0x83,
+	0x83, 0x45, 0x8f, 0xd0, 0x4c, 0x83, 0x1e, 0x00, 0x92, 0x4f, 0x65, 0xa5, 0x23, 0x5b, 0x82, 0xd9,
+	0xe9, 0x3f, 0x2b, 0x07, 0x5c, 0x75, 0x46, 0x6c, 0x32, 0x74, 0x08, 0x8b, 0x5d, 0xf9, 0x4c, 0xe9,
+	0xf6, 0xbb, 0x12, 0xdb, 0x67, 0x43, 0xf9, 0xda, 0xae, 0x1e, 0xae, 0x06, 0xaa, 0xe8, 0x65, 0xd5,
+	0x7c, 0xf2, 0x11, 0x29, 0xa3, 0x53, 0x9b, 0xbc, 0xca, 0x88, 0x99, 0x06, 0x29, 0x5f, 0x63, 0xc2,
+	0x51, 0x51, 0x03, 0x1c, 0xe3, 0x63, 0x73, 0xa4, 0xc4, 0x03, 0x8e, 0x82, 0xe0, 0x8e, 0x7c, 0xd9,
+	0x9a, 0xfd, 0x0d, 0xa5, 0x8e, 0x3b, 0x62, 0x0c, 0xe0, 0xb0, 0xbd, 0xd9, 0x0b, 0x6d, 0x2a, 0xdd,
+	0x0a, 0xda, 0x0e, 0xeb, 0xb0, 0x44, 0xf6, 0x97, 0xfe, 0x37, 0xee, 0x36, 0xe3, 0x00, 0x36, 0x7a,
+	0x0a, 0x71, 0xbd, 0xd7, 0x64, 0x36, 0xfc, 0xbd, 0x31, 0xf8, 0xe7, 0xc4, 0xc3, 0xe2, 0xfe, 0x94,
+	0x68, 0x51, 0xf1, 0x7f, 0x16, 0x87, 0xb8, 0x78, 0x58, 0x44, 0x9f, 0x7a, 0x9a, 0x2d, 0xf7, 0xc7,
+	0xe4, 0xe2, 0xee, 0xb5, 0xfc, 0x53, 0x2c, 0xac, 0xd9, 0x92, 0x85, 0xa5, 0xa2, 0x58, 0xca, 0x37,
+	0x4a, 0xd2, 0x6e, 0xa9, 0x52, 0x6a, 0x94, 0x24, 0xda, 0x0c, 0xca, 0x70, 0x68, 0x0d, 0xb2, 0x87,
+	0x47, 0x85, 0x4a, 0xb9, 0xbe, 0x2f, 0x1d, 0x55, 0xed, 0x5f, 0x6c, 0x34, 0x86, 0x32, 0x30, 0x57,
+	0x29, 0xd7, 0x1b, 0x0c, 0x50, 0xcf, 0xc4, 0x2d, 0xc8, 0x5e, 0xa9, 0x21, 0x15, 0xf3, 0x87, 0xf9,
+	0x62, 0xb9, 0xf1, 0x3a, 0x93, 0x40, 0x3c, 0xac, 0x78, 0x79, 0xd7, 0xab, 0xf9, 0xc3, 0xfa, 0x7e,
+	0xad, 0x91, 0x49, 0x22, 0x04, 0x0b, 0x84, 0xde, 0x06, 0xd5, 0x33, 0x29, 0x8b, 0x43, 0xb1, 0x52,
+	0xab, 0x3a, 0x32, 0x4c, 0xa3, 0x25, 0xc8, 0xd8, 0x33, 0x8b, 0xa5, 0xfc, 0x2e, 0xa9, 0xea, 0xcd,
+	0xa0, 0xab, 0x30, 0x5f, 0xfa, 0xc9, 0x61, 0xbe, 0xba, 0x6b, 0x23, 0xce, 0xa2, 0x4d, 0x58, 0x73,
+	0x8b, 0x23, 0x31, 0xaa, 0xd2, 0x2e, 0xa9, 0xcc, 0xd5, 0x33, 0x80, 0xae, 0x43, 0x86, 0xf5, 0xb9,
+	0x8a, 0xb5, 0xea, 0x6e, 0xb9, 0x51, 0xae, 0x55, 0x33, 0x69, 0x5a, 0xc6, 0x5b, 0x04, 0xb0, 0x24,
+	0x67, 0xcc, 0xe6, 0x46, 0xd7, 0xf6, 0xe6, 0x69, 0x6d, 0xcf, 0xae, 0x5d, 0xff, 0x26, 0x06, 0xcb,
+	0xb4, 0x78, 0x6d, 0x97, 0xca, 0x6d, 0x87, 0xb5, 0x05, 0x19, 0x5a, 0xf4, 0x92, 0xfc, 0x57, 0xc1,
+	0x02, 0x85, 0xbf, 0xb4, 0x93, 0x0f, 0xbb, 0xd1, 0x14, 0x73, 0x35, 0x9a, 0xca, 0xfe, 0x54, 0xec,
+	0x9e, 0xb7, 0x25, 0xe3, 0x9b, 0x6d, 0x58, 0x76, 0x7f, 0x10, 0x92, 0x2b, 0x3c, 0x18, 0xce, 0x6d,
+	0x58, 0x1c, 0x75, 0x91, 0x54, 0xfe, 0x82, 0xae, 0xee, 0x39, 0xac, 0xf8, 0xe5, 0x65, 0x06, 0x7d,
+	0x3f, 0xd0, 0x38, 0x71, 0x7c, 0xaf, 0x83, 0xeb, 0x60, 0x08, 0xff, 0xc2, 0xc1, 0x8c, 0x0d, 0xb6,
+	0x62, 0x1c, 0xcb, 0x2f, 0x79, 0xca, 0xa5, 0xb3, 0x16, 0xc4, 0xa9, 0xbe, 0xba, 0x5b, 0x1e, 0x31,
+	0x7f, 0xcb, 0x23, 0xf4, 0x9c, 0xe3, 0xa1, 0xe7, 0xfc, 0x23, 0x98, 0x6f, 0x5a, 0xe2, 0x2b, 0x9a,
+	0x2a, 0x99, 0x4a, 0xd7, 0xae, 0x86, 0x06, 0x5b, 0x94, 0x0d, 0xfb, 0x5d, 0x81, 0x38, 0x67, 0x13,
+	0x58, 0x20, 0xb4, 0x09, 0x73, 0xa4, 0x65, 0x29, 0x99, 0x9a, 0xd4, 0x37, 0x70, 0x36, 0x49, 0x6a,
+	0x43, 0x40, 0x60, 0x0d, 0xed, 0xc8, 0xc0, 0xc2, 0xdf, 0x71, 0xb0, 0x4c, 0x4b, 0x5e, 0x7e, 0x75,
+	0x1c, 0xd5, 0xba, 0x71, 0x6b, 0x9c, 0xef, 0x4a, 0x0c, 0x65, 0xf8, 0xbe, 0x32, 0xfe, 0x2c, 0xac,
+	0xf8, 0xe7, 0x63, 0x69, 0xfe, 0x2f, 0x63, 0xb0, 0x64, 0xc5, 0x67, 0xf6, 0xc0, 0x65, 0x87, 0xd0,
+	0x13, 0x9c, 0xa4, 0x6f, 0x33, 0x13, 0x81, 0xcd, 0xdc, 0xf7, 0x27, 0xd1, 0x77, 0xdd, 0x11, 0xa6,
+	0x7f, 0x05, 0xef, 0x6b, 0x2f, 0xff, 0x92, 0x83, 0x65, 0xdf, 0x7c, 0xcc, 0x5e, 0x3e, 0xf1, 0x67,
+	0x05, 0xb7, 0x22, 0xe4, 0x7b, 0xa7, 0xbc, 0xe0, 0xb1, 0x1d, 0x8f, 0x4f, 0x66, 0x96, 0xff, 0x1c,
+	0x83, 0xf5, 0xc1, 0xa5, 0x46, 0x1e, 0x0d, 0xb4, 0x26, 0x28, 0x6b, 0x5d, 0xac, 0x37, 0xff, 0x99,
+	0xdf, 0xe1, 0x6e, 0x07, 0xef, 0xd9, 0x10, 0x91, 0x86, 0x39, 0xde, 0xd0, 0x6a, 0x70, 0x62, 0xd2,
+	0x6a, 0xf0, 0x85, 0x34, 0xe0, 0xf7, 0xdc, 0x85, 0x6e, 0xaf, 0xf8, 0x4c, 0x13, 0xc6, 0xec, 0x18,
+	0x3d, 0x81, 0x6b, 0x24, 0x05, 0x70, 0xde, 0xbc, 0xd8, 0x9d, 0x78, 0xea, 0x12, 0x67, 0xc4, 0x65,
+	0x6b, 0xd8, 0x79, 0xe8, 0xc1, 0xba, 0x24, 0x2d, 0xe1, 0xdb, 0x04, 0xac, 0x58, 0x29, 0x42, 0xdd,
+	0x94, 0xdb, 0x93, 0xf4, 0x0f, 0x7e, 0x3b, 0x58, 0x8e, 0x8d, 0x79, 0x8f, 0x25, 0x9c, 0xeb, 0x38,
+	0x55, 0x58, 0x94, 0x83, 0x45, 0xc3, 0x94, 0xdb, 0xc4, 0x1d, 0xc8, 0x7a, 0x1b, 0x9b, 0x52, 0x4f,
+	0x36, 0xdf, 0x32, 0x5b, 0xbf, 0xca, 0x86, 0x1a, 0x64, 0xe4, 0x50, 0x36, 0xdf, 0x5e, 0xd2, 0x41,
+	0xa2, 0x1f, 0xfb, 0x9d, 0xc2, 0x87, 0x23, 0xd6, 0x32, 0x44, 0xb7, 0x7e, 0x12, 0x51, 0xb2, 0x7f,
+	0x34, 0x82, 0xe5, 0xe8, 0x52, 0xfd, 0xc5, 0x4b, 0xd4, 0xdf, 0x71, 0xb5, 0xff, 0x3a, 0x5c, 0x0b,
+	0x2c, 0x9e, 0x5d, 0x21, 0x6d, 0xc8, 0x5a, 0x43, 0x47, 0xaa, 0x31, 0xa1, 0x3a, 0x46, 0x68, 0x4c,
+	0x2c, 0x42, 0x63, 0x84, 0x55, 0xb8, 0x1e, 0x32, 0x11, 0x93, 0xe2, 0x6f, 0x92, 0x54, 0x8c, 0xc9,
+	0x1b, 0x4f, 0x9f, 0x47, 0x59, 0xc5, 0x47, 0xee, 0x63, 0x0f, 0xed, 0xd1, 0xbc, 0x0f, 0xbb, 0xd8,
+	0x80, 0xb4, 0x1b, 0x8f, 0x5d, 0x83, 0xe6, 0x08, 0xc3, 0x49, 0x5e, 0xa8, 0x1f, 0x96, 0xf2, 0xf5,
+	0xc3, 0x2a, 0x03, 0xa3, 0x9a, 0xf6, 0x86, 0xb6, 0x91, 0x5b, 0x31, 0xc4, 0xac, 0xde, 0x04, 0xcc,
+	0x6a, 0xc6, 0xdb, 0x64, 0x8b, 0x64, 0xfa, 0xff, 0xc0, 0xb0, 0x98, 0x52, 0x87, 0x76, 0xbf, 0x84,
+	0x37, 0xc0, 0x53, 0x8d, 0x9f, 0xbc, 0x1f, 0xe5, 0x53, 0xa3, 0x98, 0x5f, 0x8d, 0x84, 0x75, 0x58,
+	0x0d, 0xe5, 0xcd, 0xa6, 0xfe, 0x63, 0x8e, 0x0a, 0xe6, 0x14, 0xba, 0xea, 0xa6, 0x6c, 0x1a, 0xe3,
+	0x4e, 0xcd, 0x06, 0xdd, 0x53, 0x53, 0x10, 0xd1, 0xe0, 0x09, 0x4d, 0x42, 0xf8, 0x53, 0x8e, 0xee,
+	0x83, 0x5f, 0x16, 0x76, 0xdb, 0xde, 0x85, 0x64, 0x9f, 0xd4, 0xf2, 0x69, 0xd4, 0xb5, 0xe8, 0x35,
+	0x82, 0x23, 0x6b, 0x48, 0xa4, 0x18, 0x97, 0x56, 0x1d, 0x15, 0x7e, 0xc9, 0x41, 0xda, 0xc5, 0x1f,
+	0xad, 0xc1, 0xac, 0x53, 0xfe, 0xb1, 0xf3, 0x1d, 0x07, 0x60, 0x1d, 0xbf, 0xa9, 0x99, 0x72, 0x87,
+	0xbd, 0x33, 0xa1, 0x1f, 0x56, 0x8a, 0xda, 0x37, 0x30, 0x0d, 0x87, 0xe3, 0x22, 0xf9, 0x8d, 0xee,
+	0x43, 0xa2, 0xaf, 0x2a, 0x26, 0x31, 0xfb, 0x05, 0xbf, 0x3d, 0x93, 0xa9, 0x72, 0x47, 0xaa, 0x62,
+	0x8a, 0x04, 0x4b, 0xb8, 0x07, 0x09, 0xeb, 0xcb, 0x5b, 0x81, 0x98, 0x85, 0x64, 0xe1, 0x75, 0xa3,
+	0x54, 0xcf, 0x70, 0x08, 0x20, 0x55, 0xa6, 0xf9, 0x7a, 0x4c, 0xa8, 0xd8, 0x0f, 0x4e, 0x9d, 0x45,
+	0x58, 0x2e, 0x40, 0x3e, 0x56, 0x35, 0xbd, 0x2b, 0x77, 0x88, 0xcc, 0x33, 0xa2, 0xf3, 0x1d, 0xdd,
+	0x22, 0xa1, 0x05, 0xc5, 0x35, 0xe7, 0x44, 0xc2, 0xea, 0x45, 0x5f, 0x50, 0xdd, 0x8a, 0xaa, 0x14,
+	0xe5, 0x43, 0x2b, 0x45, 0xeb, 0x9e, 0x5b, 0x76, 0x44, 0x8d, 0xe8, 0x57, 0x31, 0x58, 0x0e, 0xc5,
+	0x43, 0x8f, 0xdd, 0xd5, 0xa1, 0x9b, 0x43, 0x79, 0xba, 0xeb, 0x42, 0xff, 0xcd, 0xd1, 0xba, 0xd0,
+	0x8e, 0xa7, 0x2e, 0x74, 0x67, 0x24, 0xbd, 0xbb, 0x22, 0xf4, 0x57, 0x5c, 0x44, 0x45, 0xa8, 0xde,
+	0xc8, 0xef, 0x95, 0xa4, 0xa3, 0x2a, 0xfd, 0xeb, 0x54, 0x84, 0x96, 0x20, 0x33, 0xa8, 0x93, 0x48,
+	0xf5, 0x46, 0xbe, 0x51, 0xcf, 0xc4, 0x82, 0xd5, 0x98, 0x78, 0x68, 0xad, 0x25, 0x31, 0xba, 0xac,
+	0x92, 0xa4, 0x28, 0xab, 0x80, 0x18, 0xf5, 0x41, 0xed, 0xa8, 0xda, 0x90, 0xf6, 0xc4, 0xda, 0xd1,
+	0x21, 0x7b, 0x72, 0xe5, 0xd4, 0x5c, 0x96, 0x00, 0xb1, 0x23, 0x73, 0x3f, 0xa2, 0xff, 0x73, 0x0e,
+	0x16, 0x3d, 0x60, 0x76, 0x82, 0xae, 0x6e, 0x37, 0xe7, 0xe9, 0x76, 0x3f, 0x84, 0x25, 0x2b, 0x6d,
+	0xa4, 0xe6, 0x62, 0x48, 0x3d, 0xac, 0x93, 0x2a, 0x37, 0x53, 0xfc, 0xab, 0x5d, 0xf9, 0x8c, 0x75,
+	0x02, 0x0e, 0xb1, 0x6e, 0x31, 0xbe, 0x84, 0x5a, 0xaf, 0xf0, 0x4d, 0x9c, 0x06, 0x27, 0x13, 0x27,
+	0x37, 0x23, 0x1d, 0x55, 0x30, 0xfb, 0x89, 0x4f, 0x90, 0xfd, 0x44, 0xb8, 0xb9, 0xc4, 0x44, 0x11,
+	0xf1, 0xe4, 0x17, 0x7b, 0x75, 0x70, 0x79, 0xd3, 0xf0, 0xf5, 0xbe, 0x5b, 0x89, 0x47, 0xa6, 0x5b,
+	0xa9, 0x6f, 0x0a, 0xdc, 0xcf, 0x2e, 0x2b, 0x59, 0xce, 0xd3, 0xa0, 0xec, 0x02, 0x49, 0xd2, 0xf6,
+	0xff, 0x70, 0x30, 0x53, 0x6e, 0x61, 0xd5, 0xa4, 0x6b, 0x9b, 0xf7, 0xfc, 0x9f, 0x05, 0x5a, 0x8b,
+	0xf8, 0xf7, 0x0b, 0xb2, 0x30, 0x7e, 0x7d, 0xe8, 0x3f, 0x67, 0x08, 0x53, 0xe8, 0xc4, 0xf5, 0x3f,
+	0x22, 0x9e, 0x76, 0xc6, 0x07, 0x01, 0xca, 0x10, 0x3f, 0xc7, 0xdf, 0x1e, 0x81, 0xe5, 0xcc, 0xf3,
+	0x04, 0x92, 0xe4, 0x45, 0x3d, 0x5a, 0x72, 0x5e, 0xf5, 0xbb, 0x1e, 0xdc, 0xf3, 0xcb, 0x3e, 0xa8,
+	0x4d, 0xb7, 0xfd, 0x8f, 0xb3, 0x00, 0x83, 0x5c, 0x13, 0xbd, 0x80, 0x39, 0xf7, 0xa3, 0x5e, 0xb4,
+	0x3a, 0xe4, 0x49, 0x39, 0xbf, 0x16, 0x3e, 0xe8, 0xc8, 0xf4, 0x02, 0xe6, 0xdc, 0x0f, 0xb9, 0x06,
+	0xcc, 0x42, 0x1e, 0x93, 0x0d, 0x98, 0x85, 0xbe, 0xfd, 0x9a, 0x42, 0x1d, 0xb8, 0x16, 0xf1, 0x94,
+	0x07, 0xdd, 0x19, 0xef, 0xc1, 0x13, 0xff, 0xbd, 0x31, 0xdf, 0x04, 0x09, 0x53, 0x48, 0x87, 0xeb,
+	0x91, 0x2f, 0x58, 0xd0, 0xd6, 0xb8, 0xef, 0x6b, 0xf8, 0xbb, 0x63, 0x60, 0x3a, 0x73, 0xf6, 0x81,
+	0x8f, 0x6e, 0x9b, 0xa3, 0xbb, 0x63, 0xbf, 0xe7, 0xe0, 0xef, 0x8d, 0xdf, 0x85, 0x17, 0xa6, 0xd0,
+	0x3e, 0xa4, 0x5d, 0xfd, 0x53, 0xc4, 0x87, 0x36, 0x55, 0x29, 0xe3, 0xd5, 0x21, 0x0d, 0x57, 0xca,
+	0xc9, 0xd5, 0xd2, 0x1a, 0x70, 0x0a, 0x36, 0xe7, 0x06, 0x9c, 0x42, 0x7a, 0x60, 0xfe, 0xed, 0xf7,
+	0x5d, 0xf2, 0x61, 0xdb, 0x1f, 0x1e, 0x25, 0x84, 0x6d, 0x7f, 0x44, 0xc4, 0x20, 0x4c, 0xa1, 0xcf,
+	0x60, 0xc1, 0x5b, 0xa6, 0x46, 0xeb, 0x43, 0xcb, 0xed, 0xfc, 0x8d, 0xa8, 0x61, 0x37, 0x4b, 0x6f,
+	0x55, 0x74, 0xc0, 0x32, 0xb4, 0x3a, 0x3b, 0x60, 0x19, 0x51, 0x4c, 0x9d, 0xb2, 0xfc, 0x93, 0xa7,
+	0xd6, 0x37, 0xf0, 0x4f, 0x61, 0x25, 0xca, 0x81, 0x7f, 0x0a, 0x2d, 0x10, 0x0a, 0x53, 0x48, 0x81,
+	0x95, 0xf0, 0x52, 0x13, 0xba, 0x3d, 0x56, 0x25, 0x8d, 0xbf, 0x33, 0x0a, 0xcd, 0x99, 0xaa, 0x09,
+	0x8b, 0x21, 0xed, 0x6d, 0x24, 0x0c, 0xed, 0x7d, 0xd3, 0x49, 0x6e, 0x8d, 0xd1, 0x1f, 0x17, 0xac,
+	0x68, 0x63, 0xfb, 0xdf, 0x92, 0x90, 0x20, 0xd7, 0x7e, 0x03, 0xae, 0xf8, 0xea, 0x09, 0xe8, 0xc6,
+	0xf0, 0x2a, 0x0b, 0xbf, 0x11, 0x39, 0xee, 0xac, 0xe1, 0x0d, 0x5c, 0x0d, 0x54, 0x08, 0xd0, 0xa6,
+	0x9b, 0x2e, 0xac, 0x4a, 0xc1, 0xdf, 0x1c, 0x82, 0xe1, 0xe7, 0xed, 0xf5, 0x6d, 0x9b, 0xa3, 0x52,
+	0x58, 0x2f, 0xef, 0x28, 0x7f, 0xf6, 0x05, 0x8d, 0xb2, 0xfc, 0x9e, 0x4c, 0xf0, 0xca, 0x15, 0xea,
+	0xc3, 0x6e, 0x0d, 0xc5, 0x71, 0x66, 0xf8, 0xdc, 0x09, 0xef, 0x5c, 0x19, 0x14, 0xf2, 0x08, 0x17,
+	0x9a, 0xe9, 0xf1, 0xc2, 0x30, 0x14, 0x87, 0xfd, 0x2b, 0xc8, 0xf8, 0xef, 0x79, 0xb4, 0x31, 0x22,
+	0xec, 0xe0, 0x37, 0xa3, 0x11, 0xfc, 0x3b, 0xe3, 0x77, 0x32, 0x7e, 0xa9, 0xc2, 0xdc, 0xcb, 0xad,
+	0xa1, 0x38, 0x6e, 0xb7, 0xe8, 0x8a, 0x70, 0x07, 0x6e, 0x31, 0x18, 0x0d, 0x0f, 0xdc, 0x62, 0x48,
+	0x48, 0x2c, 0x4c, 0xed, 0x3c, 0x03, 0x90, 0x3b, 0xbd, 0xb7, 0xb2, 0x84, 0xd5, 0x7e, 0x17, 0xad,
+	0x05, 0x3a, 0x50, 0x25, 0xb5, 0xdf, 0xad, 0xf5, 0xac, 0xcc, 0xcb, 0xc8, 0xfe, 0x7c, 0x86, 0xe4,
+	0x5b, 0xb3, 0x84, 0xc0, 0x1a, 0xd8, 0xa9, 0x40, 0x66, 0x40, 0x2d, 0x91, 0x10, 0x0a, 0xdd, 0x0c,
+	0xe5, 0x41, 0xfa, 0xf9, 0x3e, 0x46, 0x0b, 0x0e, 0x23, 0x32, 0xba, 0xf3, 0x09, 0x40, 0xd3, 0x50,
+	0x24, 0x1a, 0xc3, 0xa1, 0xf5, 0x00, 0x9f, 0xe7, 0x0a, 0xee, 0xb4, 0x6c, 0x1e, 0x7f, 0xc1, 0x84,
+	0x69, 0x1a, 0x0a, 0x8d, 0xf4, 0x76, 0x7e, 0x04, 0x69, 0x2a, 0xcc, 0x89, 0x85, 0x37, 0x8a, 0x9e,
+	0xc9, 0x40, 0x57, 0x4f, 0x46, 0x76, 0x4a, 0x30, 0x4f, 0x19, 0xb0, 0xac, 0x11, 0x6d, 0x04, 0x58,
+	0x1c, 0xd0, 0x11, 0x1f, 0x93, 0x39, 0x42, 0xc6, 0xc6, 0x76, 0x0a, 0x30, 0x67, 0xb3, 0x31, 0xdf,
+	0x6a, 0x2d, 0x74, 0x23, 0x84, 0x8b, 0x35, 0xe0, 0x63, 0x92, 0x66, 0x4c, 0xac, 0xa1, 0x81, 0x28,
+	0xf6, 0x3f, 0x9b, 0x06, 0x45, 0x61, 0x99, 0x5d, 0xa8, 0x28, 0x6c, 0xac, 0x90, 0x7c, 0x13, 0x6f,
+	0x1a, 0xca, 0x71, 0x8a, 0x10, 0xfd, 0xe0, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x5e, 0xa7, 0xda,
+	0x94, 0x19, 0x3d, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// IdentityClient is the client API for Identity service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type IdentityClient interface {
+	GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error)
+	GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error)
+	Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error)
+}
+
+type identityClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewIdentityClient(cc *grpc.ClientConn) IdentityClient {
+	return &identityClient{cc}
+}
+
+func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) {
+	out := new(GetPluginInfoResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Identity/GetPluginInfo", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *identityClient) GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) {
+	out := new(GetPluginCapabilitiesResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Identity/GetPluginCapabilities", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *identityClient) Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) {
+	out := new(ProbeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Identity/Probe", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// IdentityServer is the server API for Identity service.
+type IdentityServer interface {
+	GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error)
+	GetPluginCapabilities(context.Context, *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error)
+	Probe(context.Context, *ProbeRequest) (*ProbeResponse, error)
+}
+
+// UnimplementedIdentityServer can be embedded to have forward compatible implementations.
+type UnimplementedIdentityServer struct {
+}
+
+func (*UnimplementedIdentityServer) GetPluginInfo(ctx context.Context, req *GetPluginInfoRequest) (*GetPluginInfoResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetPluginInfo not implemented")
+}
+func (*UnimplementedIdentityServer) GetPluginCapabilities(ctx context.Context, req *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetPluginCapabilities not implemented")
+}
+func (*UnimplementedIdentityServer) Probe(ctx context.Context, req *ProbeRequest) (*ProbeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Probe not implemented")
+}
+
+func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) {
+	s.RegisterService(&_Identity_serviceDesc, srv)
+}
+
+func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetPluginInfoRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(IdentityServer).GetPluginInfo(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Identity/GetPluginInfo",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Identity_GetPluginCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetPluginCapabilitiesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(IdentityServer).GetPluginCapabilities(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Identity/GetPluginCapabilities",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(IdentityServer).GetPluginCapabilities(ctx, req.(*GetPluginCapabilitiesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Identity_Probe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ProbeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(IdentityServer).Probe(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Identity/Probe",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(IdentityServer).Probe(ctx, req.(*ProbeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Identity_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "csi.v1.Identity",
+	HandlerType: (*IdentityServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetPluginInfo",
+			Handler:    _Identity_GetPluginInfo_Handler,
+		},
+		{
+			MethodName: "GetPluginCapabilities",
+			Handler:    _Identity_GetPluginCapabilities_Handler,
+		},
+		{
+			MethodName: "Probe",
+			Handler:    _Identity_Probe_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/container-storage-interface/spec/csi.proto",
+}
+
+// ControllerClient is the client API for Controller service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ControllerClient interface {
+	CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error)
+	DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error)
+	ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error)
+	ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error)
+	ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error)
+	ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error)
+	GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error)
+	ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error)
+	CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error)
+	DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error)
+	ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error)
+	ControllerExpandVolume(ctx context.Context, in *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error)
+	ControllerGetVolume(ctx context.Context, in *ControllerGetVolumeRequest, opts ...grpc.CallOption) (*ControllerGetVolumeResponse, error)
+}
+
+type controllerClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewControllerClient(cc *grpc.ClientConn) ControllerClient {
+	return &controllerClient{cc}
+}
+
+func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) {
+	out := new(CreateVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/CreateVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) {
+	out := new(DeleteVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/DeleteVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) {
+	out := new(ControllerPublishVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerPublishVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) {
+	out := new(ControllerUnpublishVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerUnpublishVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) {
+	out := new(ValidateVolumeCapabilitiesResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ValidateVolumeCapabilities", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) {
+	out := new(ListVolumesResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ListVolumes", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) {
+	out := new(GetCapacityResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/GetCapacity", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) {
+	out := new(ControllerGetCapabilitiesResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerGetCapabilities", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) {
+	out := new(CreateSnapshotResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/CreateSnapshot", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) {
+	out := new(DeleteSnapshotResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/DeleteSnapshot", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) {
+	out := new(ListSnapshotsResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ListSnapshots", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ControllerExpandVolume(ctx context.Context, in *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error) {
+	out := new(ControllerExpandVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerExpandVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ControllerGetVolume(ctx context.Context, in *ControllerGetVolumeRequest, opts ...grpc.CallOption) (*ControllerGetVolumeResponse, error) {
+	out := new(ControllerGetVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerGetVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// ControllerServer is the server API for Controller service.
+type ControllerServer interface {
+	CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error)
+	DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error)
+	ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error)
+	ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error)
+	ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error)
+	ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error)
+	GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error)
+	ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error)
+	CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error)
+	DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error)
+	ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error)
+	ControllerExpandVolume(context.Context, *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error)
+	ControllerGetVolume(context.Context, *ControllerGetVolumeRequest) (*ControllerGetVolumeResponse, error)
+}
+
+// UnimplementedControllerServer can be embedded to have forward compatible implementations.
+type UnimplementedControllerServer struct {
+}
+
+func (*UnimplementedControllerServer) CreateVolume(ctx context.Context, req *CreateVolumeRequest) (*CreateVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CreateVolume not implemented")
+}
+func (*UnimplementedControllerServer) DeleteVolume(ctx context.Context, req *DeleteVolumeRequest) (*DeleteVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteVolume not implemented")
+}
+func (*UnimplementedControllerServer) ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ControllerPublishVolume not implemented")
+}
+func (*UnimplementedControllerServer) ControllerUnpublishVolume(ctx context.Context, req *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ControllerUnpublishVolume not implemented")
+}
+func (*UnimplementedControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ValidateVolumeCapabilities not implemented")
+}
+func (*UnimplementedControllerServer) ListVolumes(ctx context.Context, req *ListVolumesRequest) (*ListVolumesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListVolumes not implemented")
+}
+func (*UnimplementedControllerServer) GetCapacity(ctx context.Context, req *GetCapacityRequest) (*GetCapacityResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetCapacity not implemented")
+}
+func (*UnimplementedControllerServer) ControllerGetCapabilities(ctx context.Context, req *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ControllerGetCapabilities not implemented")
+}
+func (*UnimplementedControllerServer) CreateSnapshot(ctx context.Context, req *CreateSnapshotRequest) (*CreateSnapshotResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CreateSnapshot not implemented")
+}
+func (*UnimplementedControllerServer) DeleteSnapshot(ctx context.Context, req *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented")
+}
+func (*UnimplementedControllerServer) ListSnapshots(ctx context.Context, req *ListSnapshotsRequest) (*ListSnapshotsResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented")
+}
+func (*UnimplementedControllerServer) ControllerExpandVolume(ctx context.Context, req *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ControllerExpandVolume not implemented")
+}
+func (*UnimplementedControllerServer) ControllerGetVolume(ctx context.Context, req *ControllerGetVolumeRequest) (*ControllerGetVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ControllerGetVolume not implemented")
+}
+
+func RegisterControllerServer(s *grpc.Server, srv ControllerServer) {
+	s.RegisterService(&_Controller_serviceDesc, srv)
+}
+
+func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).CreateVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/CreateVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).DeleteVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/DeleteVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ControllerPublishVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ControllerPublishVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ControllerPublishVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ControllerUnpublishVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ControllerUnpublishVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ControllerUnpublishVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ValidateVolumeCapabilitiesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ValidateVolumeCapabilities",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListVolumesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ListVolumes(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ListVolumes",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetCapacityRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).GetCapacity(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/GetCapacity",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ControllerGetCapabilitiesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ControllerGetCapabilities(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ControllerGetCapabilities",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).CreateSnapshot(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/CreateSnapshot",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).DeleteSnapshot(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/DeleteSnapshot",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListSnapshotsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ListSnapshots(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ListSnapshots",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ControllerExpandVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ControllerExpandVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ControllerExpandVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ControllerExpandVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ControllerExpandVolume(ctx, req.(*ControllerExpandVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ControllerGetVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ControllerGetVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ControllerGetVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ControllerGetVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ControllerGetVolume(ctx, req.(*ControllerGetVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Controller_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "csi.v1.Controller",
+	HandlerType: (*ControllerServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "CreateVolume",
+			Handler:    _Controller_CreateVolume_Handler,
+		},
+		{
+			MethodName: "DeleteVolume",
+			Handler:    _Controller_DeleteVolume_Handler,
+		},
+		{
+			MethodName: "ControllerPublishVolume",
+			Handler:    _Controller_ControllerPublishVolume_Handler,
+		},
+		{
+			MethodName: "ControllerUnpublishVolume",
+			Handler:    _Controller_ControllerUnpublishVolume_Handler,
+		},
+		{
+			MethodName: "ValidateVolumeCapabilities",
+			Handler:    _Controller_ValidateVolumeCapabilities_Handler,
+		},
+		{
+			MethodName: "ListVolumes",
+			Handler:    _Controller_ListVolumes_Handler,
+		},
+		{
+			MethodName: "GetCapacity",
+			Handler:    _Controller_GetCapacity_Handler,
+		},
+		{
+			MethodName: "ControllerGetCapabilities",
+			Handler:    _Controller_ControllerGetCapabilities_Handler,
+		},
+		{
+			MethodName: "CreateSnapshot",
+			Handler:    _Controller_CreateSnapshot_Handler,
+		},
+		{
+			MethodName: "DeleteSnapshot",
+			Handler:    _Controller_DeleteSnapshot_Handler,
+		},
+		{
+			MethodName: "ListSnapshots",
+			Handler:    _Controller_ListSnapshots_Handler,
+		},
+		{
+			MethodName: "ControllerExpandVolume",
+			Handler:    _Controller_ControllerExpandVolume_Handler,
+		},
+		{
+			MethodName: "ControllerGetVolume",
+			Handler:    _Controller_ControllerGetVolume_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/container-storage-interface/spec/csi.proto",
+}
+
+// NodeClient is the client API for Node service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type NodeClient interface {
+	NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error)
+	NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error)
+	NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error)
+	NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error)
+	NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error)
+	NodeExpandVolume(ctx context.Context, in *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error)
+	NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error)
+	NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error)
+}
+
+type nodeClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewNodeClient(cc *grpc.ClientConn) NodeClient {
+	return &nodeClient{cc}
+}
+
+func (c *nodeClient) NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) {
+	out := new(NodeStageVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeStageVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) {
+	out := new(NodeUnstageVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeUnstageVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) {
+	out := new(NodePublishVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodePublishVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) {
+	out := new(NodeUnpublishVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeUnpublishVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error) {
+	out := new(NodeGetVolumeStatsResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetVolumeStats", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodeExpandVolume(ctx context.Context, in *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error) {
+	out := new(NodeExpandVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeExpandVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) {
+	out := new(NodeGetCapabilitiesResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetCapabilities", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) {
+	out := new(NodeGetInfoResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetInfo", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// NodeServer is the server API for Node service.
+type NodeServer interface {
+	NodeStageVolume(context.Context, *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error)
+	NodeUnstageVolume(context.Context, *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error)
+	NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error)
+	NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error)
+	NodeGetVolumeStats(context.Context, *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error)
+	NodeExpandVolume(context.Context, *NodeExpandVolumeRequest) (*NodeExpandVolumeResponse, error)
+	NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error)
+	NodeGetInfo(context.Context, *NodeGetInfoRequest) (*NodeGetInfoResponse, error)
+}
+
+// UnimplementedNodeServer can be embedded to have forward compatible implementations.
+type UnimplementedNodeServer struct {
+}
+
+func (*UnimplementedNodeServer) NodeStageVolume(ctx context.Context, req *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeStageVolume not implemented")
+}
+func (*UnimplementedNodeServer) NodeUnstageVolume(ctx context.Context, req *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeUnstageVolume not implemented")
+}
+func (*UnimplementedNodeServer) NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodePublishVolume not implemented")
+}
+func (*UnimplementedNodeServer) NodeUnpublishVolume(ctx context.Context, req *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeUnpublishVolume not implemented")
+}
+func (*UnimplementedNodeServer) NodeGetVolumeStats(ctx context.Context, req *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeGetVolumeStats not implemented")
+}
+func (*UnimplementedNodeServer) NodeExpandVolume(ctx context.Context, req *NodeExpandVolumeRequest) (*NodeExpandVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeExpandVolume not implemented")
+}
+func (*UnimplementedNodeServer) NodeGetCapabilities(ctx context.Context, req *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeGetCapabilities not implemented")
+}
+func (*UnimplementedNodeServer) NodeGetInfo(ctx context.Context, req *NodeGetInfoRequest) (*NodeGetInfoResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeGetInfo not implemented")
+}
+
+func RegisterNodeServer(s *grpc.Server, srv NodeServer) {
+	s.RegisterService(&_Node_serviceDesc, srv)
+}
+
+func _Node_NodeStageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeStageVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeStageVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeStageVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeStageVolume(ctx, req.(*NodeStageVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodeUnstageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeUnstageVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeUnstageVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeUnstageVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeUnstageVolume(ctx, req.(*NodeUnstageVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodePublishVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodePublishVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodePublishVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeUnpublishVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeUnpublishVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeUnpublishVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodeGetVolumeStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeGetVolumeStatsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeGetVolumeStats(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeGetVolumeStats",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeGetVolumeStats(ctx, req.(*NodeGetVolumeStatsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodeExpandVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeExpandVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeExpandVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeExpandVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeExpandVolume(ctx, req.(*NodeExpandVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeGetCapabilitiesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeGetCapabilities(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeGetCapabilities",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodeGetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeGetInfoRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeGetInfo(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeGetInfo",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeGetInfo(ctx, req.(*NodeGetInfoRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Node_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "csi.v1.Node",
+	HandlerType: (*NodeServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "NodeStageVolume",
+			Handler:    _Node_NodeStageVolume_Handler,
+		},
+		{
+			MethodName: "NodeUnstageVolume",
+			Handler:    _Node_NodeUnstageVolume_Handler,
+		},
+		{
+			MethodName: "NodePublishVolume",
+			Handler:    _Node_NodePublishVolume_Handler,
+		},
+		{
+			MethodName: "NodeUnpublishVolume",
+			Handler:    _Node_NodeUnpublishVolume_Handler,
+		},
+		{
+			MethodName: "NodeGetVolumeStats",
+			Handler:    _Node_NodeGetVolumeStats_Handler,
+		},
+		{
+			MethodName: "NodeExpandVolume",
+			Handler:    _Node_NodeExpandVolume_Handler,
+		},
+		{
+			MethodName: "NodeGetCapabilities",
+			Handler:    _Node_NodeGetCapabilities_Handler,
+		},
+		{
+			MethodName: "NodeGetInfo",
+			Handler:    _Node_NodeGetInfo_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/container-storage-interface/spec/csi.proto",
+}

+ 1 - 1
vendor/github.com/containerd/containerd/.golangci.yml

@@ -19,7 +19,7 @@ issues:
     - EXC0002
 
 run:
-  timeout: 3m
+  timeout: 8m
   skip-dirs:
     - api
     - design

+ 16 - 0
vendor/github.com/containerd/containerd/.mailmap

@@ -29,13 +29,17 @@ Eric Ernst <eric@amperecomputing.com> <eric.ernst@intel.com>
 Eric Ren <renzhen.rz@linux.alibaba.com> <renzhen@linux.alibaba.com>
 Eric Ren <renzhen.rz@linux.alibaba.com> <renzhen.rz@alibaba-linux.com>
 Eric Ren <renzhen.rz@linux.alibaba.com> <renzhen.rz@alibaba-inc.com>
+Fabiano Fidêncio <fidencio@redhat.com> <fabiano.fidencio@intel.com>
 Fahed Dorgaa <fahed.dorgaa@gmail.com>
 Frank Yang <yyb196@gmail.com>
 Fupan Li <lifupan@gmail.com>
 Fupan Li <lifupan@gmail.com> <fupan.lfp@antfin.com>
+Fupan Li <lifupan@gmail.com> <fupan.lfp@antgroup.com>
+Furkan Türkal <furkan.turkal@trendyol.com>
 Georgia Panoutsakopoulou <gpanoutsak@gmail.com>
 Guangming Wang <guangming.wang@daocloud.io>
 Haiyan Meng <haiyanmeng@google.com>
+haoyun <yun.hao@daocloud.io>
 Harry Zhang <harryz@hyper.sh> <harryzhang@zju.edu.cn>
 Hu Shuai <hus.fnst@cn.fujitsu.com>
 Hu Shuai <hus.fnst@cn.fujitsu.com> <hushuaiia@qq.com>
@@ -53,15 +57,18 @@ John Howard <github@lowenna.com> <jhoward@microsoft.com>
 John Howard <github@lowenna.com> <jhowardmsft@users.noreply.github.com>
 Lorenz Brun <lorenz@brun.one> <lorenz@nexantic.com>
 Luc Perkins <lucperkins@gmail.com>
+Jiajun Jiang <levinxo@gmail.com>
 Julien Balestra <julien.balestra@datadoghq.com>
 Jun Lin Chen <webmaster@mc256.com> <1913688+mc256@users.noreply.github.com>
 Justin Cormack <justin.cormack@docker.com> <justin@specialbusservice.com>
 Justin Terry <juterry@microsoft.com>
 Justin Terry <juterry@microsoft.com> <jterry75@users.noreply.github.com>
+Kante <kerthcet@gmail.com>
 Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
 Kevin Kern <kaiwentan@harmonycloud.cn>
 Kevin Parsons <kevpar@microsoft.com> <kevpar@users.noreply.github.com>
 Kevin Xu <cming.xu@gmail.com>
+Kitt Hsu <kitt.hsu@gmail.com>
 Kohei Tokunaga <ktokunaga.mail@gmail.com>
 Krasi Georgiev <krasi.root@gmail.com> <krasi@vip-consult.solutions>
 Lantao Liu <lantaol@google.com>
@@ -69,6 +76,7 @@ Lantao Liu <lantaol@google.com> <taotaotheripper@gmail.com>
 Li Yuxuan <liyuxuan04@baidu.com> <darfux@163.com>
 Lifubang <lifubang@aliyun.com> <lifubang@acmcoder.com>
 Lu Jingxiao <lujingxiao@huawei.com>
+Maksym Pavlenko <pavlenko.maksym@gmail.com> <865334+mxpv@users.noreply.github.com>
 Maksym Pavlenko <pavlenko.maksym@gmail.com> <makpav@amazon.com>
 Maksym Pavlenko <pavlenko.maksym@gmail.com> <mxpv@apple.com>
 Mario Hros <spam@k3a.me>
@@ -79,6 +87,9 @@ Michael Crosby <crosbymichael@gmail.com> <michael@thepasture.io>
 Michael Katsoulis <michaelkatsoulis88@gmail.com>
 Mike Brown <brownwm@us.ibm.com> <mikebrow@users.noreply.github.com>
 Mohammad Asif Siddiqui <mohammad.asif.siddiqui1@huawei.com>
+Ng Yang <wssccc@qq.com>
+Ning Li <ning.a.li@transwarp.io>
+ningmingxiao <ning.mingxiao@zte.com.cn>
 Nishchay Kumar <mrawesomenix@gmail.com>
 Oliver Stenbom <oliver@stenbom.eu> <ostenbom@pivotal.io>
 Phil Estes <estesp@gmail.com> <estesp@linux.vnet.ibm.com>
@@ -104,16 +115,20 @@ Stephen J Day <stevvooe@gmail.com> <stephen.day@docker.com>
 Sudeesh John <sudeesh@linux.vnet.ibm.com>
 Su Fei  <fesu@ebay.com> <fesu@ebay.com>
 Su Xiaolin <linxxnil@126.com>
+Takumasa Sakao <sakataku7@gmail.com> <tsakao@zlab.co.jp>
 Ted Yu <yuzhihong@gmail.com>
 Tõnis Tiigi <tonistiigi@gmail.com>
 Wade Lee <weidonglee27@gmail.com>
 Wade Lee <weidonglee27@gmail.com> <weidonglee29@gmail.com>
 Wade Lee <weidonglee27@gmail.com> <21621232@zju.edu.cn>
 wanglei <wllenyj@linux.alibaba.com>
+wanglei <wllenyj@linux.alibaba.com> <wanglei01@alibaba-inc.com>
+wangzhan <wang.zhan@smartx.com>
 Wei Fu <fuweid89@gmail.com>
 Wei Fu <fuweid89@gmail.com> <fhfuwei@163.com>
 Xiaodong Zhang <a4012017@sina.com>
 Xuean Yan <yan.xuean@zte.com.cn>
+Yang Yang <yang8518296@163.com>
 Yue Zhang <zy675793960@yeah.net>
 Yuxing Liu <starnop@163.com>
 Zhang Wei <zhangwei555@huawei.com>
@@ -124,4 +139,5 @@ Zhiyu Li <payall4u@qq.com> <404977848@qq.com>
 Zhongming Chang<zhongming.chang@daocloud.io>
 Zhoulin Xie <zhoulin.xie@daocloud.io>
 Zhoulin Xie <zhoulin.xie@daocloud.io> <42261994+JoeWrightss@users.noreply.github.com>
+zounengren <zouyee1989@gmail.com> <zounengren@cmss.chinamobile.com>
 张潇 <xiaozhang0210@hotmail.com>

+ 12 - 2
vendor/github.com/containerd/containerd/ADOPTERS.md

@@ -12,10 +12,14 @@ including the Balena project listed below.
 
 **_[IBM Cloud Private (ICP)](https://www.ibm.com/cloud/private)_** - IBM's on-premises cloud offering has containerd as a "tech preview" CRI runtime for the Kubernetes offered within this product for the past two releases, and plans to fully migrate to containerd in a future release.
 
-**_[Google Cloud Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/)_** - offers containerd as the CRI runtime in **beta** for recent versions of Kubernetes.
+**_[Google Cloud Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/)_** - containerd has been offered in GKE since version 1.14 and has been the default runtime since version 1.19. It is also the only supported runtime for GKE Autopilot from the launch. [More details](https://cloud.google.com/kubernetes-engine/docs/concepts/using-containerd)
 
 **_[AWS Fargate](https://aws.amazon.com/fargate)_** - uses containerd + Firecracker (noted below) as the runtime and isolation technology for containers run in the Fargate platform. Fargate is a serverless, container-native compute offering from Amazon Web Services.
 
+**_[Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/)_** - EKS optionally offers containerd as a CRI runtime starting with Kubernetes version 1.21. In Kubernetes 1.22 the default CRI runtime will be containerd.
+
+**_[Bottlerocket](https://aws.amazon.com/bottlerocket/)_** - Bottlerocket is a Linux distribution from Amazon Web Services purpose-built for containers using containerd as the core system runtime.
+
 **_Cloud Foundry_** - The [Guardian container manager](https://github.com/cloudfoundry/guardian) for CF has been using OCI runC directly with additional code from CF managing the container image and filesystem interactions, but have recently migrated to use containerd as a replacement for the extra code they had written around runC.
 
 **_Alibaba's PouchContainer_** - The Alibaba [PouchContainer](https://github.com/alibaba/pouch) project uses containerd as its runtime for a cloud native offering that has unique isolation and image distribution capabilities.
@@ -32,7 +36,7 @@ including the Balena project listed below.
 
 **_BuildKit_** - The Moby project's [BuildKit](https://github.com/moby/buildkit) can use either runC or containerd as build execution backends for building container images. BuildKit support has also been built into the Docker engine in recent releases, making BuildKit provide the backend to the `docker build` command.
 
-**_Azure acs-engine_** - Microsoft Azure's [acs-engine](https://github.com/Azure/acs-engine) open source project has customizable deployment of Kubernetes clusters, where containerd is a selectable container runtime. At some point in the future Azure's AKS service will default to use containerd as the CRI runtime for deployed Kubernetes clusters.
+**_[Azure Kubernetes Service (AKS)](https://azure.microsoft.com/services/kubernetes-service)_** - Microsoft's managed Kubernetes offering uses containerd for Linux nodes running v1.19 or greater. Containerd for Windows nodes is currently in public preview. [More Details](https://docs.microsoft.com/azure/aks/cluster-configuration#container-runtime-configuration)
 
 **_Amazon Firecracker_** - The AWS [Firecracker VMM project](http://firecracker-microvm.io/) has extended containerd with a new snapshotter and v2 shim to allow containerd to drive virtualized container processes via their VMM implementation. More details on their containerd integration are available in [their GitHub project](https://github.com/firecracker-microvm/firecracker-containerd).
 
@@ -42,6 +46,12 @@ including the Balena project listed below.
 
 **_Inclavare Containers_** - [Inclavare Containers](https://github.com/alibaba/inclavare-containers) is an innovation of container runtime with the novel approach for launching protected containers in hardware-assisted Trusted Execution Environment (TEE) technology, aka Enclave, which can prevent the untrusted entity, such as Cloud Service Provider (CSP), from accessing the sensitive and confidential assets in use.
 
+**_VMware TKG_** - [Tanzu Kubernetes Grid](https://tanzu.vmware.com/kubernetes-grid) VMware's Multicloud Kubernetes offering uses containerd as the default CRI runtime.
+
+**_VMware TCE_** - [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) VMware's fully-featured, easy to manage, Kubernetes platform for learners and users. It is a freely available, community supported, and open source distribution of VMware Tanzu. It uses containerd as the default CRI runtime.
+
+**_[Talos Linux](https://www.talos.dev/)_** - Talos Linux is Linux designed for Kubernetes – secure, immutable, and minimal. Talos Linux is using containerd as the core system runtime and CRI implementation.
+
 **_Other Projects_** - While the above list provides a cross-section of well known uses of containerd, the simplicity and clear API layer for containerd has inspired many smaller projects around providing simple container management platforms. Several examples of building higher layer functionality on top of the containerd base have come from various containerd community participants:
  - Michael Crosby's [boss](https://github.com/crosbymichael/boss) project,
  - Evan Hazlett's [stellar](https://github.com/ehazlett/stellar) project,

+ 47 - 38
vendor/github.com/containerd/containerd/BUILDING.md

@@ -15,7 +15,7 @@ This doc includes:
 To build the `containerd` daemon, and the `ctr` simple test client, the following build system dependencies are required:
 
 * Go 1.13.x or above except 1.14.x
-* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
+* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/protocolbuffers/protobuf/releases))
 * Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via the build tag `no_btrfs`, removing this dependency.
 
 ## Build the development environment
@@ -32,9 +32,9 @@ git clone https://github.com/containerd/containerd
 
 For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.11.4 release for a 64-bit Linux host:
 
-```
-$ wget -c https://github.com/google/protobuf/releases/download/v3.11.4/protoc-3.11.4-linux-x86_64.zip
-$ sudo unzip protoc-3.11.4-linux-x86_64.zip -d /usr/local
+```sh
+wget -c https://github.com/protocolbuffers/protobuf/releases/download/v3.11.4/protoc-3.11.4-linux-x86_64.zip
+sudo unzip protoc-3.11.4-linux-x86_64.zip -d /usr/local
 ```
 
 `containerd` uses [Btrfs](https://en.wikipedia.org/wiki/Btrfs) it means that you
@@ -46,38 +46,20 @@ need to satisfy these dependencies in your system:
 
 At this point you are ready to build `containerd` yourself!
 
-## Build runc
+## Runc
 
-`runc` is the default container runtime used by `containerd` and is required to
-run containerd. While it is okay to download a runc binary and install that on
+Runc is the default container runtime used by `containerd` and is required to
+run containerd. While it is okay to download a `runc` binary and install that on
 the system, sometimes it is necessary to build runc directly when working with
-container runtime development. You can skip this step if you already have the
-correct version of `runc` installed.
-
-`runc` requires `libseccomp`. You may need to install the missing dependencies:
-
-* CentOS/Fedora: `yum install libseccomp libseccomp-devel`
-* Debian/Ubuntu: `apt-get install libseccomp libseccomp-dev`
-
-
-For the quick and dirty installation, you can use the following:
-
-```
-git clone https://github.com/opencontainers/runc
-cd runc
-make
-sudo make install
-```
-
-Make sure to follow the guidelines for versioning in [RUNC.md](/docs/RUNC.md) for the
-best results.
+container runtime development. Make sure to follow the guidelines for versioning
+in [RUNC.md](/docs/RUNC.md) for the best results.
 
 ## Build containerd
 
 `containerd` uses `make` to create a repeatable build flow. It means that you
 can run:
 
-```
+```sh
 cd containerd
 make
 ```
@@ -86,22 +68,44 @@ This is going to build all the project binaries in the `./bin/` directory.
 
 You can move them in your global path, `/usr/local/bin` with:
 
-```sudo
+```sh
 sudo make install
 ```
 
+The install prefix can be changed by passing the `PREFIX` variable (defaults
+to `/usr/local`).
+
+Note: if you set one of these vars, set them to the same values on all make stages
+(build as well as install).
+
+If you want to prepend an additional prefix on actual installation (eg. packaging or chroot install),
+you can pass it via `DESTDIR` variable:
+
+```sh
+sudo make install DESTDIR=/tmp/install-x973234/
+```
+
+The above command installs the `containerd` binary to `/tmp/install-x973234/usr/local/bin/containerd`
+
+The current `DESTDIR` convention is supported since containerd v1.6.
+Older releases were using `DESTDIR` for a different purpose that is similar to `PREFIX`.
+
+
 When making any changes to the gRPC API, you can use the installed `protoc`
 compiler to regenerate the API generated code packages with:
 
-```sudo
+```sh
 make generate
 ```
 
 > *Note*: Several build tags are currently available:
-> * `no_btrfs`: A build tag disables building the btrfs snapshot driver.
 > * `no_cri`: A build tag disables building Kubernetes [CRI](http://blog.kubernetes.io/2016/12/container-runtime-interface-cri-in-kubernetes.html) support into containerd.
 > See [here](https://github.com/containerd/cri-containerd#build-tags) for build tags of CRI plugin.
-> * `no_devmapper`: A build tag disables building the device mapper snapshot driver.
+> * snapshotters (alphabetical order)
+>   * `no_aufs`: A build tag disables building the aufs snapshot driver.
+>   * `no_btrfs`: A build tag disables building the Btrfs snapshot driver.
+>   * `no_devmapper`: A build tag disables building the device mapper snapshot driver.
+>   * `no_zfs`: A build tag disables building the ZFS snapshot driver.
 >
 > For example, adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
 > Makefile target will disable the btrfs driver within the containerd Go build.
@@ -117,7 +121,7 @@ Please refer to [RUNC.md](/docs/RUNC.md) for the currently supported version of
 
 You can build static binaries by providing a few variables to `make`:
 
-```sudo
+```sh
 make EXTRA_FLAGS="-buildmode pie" \
 	EXTRA_LDFLAGS='-linkmode external -extldflags "-fno-PIC -static"' \
 	BUILDTAGS="netgo osusergo static_build"
@@ -131,12 +135,12 @@ make EXTRA_FLAGS="-buildmode pie" \
 
 The following instructions assume you are at the parent directory of containerd source directory.
 
-## Build containerd
+## Build containerd in a container
 
 You can build `containerd` via a Linux-based Docker container.
 You can build an image from this `Dockerfile`:
 
-```
+```dockerfile
 FROM golang
 
 RUN apt-get update && \
@@ -158,10 +162,11 @@ This mounts `containerd` repository
 You are now ready to [build](#build-containerd):
 
 ```sh
- make && make install
+make && make install
 ```
 
-## Build containerd and runc
+## Build containerd and runc in a container
+
 To have complete core container runtime, you will need both `containerd` and `runc`. It is possible to build both of these via Docker container.
 
 You can use `git` to checkout `runc`:
@@ -177,7 +182,6 @@ FROM golang
 
 RUN apt-get update && \
     apt-get install -y libbtrfs-dev libseccomp-dev
-
 ```
 
 In our Docker container we will build `runc` build, which includes
@@ -246,6 +250,7 @@ go test -v -run . -test.root
 ```
 
 Example output from directly running `go test` to execute the `TestContainerList` test:
+
 ```sh
 sudo go test -v -run "TestContainerList" . -test.root
 INFO[0000] running tests against containerd revision=f2ae8a020a985a8d9862c9eb5ab66902c2888361 version=v1.0.0-beta.2-49-gf2ae8a0
@@ -255,6 +260,10 @@ PASS
 ok  	github.com/containerd/containerd	4.778s
 ```
 
+> *Note*: in order to run `sudo go` you need to
+> - either keep user PATH environment variable. ex: `sudo "PATH=$PATH" env go test <args>`
+> - or use `go test -exec` ex: `go test -exec sudo -v -run "TestTarWithXattr" ./archive/ -test.root`
+
 ## Additional tools
 
 ### containerd-stress

+ 62 - 31
vendor/github.com/containerd/containerd/Makefile

@@ -15,16 +15,22 @@
 
 # Go command to use for build
 GO ?= go
+INSTALL ?= install
 
 # Root directory of the project (absolute path).
 ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
 
 # Base path used to install.
-DESTDIR ?= /usr/local
+# The files will be installed under `$(DESTDIR)/$(PREFIX)`.
+# The convention of `DESTDIR` was changed in containerd v1.6.
+PREFIX        ?= /usr/local
+DATADIR       ?= $(PREFIX)/share
+MANDIR        ?= $(DATADIR)/man
+
 TEST_IMAGE_LIST ?=
 
 # Used to populate variables in version package.
-VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
+VERSION ?= $(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
 REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
 PACKAGE=github.com/containerd/containerd
 SHIM_CGO_ENABLED ?= 0
@@ -68,7 +74,7 @@ endif
 WHALE = "🇩"
 ONI = "👹"
 
-RELEASE=containerd-$(VERSION:v%=%).${GOOS}-${GOARCH}
+RELEASE=containerd-$(VERSION:v%=%)-${GOOS}-${GOARCH}
 CRIRELEASE=cri-containerd-$(VERSION:v%=%)-${GOOS}-${GOARCH}
 CRICNIRELEASE=cri-containerd-cni-$(VERSION:v%=%)-${GOOS}-${GOARCH}
 
@@ -83,12 +89,14 @@ ifdef BUILDTAGS
 endif
 GO_BUILDTAGS ?=
 GO_BUILDTAGS += ${DEBUG_TAGS}
-GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(GO_BUILDTAGS)",)
+GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(strip $(GO_BUILDTAGS))",)
 GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) $(EXTRA_LDFLAGS)'
 SHIM_GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) -extldflags "-static" $(EXTRA_LDFLAGS)'
 
 # Project packages.
 PACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /vendor/ | grep -v /integration)
+API_PACKAGES=$(shell (cd api && $(GO) list ${GO_TAGS} ./... | grep -v /vendor/ | grep -v /integration))
+NON_API_PACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /vendor/ | grep -v /integration | grep -v "containerd/api")
 TEST_REQUIRES_ROOT_PACKAGES=$(filter \
     ${PACKAGES}, \
     $(shell \
@@ -133,6 +141,9 @@ CRIDIR=$(OUTPUTDIR)/cri
 .PHONY: clean all AUTHORS build binaries test integration generate protos checkprotos coverage ci check help install uninstall vendor release mandir install-man genman install-cri-deps cri-release cri-cni-release cri-integration install-deps bin/cri-integration.test
 .DEFAULT: default
 
+# Forcibly set the default goal to all, in case an include above brought in a rule definition.
+.DEFAULT_GOAL := all
+
 all: binaries
 
 check: proto-fmt ## run all linters
@@ -150,7 +161,13 @@ generate: protos
 
 protos: bin/protoc-gen-gogoctrd ## generate protobuf
 	@echo "$(WHALE) $@"
-	@PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${PACKAGES}
+	@find . -path ./vendor -prune -false -o -name '*.pb.go' | xargs rm
+	$(eval TMPDIR := $(shell mktemp -d))
+	@mv ${ROOTDIR}/vendor ${TMPDIR}
+	@(cd ${ROOTDIR}/api && PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${API_PACKAGES})
+	@(PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${NON_API_PACKAGES})
+	@mv ${TMPDIR}/vendor ${ROOTDIR}
+	@rm -rf ${TMPDIR}
 
 check-protos: protos ## check if protobufs needs to be generated again
 	@echo "$(WHALE) $@"
@@ -194,7 +211,7 @@ bin/cri-integration.test:
 
 cri-integration: binaries bin/cri-integration.test ## run cri integration tests
 	@echo "$(WHALE) $@"
-	@./script/test/cri-integration.sh
+	@bash -x ./script/test/cri-integration.sh
 	@rm -rf bin/cri-integration.test
 
 benchmark: ## run benchmarks tests
@@ -213,16 +230,16 @@ bin/%: cmd/% FORCE
 	$(call BUILD_BINARY)
 
 bin/containerd-shim: cmd/containerd-shim FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
-	@echo "$(WHALE) bin/containerd-shim"
-	@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim
+	@echo "$(WHALE) $@"
+	@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim
 
 bin/containerd-shim-runc-v1: cmd/containerd-shim-runc-v1 FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
-	@echo "$(WHALE) bin/containerd-shim-runc-v1"
-	@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim-runc-v1 ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v1
+	@echo "$(WHALE) $@"
+	@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v1
 
 bin/containerd-shim-runc-v2: cmd/containerd-shim-runc-v2 FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
-	@echo "$(WHALE) bin/containerd-shim-runc-v2"
-	@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim-runc-v2 ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v2
+	@echo "$(WHALE) $@"
+	@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v2
 
 binaries: $(BINARIES) ## build binaries
 	@echo "$(WHALE) $@"
@@ -238,30 +255,31 @@ genman: man/containerd.8 man/ctr.8
 
 man/containerd.8: FORCE
 	@echo "$(WHALE) $@"
-	$(GO) run cmd/gen-manpages/main.go $(@F) $(@D)
+	$(GO) run -mod=readonly ${GO_TAGS} cmd/gen-manpages/main.go $(@F) $(@D)
 
 man/ctr.8: FORCE
 	@echo "$(WHALE) $@"
-	$(GO) run cmd/gen-manpages/main.go $(@F) $(@D)
+	$(GO) run -mod=readonly ${GO_TAGS} cmd/gen-manpages/main.go $(@F) $(@D)
 
 man/%: docs/man/%.md FORCE
 	@echo "$(WHALE) $@"
 	go-md2man -in "$<" -out "$@"
 
 define installmanpage
-mkdir -p $(DESTDIR)/man/man$(2);
-gzip -c $(1) >$(DESTDIR)/man/man$(2)/$(3).gz;
+$(INSTALL) -d $(DESTDIR)$(MANDIR)/man$(2);
+gzip -c $(1) >$(DESTDIR)$(MANDIR)/man$(2)/$(3).gz;
 endef
 
-install-man:
+install-man: man
 	@echo "$(WHALE) $@"
 	$(foreach manpage,$(addprefix man/,$(MANPAGES)), $(call installmanpage,$(manpage),$(subst .,,$(suffix $(manpage))),$(notdir $(manpage))))
 
+
 releases/$(RELEASE).tar.gz: $(BINARIES)
 	@echo "$(WHALE) $@"
 	@rm -rf releases/$(RELEASE) releases/$(RELEASE).tar.gz
-	@install -d releases/$(RELEASE)/bin
-	@install $(BINARIES) releases/$(RELEASE)/bin
+	@$(INSTALL) -d releases/$(RELEASE)/bin
+	@$(INSTALL) $(BINARIES) releases/$(RELEASE)/bin
 	@tar -czf releases/$(RELEASE).tar.gz -C releases/$(RELEASE) bin
 	@rm -rf releases/$(RELEASE)
 
@@ -272,18 +290,18 @@ release: releases/$(RELEASE).tar.gz
 # install of cri deps into release output directory
 ifeq ($(GOOS),windows)
 install-cri-deps: $(BINARIES)
-	mkdir -p $(CRIDIR)
+	$(INSTALL) -d $(CRIDIR)
 	DESTDIR=$(CRIDIR) script/setup/install-cni-windows
 	cp bin/* $(CRIDIR)
 else
 install-cri-deps: $(BINARIES)
 	@rm -rf ${CRIDIR}
-	@install -d ${CRIDIR}/usr/local/bin
-	@install -D -m 755 bin/* ${CRIDIR}/usr/local/bin
-	@install -d ${CRIDIR}/opt/containerd/cluster
+	@$(INSTALL) -d ${CRIDIR}/usr/local/bin
+	@$(INSTALL) -D -m 755 bin/* ${CRIDIR}/usr/local/bin
+	@$(INSTALL) -d ${CRIDIR}/opt/containerd/cluster
 	@cp -r contrib/gce ${CRIDIR}/opt/containerd/cluster/
-	@install -d ${CRIDIR}/etc/systemd/system
-	@install -m 644 containerd.service ${CRIDIR}/etc/systemd/system
+	@$(INSTALL) -d ${CRIDIR}/etc/systemd/system
+	@$(INSTALL) -m 644 containerd.service ${CRIDIR}/etc/systemd/system
 	echo "CONTAINERD_VERSION: '$(VERSION:v%=%)'" | tee ${CRIDIR}/opt/containerd/cluster/version
 
 	DESTDIR=$(CRIDIR) script/setup/install-runc
@@ -291,8 +309,8 @@ install-cri-deps: $(BINARIES)
 	DESTDIR=$(CRIDIR) script/setup/install-critools
 	DESTDIR=$(CRIDIR) script/setup/install-imgcrypt
 
-	@install -d $(CRIDIR)/bin
-	@install $(BINARIES) $(CRIDIR)/bin
+	@$(INSTALL) -d $(CRIDIR)/bin
+	@$(INSTALL) $(BINARIES) $(CRIDIR)/bin
 endif
 
 ifeq ($(GOOS),windows)
@@ -345,12 +363,12 @@ clean-test: ## clean up debris from previously failed tests
 
 install: ## install binaries
 	@echo "$(WHALE) $@ $(BINARIES)"
-	@mkdir -p $(DESTDIR)/bin
-	@install $(BINARIES) $(DESTDIR)/bin
+	@$(INSTALL) -d $(DESTDIR)$(PREFIX)/bin
+	@$(INSTALL) $(BINARIES) $(DESTDIR)$(PREFIX)/bin
 
 uninstall:
 	@echo "$(WHALE) $@"
-	@rm -f $(addprefix $(DESTDIR)/bin/,$(notdir $(BINARIES)))
+	@rm -f $(addprefix $(DESTDIR)$(PREFIX)/bin/,$(notdir $(BINARIES)))
 
 ifeq ($(GOOS),windows)
 install-deps:
@@ -394,10 +412,23 @@ root-coverage: ## generate coverage profiles for unit tests that require root
 		fi; \
 	done )
 
-vendor: ## vendor
+vendor: ## ensure all the go.mod/go.sum files are up-to-date including vendor/ directory
 	@echo "$(WHALE) $@"
 	@$(GO) mod tidy
 	@$(GO) mod vendor
+	@$(GO) mod verify
+	@(cd ${ROOTDIR}/integration/client && ${GO} mod tidy)
+
+verify-vendor: ## verify if all the go.mod/go.sum files are up-to-date
+	@echo "$(WHALE) $@"
+	$(eval TMPDIR := $(shell mktemp -d))
+	@cp -R ${ROOTDIR} ${TMPDIR}
+	@(cd ${TMPDIR}/containerd && ${GO} mod tidy)
+	@(cd ${TMPDIR}/containerd/integration/client && ${GO} mod tidy)
+	@diff -r -u -q ${ROOTDIR} ${TMPDIR}/containerd
+	@rm -rf ${TMPDIR}
+	@${ROOTDIR}/script/verify-go-modules.sh integration/client
+
 
 help: ## this help
 	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort

+ 1 - 1
vendor/github.com/containerd/containerd/Makefile.linux

@@ -20,7 +20,7 @@ COMMANDS += containerd-shim containerd-shim-runc-v1 containerd-shim-runc-v2
 
 # check GOOS for cross compile builds
 ifeq ($(GOOS),linux)
-  ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64 riscv64))
+  ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64))
 	GO_GCFLAGS += -buildmode=pie
   endif
 endif

+ 5 - 1
vendor/github.com/containerd/containerd/Makefile.windows

@@ -22,7 +22,11 @@ ifeq ($(GOARCH),amd64)
 	TESTFLAGS_RACE= -race
 endif
 
-BINARIES:=$(addsuffix .exe,$(BINARIES))
+WINDOWS_SHIM=bin/containerd-shim-runhcs-v1.exe
+BINARIES := $(addsuffix .exe,$(BINARIES)) $(WINDOWS_SHIM)
+
+$(WINDOWS_SHIM): script/setup/install-runhcs-shim go.mod
+	DESTDIR=$(CURDIR)/bin $<
 
 bin/%.exe: cmd/% FORCE
 	$(BUILD_BINARY)

+ 0 - 17
vendor/github.com/containerd/containerd/Protobuild.toml

@@ -31,28 +31,11 @@ plugins = ["grpc", "fieldpath"]
   "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types"
   "google/rpc/status.proto" = "github.com/gogo/googleapis/google/rpc"
 
-[[overrides]]
-prefixes = ["github.com/containerd/containerd/api/events"]
-plugins = ["fieldpath"] # disable grpc for this package
-
-[[overrides]]
-prefixes = ["github.com/containerd/containerd/api/services/ttrpc/events/v1"]
-plugins = ["ttrpc", "fieldpath"]
-
 [[overrides]]
 # enable ttrpc and disable fieldpath and grpc for the shim
 prefixes = ["github.com/containerd/containerd/runtime/v1/shim/v1", "github.com/containerd/containerd/runtime/v2/task"]
 plugins = ["ttrpc"]
 
-# Aggregrate the API descriptors to lock down API changes.
-[[descriptors]]
-prefix = "github.com/containerd/containerd/api"
-target = "api/next.pb.txt"
-ignore_files = [
-	"google/protobuf/descriptor.proto",
-	"gogoproto/gogo.proto"
-]
-
 # Lock down runc config
 [[descriptors]]
 prefix = "github.com/containerd/containerd/runtime/linux/runctypes"

+ 21 - 10
vendor/github.com/containerd/containerd/README.md

@@ -1,9 +1,9 @@
-![containerd banner](https://raw.githubusercontent.com/cncf/artwork/master/projects/containerd/horizontal/color/containerd-horizontal-color.png)
+![containerd banner light mode](https://raw.githubusercontent.com/cncf/artwork/master/projects/containerd/horizontal/color/containerd-horizontal-color.png#gh-light-mode-only)
+![containerd banner dark mode](https://raw.githubusercontent.com/cncf/artwork/master/projects/containerd/horizontal/white/containerd-horizontal-white.png#gh-dark-mode-only)
 
 [![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/containerd)](https://pkg.go.dev/github.com/containerd/containerd)
 [![Build Status](https://github.com/containerd/containerd/workflows/CI/badge.svg)](https://github.com/containerd/containerd/actions?query=workflow%3ACI)
 [![Nightlies](https://github.com/containerd/containerd/workflows/Nightly/badge.svg)](https://github.com/containerd/containerd/actions?query=workflow%3ANightly)
-[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
 [![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd)
 [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1271/badge)](https://bestpractices.coreinfrastructure.org/projects/1271)
 
@@ -21,7 +21,7 @@ We are a large inclusive OSS project that is welcoming help of any kind shape or
 * Documentation help is needed to make the product easier to consume and extend.
 * We need OSS community outreach / organizing help to get the word out; manage
 and create messaging and educational content; and to help with social media, community forums/groups, and google groups.
-* We are actively inviting new [security advisors](https://github.com/containerd/project/blob/master/GOVERNANCE.md#security-advisors) to join the team.
+* We are actively inviting new [security advisors](https://github.com/containerd/project/blob/main/GOVERNANCE.md#security-advisors) to join the team.
 * New sub-projects are being created, core and non-core that could use additional development help.
 * Each of the [containerd projects](https://github.com/containerd) has a list of issues currently being worked on or that need help resolving.
   - If the issue has not already been assigned to someone, or has not made recent progress and you are interested, please inquire.
@@ -41,7 +41,7 @@ If you are interested in trying out containerd see our example at [Getting Start
 ## Nightly builds
 
 There are nightly builds available for download [here](https://github.com/containerd/containerd/actions?query=workflow%3ANightly).
-Binaries are generated from `master` branch every night for `Linux` and `Windows`.
+Binaries are generated from `main` branch every night for `Linux` and `Windows`.
 
 Please be aware: nightly builds might have critical bugs, it's not recommended for use in production and no support provided.
 
@@ -68,6 +68,14 @@ your system. See more details in [Checkpoint and Restore](#checkpoint-and-restor
 
 Build requirements for developers are listed in [BUILDING](BUILDING.md).
 
+
+## Supported Registries
+
+Any registry which is compliant with the [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec)
+is supported by containerd.
+
+For configuring registries, see [registry host configuration documentation](docs/hosts.md)
+
 ## Features
 
 ### Client
@@ -77,8 +85,11 @@ containerd offers a full client package to help you integrate containerd into yo
 ```go
 
 import (
+  "context"
+
   "github.com/containerd/containerd"
   "github.com/containerd/containerd/cio"
+  "github.com/containerd/containerd/namespaces"
 )
 
 
@@ -269,7 +280,7 @@ loaded for the user's shell environment.
 `cri` is a native plugin of containerd. Since containerd 1.1, the cri plugin is built into the release binaries and enabled by default.
 
 > **Note:** As of containerd 1.5, the `cri` plugin is merged into the containerd/containerd repo. For example, the source code previously stored under [`containerd/cri/pkg`](https://github.com/containerd/cri/tree/release/1.4/pkg)
-was moved to [`containerd/containerd/pkg/cri` package](https://github.com/containerd/containerd/tree/master/pkg/cri).
+was moved to [`containerd/containerd/pkg/cri` package](https://github.com/containerd/containerd/tree/main/pkg/cri).
 
 The `cri` plugin has reached GA status, representing that it is:
 * Feature complete
@@ -289,7 +300,7 @@ A Kubernetes incubator project, [cri-tools](https://github.com/kubernetes-sigs/c
 * [CRI Plugin Testing Guide](./docs/cri/testing.md)
 * [Debugging Pods, Containers, and Images with `crictl`](./docs/cri/crictl.md)
 * [Configuring `cri` Plugins](./docs/cri/config.md)
-* [Configuring containerd](https://github.com/containerd/containerd/blob/master/docs/man/containerd-config.8.md)
+* [Configuring containerd](https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.8.md)
 
 ### Communication
 
@@ -315,14 +326,14 @@ copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by
 
 ## Project details
 
-**containerd** is the primary open source project within the broader containerd GitHub repository.
+**containerd** is the primary open source project within the broader containerd GitHub organization.
 However, all projects within the repo have common maintainership, governance, and contributing
 guidelines which are stored in a `project` repository commonly for all containerd projects.
 
 Please find all these core project documents, including the:
- * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
- * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
- * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
 
 information in our [`containerd/project`](https://github.com/containerd/project) repository.
 

+ 39 - 23
vendor/github.com/containerd/containerd/RELEASES.md

@@ -27,7 +27,7 @@ considered "pre-releases".
 
 ### Major and Minor Releases
 
-Major and minor releases of containerd will be made from master. Releases of
+Major and minor releases of containerd will be made from main. Releases of
 containerd will be marked with GPG signed tags and announced at
 https://github.com/containerd/containerd/releases. The tag will be of the
 format `v<major>.<minor>.<patch>` and should be made with the command `git tag
@@ -43,7 +43,7 @@ done against that branch.
 
 Pre-releases, such as alphas, betas and release candidates will be conducted
 from their source branch. For major and minor releases, these releases will be
-done from master. For patch releases, these pre-releases should be done within
+done from main. For patch releases, these pre-releases should be done within
 the corresponding release branch.
 
 While pre-releases are done to assist in the stabilization process, no
@@ -89,7 +89,7 @@ whichever is longer. Additionally, releases may have an extended security suppor
 period after the end of the active period to accept security backports. This
 timeframe will be decided by maintainers before the end of the active status.
 
-The current state is available in the following table:
+The current state is available in the following tables:
 
 | Release | Status      | Start            | End of Life       |
 |---------|-------------|------------------|-------------------|
@@ -100,12 +100,27 @@ The current state is available in the following table:
 | [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.8)  | End of Life | April 23, 2018  | October 23, 2019 |
 | [1.2](https://github.com/containerd/containerd/releases/tag/v1.2.13) | End of Life | October 24, 2018 | October 15, 2020 |
 | [1.3](https://github.com/containerd/containerd/releases/tag/v1.3.10) | End of Life | September 26, 2019  | March 4, 2021 |
-| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.4)  | Active   | August 17, 2020 | max(August 17, 2021, release of 1.5.0 + 6 months) |
-| [1.5](https://github.com/containerd/containerd/milestone/30)         | Next   | TBD  | max(TBD+1 year, release of 1.6.0 + 6 months) |
+| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.12) | Extended   | August 17, 2020 | March 3, 2022 (Extended) |
+| [1.5](https://github.com/containerd/containerd/releases/tag/v1.5.9)  | Active   | May 3, 2021  | October 28, 2022 |
+| [1.6](https://github.com/containerd/containerd/releases/tag/v1.6.0)  | Active   | February 15, 2022  | max(February 15, 2023 or release of 1.7.0 + 6 months) |
+| [1.7](https://github.com/containerd/containerd/milestone/42)         | Next   | TBD  | TBD |
 
 Note that branches and releases from before 1.0 may not follow these rules.
 
-This table should be updated as part of the release preparation process.
+| CRI-Containerd Version | Containerd Version | Kubernetes Version | CRI Version  |
+|------------------------|--------------------|--------------------|--------------|
+| v1.0.0-alpha.x         |                    | 1.7, 1.8           | v1alpha1     |
+| v1.0.0-beta.x          |                    | 1.9                | v1alpha1     |
+| End-Of-Life            | v1.1 (End-Of-Life) | 1.10+              | v1alpha2     |
+|                        | v1.2 (End-Of-Life) | 1.10+              | v1alpha2     |
+|                        | v1.3 (End-Of-Life) | 1.12+              | v1alpha2     |
+|                        | v1.4               | 1.19+              | v1alpha2     |
+|                        | v1.5               | 1.20+              | v1alpha2     |
+|                        | v1.6               | 1.23+              | v1, v1alpha2 |
+
+**Note:** The support table above specifies the Kubernetes Version that was supported at time of release of the containerd - cri integration and Kubernetes only supports n-3 minor release versions.
+
+These tables should be updated as part of the release preparation process.
 
 ### Backporting
 
@@ -115,11 +130,11 @@ will be features for the next _minor_ or _major_ release. For the most part,
 this process is straightforward and we are here to help make it as smooth as
 possible.
 
-If there are important fixes that need to be backported, please let use know in
+If there are important fixes that need to be backported, please let us know in
 one of three ways:
 
 1. Open an issue.
-2. Open a PR with cherry-picked change from master.
+2. Open a PR with cherry-picked change from main.
 3. Open a PR with a ported fix.
 
 __If you are reporting a security issue, please reach out discreetly at security@containerd.io__.
@@ -127,10 +142,10 @@ Remember that backported PRs must follow the versioning guidelines from this doc
 
 Any release that is "active" can accept backports. Opening a backport PR is
 fairly straightforward. The steps differ depending on whether you are pulling
-a fix from master or need to draft a new commit specific to a particular
+a fix from main or need to draft a new commit specific to a particular
 branch.
 
-To cherry pick a straightforward commit from master, simply use the cherry pick
+To cherry pick a straightforward commit from main, simply use the cherry pick
 process:
 
 1. Pick the branch to which you want backported, usually in the format
@@ -154,7 +169,7 @@ process:
 	```
 
    Make sure to replace `stevvooe` with whatever fork you are using to open
-   the PR. When you open the PR, make sure to switch `master` with whatever
+   the PR. When you open the PR, make sure to switch `main` with whatever
    release branch you are targeting with the fix. Make sure the PR title has
    `[<release branch>]` prefixed. e.g.:
 
@@ -162,11 +177,11 @@ process:
    [release/1.4] Fix foo in bar
    ```
 
-If there is no existing fix in master, you should first fix the bug in master,
+If there is no existing fix in main, you should first fix the bug in main,
 or ask us a maintainer or contributor to do it via an issue. Once that PR is
 completed, open a PR using the process above.
 
-Only when the bug is not seen in master and must be made for the specific
+Only when the bug is not seen in main and must be made for the specific
 release branch should you open a PR with new code.
 
 ## Public API Stability
@@ -177,12 +192,12 @@ containerd versions:
 
 | Component        | Status   | Stabilized Version | Links         |
 |------------------|----------|--------------------|---------------|
-| GRPC API         | Stable   | 1.0                | [api/](api) |
+| GRPC API         | Stable   | 1.0                | [gRPC API](#grpc-api) |
 | Metrics API      | Stable   | 1.0                | - |
 | Runtime Shim API | Stable   | 1.2                | - |
-| Daemon Config    | Stable   | 1.0			       | - |
+| Daemon Config    | Stable   | 1.0                | - |
+| CRI GRPC API     | Stable   | 1.6 (_CRI v1_)     | [cri-api](https://github.com/kubernetes/cri-api/tree/master/pkg/apis/runtime/v1) |
 | Go client API    | Unstable | _future_           | [godoc](https://godoc.org/github.com/containerd/containerd) |
-| CRI GRPC API     | Unstable | v1alpha2 _current_ | [cri-api](https://github.com/kubernetes/cri-api/tree/master/pkg/apis/runtime/v1alpha2) |
 | `ctr` tool       | Unstable | Out of scope       | - |
 
 From the version stated in the above table, that component must adhere to the
@@ -201,7 +216,7 @@ version jump.
 To ensure compatibility, we have collected the entire GRPC API symbol set into
 a single file. At each _minor_ release of containerd, we will move the current
 `next.pb.txt` file to a file named for the minor version, such as `1.0.pb.txt`,
-enumerating the support services and messages. See [api/](api) for details.
+enumerating the support services and messages.
 
 Note that new services may be added in _minor_ releases. New service methods
 and new fields on messages may be added if they are optional.
@@ -321,9 +336,10 @@ against total impact.
 
 The deprecated features are shown in the following table:
 
-| Component                                                            | Deprecation release | Target release for removal | Recommendation                |
-|----------------------------------------------------------------------|---------------------|----------------------------|-------------------------------|
-| Runtime V1 API and implementation (`io.containerd.runtime.v1.linux`) | containerd v1.4     | containerd v2.0            | Use `io.containerd.runc.v2`   |
-| Runc V1 implementation of Runtime V2 (`io.containerd.runc.v1`)       | containerd v1.4     | containerd v2.0            | Use `io.containerd.runc.v2`   |
-| config.toml `version = 1`                                            | containerd v1.5     | containerd v2.0            | Use config.toml `version = 2` |
-| Built-in `aufs` snapshotter                                          | containerd v1.5     | containerd v2.0            | Use `overlayfs` snapshotter   |
+| Component                                                            | Deprecation release | Target release for removal | Recommendation                    |
+|----------------------------------------------------------------------|---------------------|----------------------------|-----------------------------------|
+| Runtime V1 API and implementation (`io.containerd.runtime.v1.linux`) | containerd v1.4     | containerd v2.0            | Use `io.containerd.runc.v2`       |
+| Runc V1 implementation of Runtime V2 (`io.containerd.runc.v1`)       | containerd v1.4     | containerd v2.0            | Use `io.containerd.runc.v2`       |
+| config.toml `version = 1`                                            | containerd v1.5     | containerd v2.0            | Use config.toml `version = 2`     |
+| Built-in `aufs` snapshotter                                          | containerd v1.5     | containerd v2.0            | Use `overlayfs` snapshotter       |
+| `cri-containerd-*.tar.gz` release bundles                            | containerd v1.6     | containerd v2.0            | Use `containerd-*.tar.gz` bundles |

+ 29 - 2
vendor/github.com/containerd/containerd/Vagrantfile

@@ -17,7 +17,7 @@
 
 # Vagrantfile for cgroup2 and SELinux
 Vagrant.configure("2") do |config|
-  config.vm.box = "fedora/34-cloud-base"
+  config.vm.box = "fedora/35-cloud-base"
   memory = 4096
   cpus = 2
   config.vm.provider :virtualbox do |v|
@@ -77,7 +77,7 @@ Vagrant.configure("2") do |config|
   config.vm.provision "install-golang", type: "shell", run: "once" do |sh|
     sh.upload_path = "/tmp/vagrant-install-golang"
     sh.env = {
-        'GO_VERSION': ENV['GO_VERSION'] || "1.16.14",
+        'GO_VERSION': ENV['GO_VERSION'] || "1.17.7",
     }
     sh.inline = <<~SHELL
         #!/usr/bin/env bash
@@ -257,4 +257,31 @@ EOF
     SHELL
   end
 
+  # Rootless Podman is used for testing CRI-in-UserNS
+  # (We could use rootless nerdctl, but we are using Podman here because it is available in dnf)
+  config.vm.provision "install-rootless-podman", type: "shell", run: "never" do |sh|
+    sh.upload_path = "/tmp/vagrant-install-rootless-podman"
+    sh.inline = <<~SHELL
+        #!/usr/bin/env bash
+        set -eux -o pipefail
+        # Delegate cgroup v2 controllers to rootless
+        mkdir -p /etc/systemd/system/user@.service.d
+        cat > /etc/systemd/system/user@.service.d/delegate.conf << EOF
+[Service]
+Delegate=yes
+EOF
+        systemctl daemon-reload
+        # Install Podman
+        dnf install -y podman
+        # Configure Podman to resolve `golang` to `docker.io/library/golang`
+        mkdir -p /etc/containers
+        cat > /etc/containers/registries.conf <<EOF
+[registries.search]
+registries = ['docker.io']
+EOF
+        # Disable SELinux to allow overlayfs
+        setenforce 0
+    SHELL
+  end
+
 end

+ 17 - 0
vendor/github.com/containerd/containerd/api/services/containers/v1/doc.go

@@ -0,0 +1,17 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package containers

+ 17 - 0
vendor/github.com/containerd/containerd/api/services/content/v1/doc.go

@@ -0,0 +1,17 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package content

+ 17 - 0
vendor/github.com/containerd/containerd/api/services/diff/v1/doc.go

@@ -0,0 +1,17 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package diff

+ 17 - 0
vendor/github.com/containerd/containerd/api/services/namespaces/v1/doc.go

@@ -0,0 +1,17 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package namespaces

+ 17 - 0
vendor/github.com/containerd/containerd/api/services/snapshots/v1/doc.go

@@ -0,0 +1,17 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package snapshots

+ 17 - 0
vendor/github.com/containerd/containerd/api/services/tasks/v1/doc.go

@@ -0,0 +1,17 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package tasks

+ 133 - 86
vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go

@@ -51,6 +51,7 @@ type CreateTaskRequest struct {
 	Terminal             bool              `protobuf:"varint,7,opt,name=terminal,proto3" json:"terminal,omitempty"`
 	Checkpoint           *types.Descriptor `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
 	Options              *types1.Any       `protobuf:"bytes,9,opt,name=options,proto3" json:"options,omitempty"`
+	RuntimePath          string            `protobuf:"bytes,10,opt,name=runtime_path,json=runtimePath,proto3" json:"runtime_path,omitempty"`
 	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
 	XXX_unrecognized     []byte            `json:"-"`
 	XXX_sizecache        int32             `json:"-"`
@@ -1169,93 +1170,95 @@ func init() {
 }
 
 var fileDescriptor_310e7127b8a26f14 = []byte{
-	// 1376 bytes of a gzipped FileDescriptorProto
+	// 1400 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0x45,
-	0x14, 0xee, 0xfa, 0xee, 0xe3, 0xa4, 0x4d, 0x96, 0x34, 0x98, 0xa5, 0x8a, 0xc3, 0xf2, 0x62, 0x02,
-	0x5d, 0x53, 0x17, 0x55, 0x55, 0x5b, 0x55, 0xe4, 0x46, 0x64, 0x41, 0xd5, 0x74, 0x5b, 0xa0, 0xaa,
-	0x84, 0xc2, 0xc6, 0x3b, 0x71, 0x46, 0xb1, 0x77, 0xb6, 0x3b, 0xe3, 0xb4, 0xe6, 0x05, 0x7e, 0x42,
-	0x5f, 0x79, 0x81, 0xbf, 0x93, 0x47, 0x1e, 0x11, 0xaa, 0x02, 0xf5, 0xbf, 0xe0, 0x0d, 0xcd, 0x65,
-	0xd7, 0x1b, 0x3b, 0xf6, 0x3a, 0x4d, 0xc3, 0x4b, 0x32, 0x33, 0x7b, 0xce, 0x99, 0x33, 0xdf, 0xb9,
-	0x7d, 0x09, 0xac, 0xb5, 0x30, 0xdb, 0xef, 0xee, 0x5a, 0x4d, 0xd2, 0xa9, 0x35, 0x89, 0xc7, 0x1c,
-	0xec, 0xa1, 0xc0, 0x8d, 0x2f, 0x1d, 0x1f, 0xd7, 0x28, 0x0a, 0x0e, 0x71, 0x13, 0xd1, 0x1a, 0x73,
-	0xe8, 0x01, 0xad, 0x1d, 0xde, 0x90, 0x0b, 0xcb, 0x0f, 0x08, 0x23, 0xfa, 0xb5, 0x81, 0xb4, 0x15,
-	0x4a, 0x5a, 0x52, 0xe0, 0xf0, 0x86, 0xf1, 0x61, 0x8b, 0x90, 0x56, 0x1b, 0xd5, 0x84, 0xec, 0x6e,
-	0x77, 0xaf, 0x86, 0x3a, 0x3e, 0xeb, 0x49, 0x55, 0xe3, 0x83, 0xe1, 0x8f, 0x8e, 0x17, 0x7e, 0x5a,
-	0x68, 0x91, 0x16, 0x11, 0xcb, 0x1a, 0x5f, 0xa9, 0xd3, 0x5b, 0x53, 0xf9, 0xcb, 0x7a, 0x3e, 0xa2,
-	0xb5, 0x0e, 0xe9, 0x7a, 0x4c, 0xe9, 0xdd, 0x3e, 0x8b, 0x1e, 0x62, 0x01, 0x6e, 0xaa, 0xd7, 0x19,
-	0x77, 0xcf, 0xa0, 0xe9, 0x22, 0xda, 0x0c, 0xb0, 0xcf, 0x48, 0xa0, 0x94, 0xef, 0x9c, 0x41, 0x99,
-	0x23, 0x26, 0x7e, 0x28, 0xdd, 0xca, 0x30, 0x36, 0x0c, 0x77, 0x10, 0x65, 0x4e, 0xc7, 0x97, 0x02,
-	0xe6, 0x51, 0x0a, 0xe6, 0xd7, 0x03, 0xe4, 0x30, 0xf4, 0xc4, 0xa1, 0x07, 0x36, 0x7a, 0xde, 0x45,
-	0x94, 0xe9, 0x75, 0x98, 0x89, 0xcc, 0xef, 0x60, 0xb7, 0xac, 0x2d, 0x6b, 0xd5, 0xe2, 0xda, 0x95,
-	0xfe, 0x71, 0xa5, 0xb4, 0x1e, 0x9e, 0x37, 0x36, 0xec, 0x52, 0x24, 0xd4, 0x70, 0xf5, 0x1a, 0xe4,
-	0x02, 0x42, 0xd8, 0x1e, 0x2d, 0xa7, 0x97, 0xd3, 0xd5, 0x52, 0xfd, 0x7d, 0x2b, 0x16, 0x52, 0xe1,
-	0x9d, 0xf5, 0x80, 0x83, 0x69, 0x2b, 0x31, 0x7d, 0x01, 0xb2, 0x94, 0xb9, 0xd8, 0x2b, 0x67, 0xb8,
-	0x75, 0x5b, 0x6e, 0xf4, 0x45, 0xc8, 0x51, 0xe6, 0x92, 0x2e, 0x2b, 0x67, 0xc5, 0xb1, 0xda, 0xa9,
-	0x73, 0x14, 0x04, 0xe5, 0x5c, 0x74, 0x8e, 0x82, 0x40, 0x37, 0xa0, 0xc0, 0x50, 0xd0, 0xc1, 0x9e,
-	0xd3, 0x2e, 0xe7, 0x97, 0xb5, 0x6a, 0xc1, 0x8e, 0xf6, 0xfa, 0x3d, 0x80, 0xe6, 0x3e, 0x6a, 0x1e,
-	0xf8, 0x04, 0x7b, 0xac, 0x5c, 0x58, 0xd6, 0xaa, 0xa5, 0xfa, 0xb5, 0x51, 0xb7, 0x36, 0x22, 0xc4,
-	0xed, 0x98, 0xbc, 0x6e, 0x41, 0x9e, 0xf8, 0x0c, 0x13, 0x8f, 0x96, 0x8b, 0x42, 0x75, 0xc1, 0x92,
-	0x68, 0x5a, 0x21, 0x9a, 0xd6, 0xaa, 0xd7, 0xb3, 0x43, 0x21, 0xf3, 0x19, 0xe8, 0x71, 0x24, 0xa9,
-	0x4f, 0x3c, 0x8a, 0xde, 0x0a, 0xca, 0x39, 0x48, 0xfb, 0xd8, 0x2d, 0xa7, 0x96, 0xb5, 0xea, 0xac,
-	0xcd, 0x97, 0x66, 0x0b, 0x66, 0x1e, 0x33, 0x27, 0x60, 0xe7, 0x09, 0xd0, 0xc7, 0x90, 0x47, 0x2f,
-	0x51, 0x73, 0x47, 0x59, 0x2e, 0xae, 0x41, 0xff, 0xb8, 0x92, 0xdb, 0x7c, 0x89, 0x9a, 0x8d, 0x0d,
-	0x3b, 0xc7, 0x3f, 0x35, 0x5c, 0xf3, 0x23, 0x98, 0x55, 0x17, 0x29, 0xff, 0x95, 0x2f, 0xda, 0xc0,
-	0x97, 0x2d, 0x98, 0xdf, 0x40, 0x6d, 0x74, 0xee, 0x8c, 0x31, 0x7f, 0xd3, 0xe0, 0xb2, 0xb4, 0x14,
-	0xdd, 0xb6, 0x08, 0xa9, 0x48, 0x39, 0xd7, 0x3f, 0xae, 0xa4, 0x1a, 0x1b, 0x76, 0x0a, 0x9f, 0x82,
-	0x88, 0x5e, 0x81, 0x12, 0x7a, 0x89, 0xd9, 0x0e, 0x65, 0x0e, 0xeb, 0xf2, 0x9c, 0xe3, 0x5f, 0x80,
-	0x1f, 0x3d, 0x16, 0x27, 0xfa, 0x2a, 0x14, 0xf9, 0x0e, 0xb9, 0x3b, 0x0e, 0x13, 0x29, 0x56, 0xaa,
-	0x1b, 0x23, 0x01, 0x7c, 0x12, 0x96, 0xc3, 0x5a, 0xe1, 0xe8, 0xb8, 0x72, 0xe9, 0xd5, 0xdf, 0x15,
-	0xcd, 0x2e, 0x48, 0xb5, 0x55, 0x66, 0x12, 0x58, 0x90, 0xfe, 0x6d, 0x07, 0xa4, 0x89, 0x28, 0xbd,
-	0x70, 0xf4, 0x11, 0xc0, 0x16, 0xba, 0xf8, 0x20, 0x6f, 0x42, 0x49, 0x5c, 0xa3, 0x40, 0xbf, 0x05,
-	0x79, 0x5f, 0x3e, 0x50, 0x5c, 0x31, 0x54, 0x23, 0x87, 0x37, 0x54, 0x99, 0x84, 0x20, 0x84, 0xc2,
-	0xe6, 0x0a, 0xcc, 0x7d, 0x83, 0x29, 0xe3, 0x69, 0x10, 0x41, 0xb3, 0x08, 0xb9, 0x3d, 0xdc, 0x66,
-	0x28, 0x90, 0xde, 0xda, 0x6a, 0xc7, 0x93, 0x26, 0x26, 0x1b, 0xd5, 0x46, 0x56, 0xb4, 0xf8, 0xb2,
-	0x26, 0x3a, 0xc6, 0xe4, 0x6b, 0xa5, 0xa8, 0xf9, 0x4a, 0x83, 0xd2, 0xd7, 0xb8, 0xdd, 0xbe, 0x68,
-	0x90, 0x44, 0xc3, 0xc1, 0x2d, 0xde, 0x56, 0x64, 0x6e, 0xa9, 0x1d, 0x4f, 0x45, 0xa7, 0xdd, 0x16,
-	0x19, 0x55, 0xb0, 0xf9, 0xd2, 0xfc, 0x57, 0x03, 0x9d, 0x2b, 0xbf, 0x83, 0x2c, 0x89, 0x7a, 0x62,
-	0xea, 0xf4, 0x9e, 0x98, 0x1e, 0xd3, 0x13, 0x33, 0x63, 0x7b, 0x62, 0x76, 0xa8, 0x27, 0x56, 0x21,
-	0x43, 0x7d, 0xd4, 0x14, 0x5d, 0x74, 0x5c, 0x4b, 0x13, 0x12, 0x71, 0x94, 0xf2, 0x63, 0x53, 0xe9,
-	0x2a, 0xbc, 0x77, 0xe2, 0xe9, 0x32, 0xb2, 0xe6, 0xaf, 0x1a, 0xcc, 0xd9, 0x88, 0xe2, 0x9f, 0xd0,
-	0x36, 0xeb, 0x5d, 0x78, 0xa8, 0x16, 0x20, 0xfb, 0x02, 0xbb, 0x6c, 0x5f, 0x45, 0x4a, 0x6e, 0x38,
-	0x3a, 0xfb, 0x08, 0xb7, 0xf6, 0x65, 0xf5, 0xcf, 0xda, 0x6a, 0x67, 0xfe, 0x0c, 0x97, 0xd7, 0xdb,
-	0x84, 0xa2, 0xc6, 0xc3, 0xff, 0xc3, 0x31, 0x19, 0xce, 0xb4, 0x88, 0x82, 0xdc, 0x98, 0x5f, 0xc1,
-	0xdc, 0xb6, 0xd3, 0xa5, 0xe7, 0xee, 0x9f, 0x5b, 0x30, 0x6f, 0x23, 0xda, 0xed, 0x9c, 0xdb, 0xd0,
-	0x26, 0x5c, 0xe1, 0xc5, 0xb9, 0x8d, 0xdd, 0xf3, 0x24, 0xaf, 0x69, 0xcb, 0x7e, 0x20, 0xcd, 0xa8,
-	0x12, 0xbf, 0x0f, 0x45, 0xd5, 0x2e, 0x50, 0x58, 0xe6, 0xcb, 0x93, 0xca, 0xbc, 0xe1, 0xed, 0x11,
-	0x7b, 0xa0, 0x62, 0xbe, 0xd6, 0xe0, 0xea, 0x7a, 0x34, 0x93, 0xcf, 0xcb, 0x51, 0x76, 0x60, 0xde,
-	0x77, 0x02, 0xe4, 0xb1, 0x9d, 0x18, 0x2f, 0x90, 0xe1, 0xab, 0xf3, 0xfe, 0xff, 0xd7, 0x71, 0x65,
-	0x25, 0xc6, 0xb6, 0x88, 0x8f, 0xbc, 0x48, 0x9d, 0xd6, 0x5a, 0xe4, 0xba, 0x8b, 0x5b, 0x88, 0x32,
-	0x6b, 0x43, 0xfc, 0xb2, 0xe7, 0xa4, 0xb1, 0xf5, 0x53, 0x39, 0x43, 0x7a, 0x1a, 0xce, 0xf0, 0x14,
-	0x16, 0x87, 0x5f, 0x17, 0x01, 0x57, 0x1a, 0x30, 0xc1, 0x53, 0x3b, 0xe4, 0x08, 0x79, 0x89, 0x2b,
-	0x98, 0xbf, 0xa7, 0x60, 0xfe, 0x5b, 0xdf, 0x7d, 0x07, 0xc4, 0xae, 0x0e, 0xc5, 0x00, 0x51, 0xd2,
-	0x0d, 0x9a, 0x88, 0x0a, 0xb0, 0xc6, 0xbd, 0x6a, 0x20, 0xa6, 0xef, 0x42, 0xc9, 0xf1, 0x3c, 0xc2,
-	0x9c, 0x10, 0x0b, 0xee, 0xfd, 0x97, 0xd6, 0x24, 0x92, 0x6f, 0x8d, 0x78, 0x6b, 0xad, 0x0e, 0x4c,
-	0x6c, 0x7a, 0x2c, 0xe8, 0xd9, 0x71, 0xa3, 0xc6, 0x7d, 0x98, 0x1b, 0x16, 0xe0, 0xcd, 0xf9, 0x00,
-	0xf5, 0xd4, 0xec, 0xe1, 0x4b, 0x5e, 0x82, 0x87, 0x4e, 0xbb, 0x8b, 0xc2, 0x8e, 0x2a, 0x36, 0x77,
-	0x52, 0xb7, 0x35, 0x73, 0x05, 0x2e, 0x3f, 0x90, 0x2c, 0x3d, 0x44, 0xa7, 0x0c, 0x79, 0x39, 0xae,
-	0x24, 0xde, 0x45, 0x3b, 0xdc, 0xf2, 0x0a, 0x89, 0x64, 0xa3, 0xe1, 0x95, 0x57, 0x24, 0x5f, 0x05,
-	0xa7, 0x7c, 0x0a, 0xe1, 0x15, 0x02, 0x76, 0x28, 0x68, 0xee, 0x41, 0xe9, 0x7b, 0x07, 0x5f, 0xfc,
-	0x80, 0x0f, 0x60, 0x46, 0xde, 0xa3, 0x7c, 0x1d, 0x22, 0x4b, 0xda, 0x64, 0xb2, 0x94, 0x7a, 0x1b,
-	0xb2, 0x54, 0x7f, 0x3d, 0x03, 0x59, 0x31, 0xde, 0xf5, 0x03, 0xc8, 0x49, 0x22, 0xac, 0xd7, 0x26,
-	0x47, 0x7c, 0xe4, 0x0f, 0x0f, 0xe3, 0xf3, 0xe9, 0x15, 0xd4, 0xd3, 0x7e, 0x84, 0xac, 0x20, 0xac,
-	0xfa, 0xca, 0x64, 0xd5, 0x38, 0x7d, 0x36, 0x3e, 0x9d, 0x4a, 0x56, 0xdd, 0xd0, 0x82, 0x9c, 0x64,
-	0x81, 0x49, 0xcf, 0x19, 0x61, 0xc5, 0xc6, 0x67, 0xd3, 0x28, 0x44, 0x17, 0x3d, 0x87, 0xd9, 0x13,
-	0x74, 0x53, 0xaf, 0x4f, 0xa3, 0x7e, 0x92, 0x75, 0x9c, 0xf1, 0xca, 0x67, 0x90, 0xde, 0x42, 0x4c,
-	0xaf, 0x4e, 0x56, 0x1a, 0x70, 0x52, 0xe3, 0x93, 0x29, 0x24, 0x23, 0xdc, 0x32, 0x7c, 0x1c, 0xe8,
-	0xd6, 0x64, 0x95, 0x61, 0x0a, 0x69, 0xd4, 0xa6, 0x96, 0x57, 0x17, 0x35, 0x20, 0xc3, 0x19, 0xa1,
-	0x9e, 0xe0, 0x5b, 0x8c, 0x35, 0x1a, 0x8b, 0x23, 0xc9, 0xbd, 0xd9, 0xf1, 0x59, 0x4f, 0xdf, 0x86,
-	0x0c, 0x2f, 0x25, 0x3d, 0x21, 0x0f, 0x47, 0xd9, 0xde, 0x58, 0x8b, 0x8f, 0xa1, 0x18, 0x11, 0xa1,
-	0x24, 0x28, 0x86, 0x19, 0xd3, 0x58, 0xa3, 0x0f, 0x21, 0xaf, 0x28, 0x8c, 0x9e, 0x10, 0xef, 0x93,
-	0x4c, 0x67, 0x82, 0xc1, 0xac, 0xa0, 0x24, 0x49, 0x1e, 0x0e, 0xf3, 0x96, 0xb1, 0x06, 0x1f, 0x41,
-	0x4e, 0x72, 0x93, 0xa4, 0xa2, 0x19, 0x61, 0x30, 0x63, 0x4d, 0x62, 0x28, 0x84, 0xf4, 0x42, 0xbf,
-	0x9e, 0x9c, 0x23, 0x31, 0x36, 0x63, 0x58, 0xd3, 0x8a, 0xab, 0x8c, 0x7a, 0x01, 0x10, 0x1b, 0xea,
-	0x37, 0x13, 0x20, 0x3e, 0x8d, 0x9e, 0x18, 0x5f, 0x9c, 0x4d, 0x49, 0x5d, 0xfc, 0x08, 0x72, 0x72,
-	0x0c, 0x26, 0xc1, 0x36, 0x32, 0x2c, 0xc7, 0xc2, 0xb6, 0x07, 0x79, 0x35, 0xba, 0x92, 0x72, 0xe5,
-	0xe4, 0x34, 0x34, 0xae, 0x4f, 0x29, 0xad, 0x5c, 0xff, 0x01, 0x32, 0x7c, 0xe6, 0x24, 0x55, 0x61,
-	0x6c, 0xfe, 0x19, 0x2b, 0xd3, 0x88, 0x4a, 0xf3, 0x6b, 0xdf, 0x1d, 0xbd, 0x59, 0xba, 0xf4, 0xe7,
-	0x9b, 0xa5, 0x4b, 0xbf, 0xf4, 0x97, 0xb4, 0xa3, 0xfe, 0x92, 0xf6, 0x47, 0x7f, 0x49, 0xfb, 0xa7,
-	0xbf, 0xa4, 0x3d, 0xbb, 0xf7, 0x76, 0xff, 0x7e, 0xbc, 0x2b, 0x16, 0x4f, 0x53, 0xbb, 0x39, 0x01,
-	0xd8, 0xcd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x55, 0xbf, 0x54, 0xc7, 0x14, 0x00, 0x00,
+	0x14, 0xee, 0xfa, 0xee, 0xe3, 0xa4, 0x4d, 0x96, 0x34, 0x98, 0xa5, 0x8a, 0xd3, 0xe5, 0xc5, 0x04,
+	0xba, 0xa6, 0x2e, 0xaa, 0xaa, 0xb6, 0xaa, 0xc8, 0x8d, 0xc8, 0x82, 0xaa, 0xe9, 0xb6, 0x40, 0x55,
+	0x09, 0x99, 0x8d, 0x77, 0x62, 0x8f, 0x62, 0xef, 0x6c, 0x77, 0xc6, 0x69, 0xcd, 0x0b, 0xfc, 0x84,
+	0xbe, 0xf2, 0x02, 0x7f, 0xa7, 0x8f, 0x3c, 0x22, 0x54, 0x05, 0xea, 0x57, 0x7e, 0x01, 0x6f, 0x68,
+	0x2e, 0xbb, 0xde, 0xd8, 0xf1, 0x25, 0x4d, 0xc3, 0x4b, 0x3b, 0x33, 0x7b, 0xce, 0x99, 0x33, 0xdf,
+	0xb9, 0x7d, 0x0e, 0x6c, 0x34, 0x31, 0x6b, 0x75, 0xf7, 0xac, 0x06, 0xe9, 0x54, 0x1a, 0xc4, 0x63,
+	0x0e, 0xf6, 0x50, 0xe0, 0xc6, 0x97, 0x8e, 0x8f, 0x2b, 0x14, 0x05, 0x87, 0xb8, 0x81, 0x68, 0x85,
+	0x39, 0xf4, 0x80, 0x56, 0x0e, 0xaf, 0xcb, 0x85, 0xe5, 0x07, 0x84, 0x11, 0xfd, 0xca, 0x40, 0xda,
+	0x0a, 0x25, 0x2d, 0x29, 0x70, 0x78, 0xdd, 0xf8, 0xb0, 0x49, 0x48, 0xb3, 0x8d, 0x2a, 0x42, 0x76,
+	0xaf, 0xbb, 0x5f, 0x41, 0x1d, 0x9f, 0xf5, 0xa4, 0xaa, 0xf1, 0xc1, 0xf0, 0x47, 0xc7, 0x0b, 0x3f,
+	0x2d, 0x35, 0x49, 0x93, 0x88, 0x65, 0x85, 0xaf, 0xd4, 0xe9, 0xcd, 0x99, 0xfc, 0x65, 0x3d, 0x1f,
+	0xd1, 0x4a, 0x87, 0x74, 0x3d, 0xa6, 0xf4, 0x6e, 0x9d, 0x46, 0x0f, 0xb1, 0x00, 0x37, 0xd4, 0xeb,
+	0x8c, 0x3b, 0xa7, 0xd0, 0x74, 0x11, 0x6d, 0x04, 0xd8, 0x67, 0x24, 0x50, 0xca, 0xb7, 0x4f, 0xa1,
+	0xcc, 0x11, 0x13, 0xff, 0x28, 0xdd, 0xd2, 0x30, 0x36, 0x0c, 0x77, 0x10, 0x65, 0x4e, 0xc7, 0x97,
+	0x02, 0xe6, 0x3f, 0x09, 0x58, 0xdc, 0x0c, 0x90, 0xc3, 0xd0, 0x63, 0x87, 0x1e, 0xd8, 0xe8, 0x59,
+	0x17, 0x51, 0xa6, 0x57, 0x61, 0x2e, 0x32, 0x5f, 0xc7, 0x6e, 0x51, 0x5b, 0xd5, 0xca, 0xf9, 0x8d,
+	0x4b, 0xfd, 0xa3, 0x52, 0x61, 0x33, 0x3c, 0xaf, 0x6d, 0xd9, 0x85, 0x48, 0xa8, 0xe6, 0xea, 0x15,
+	0xc8, 0x04, 0x84, 0xb0, 0x7d, 0x5a, 0x4c, 0xae, 0x26, 0xcb, 0x85, 0xea, 0xfb, 0x56, 0x2c, 0xa4,
+	0xc2, 0x3b, 0xeb, 0x3e, 0x07, 0xd3, 0x56, 0x62, 0xfa, 0x12, 0xa4, 0x29, 0x73, 0xb1, 0x57, 0x4c,
+	0x71, 0xeb, 0xb6, 0xdc, 0xe8, 0xcb, 0x90, 0xa1, 0xcc, 0x25, 0x5d, 0x56, 0x4c, 0x8b, 0x63, 0xb5,
+	0x53, 0xe7, 0x28, 0x08, 0x8a, 0x99, 0xe8, 0x1c, 0x05, 0x81, 0x6e, 0x40, 0x8e, 0xa1, 0xa0, 0x83,
+	0x3d, 0xa7, 0x5d, 0xcc, 0xae, 0x6a, 0xe5, 0x9c, 0x1d, 0xed, 0xf5, 0xbb, 0x00, 0x8d, 0x16, 0x6a,
+	0x1c, 0xf8, 0x04, 0x7b, 0xac, 0x98, 0x5b, 0xd5, 0xca, 0x85, 0xea, 0x95, 0x51, 0xb7, 0xb6, 0x22,
+	0xc4, 0xed, 0x98, 0xbc, 0x6e, 0x41, 0x96, 0xf8, 0x0c, 0x13, 0x8f, 0x16, 0xf3, 0x42, 0x75, 0xc9,
+	0x92, 0x68, 0x5a, 0x21, 0x9a, 0xd6, 0xba, 0xd7, 0xb3, 0x43, 0x21, 0xfd, 0x2a, 0xcc, 0x05, 0x5d,
+	0x8f, 0x03, 0x5c, 0xf7, 0x1d, 0xd6, 0x2a, 0x82, 0xf0, 0xb3, 0xa0, 0xce, 0x76, 0x1d, 0xd6, 0x32,
+	0x9f, 0x82, 0x1e, 0x07, 0x9b, 0xfa, 0xc4, 0xa3, 0xe8, 0xad, 0xd0, 0x5e, 0x80, 0xa4, 0x8f, 0xdd,
+	0x62, 0x62, 0x55, 0x2b, 0xcf, 0xdb, 0x7c, 0x69, 0x36, 0x61, 0xee, 0x11, 0x73, 0x02, 0x76, 0x96,
+	0x18, 0x7e, 0x04, 0x59, 0xf4, 0x02, 0x35, 0xea, 0xca, 0x72, 0x7e, 0x03, 0xfa, 0x47, 0xa5, 0xcc,
+	0xf6, 0x0b, 0xd4, 0xa8, 0x6d, 0xd9, 0x19, 0xfe, 0xa9, 0xe6, 0x9a, 0x57, 0x61, 0x5e, 0x5d, 0xa4,
+	0xfc, 0x57, 0xbe, 0x68, 0x03, 0x5f, 0x76, 0x60, 0x71, 0x0b, 0xb5, 0xd1, 0x99, 0x93, 0xca, 0xfc,
+	0x55, 0x83, 0x8b, 0xd2, 0x52, 0x74, 0xdb, 0x32, 0x24, 0x22, 0xe5, 0x4c, 0xff, 0xa8, 0x94, 0xa8,
+	0x6d, 0xd9, 0x09, 0x7c, 0x02, 0x22, 0x7a, 0x09, 0x0a, 0xe8, 0x05, 0x66, 0x75, 0xca, 0x1c, 0xd6,
+	0xe5, 0x69, 0xc9, 0xbf, 0x00, 0x3f, 0x7a, 0x24, 0x4e, 0xf4, 0x75, 0xc8, 0xf3, 0x1d, 0x72, 0xeb,
+	0x0e, 0x13, 0x59, 0x58, 0xa8, 0x1a, 0x23, 0x31, 0x7e, 0x1c, 0x56, 0xcc, 0x46, 0xee, 0xd5, 0x51,
+	0xe9, 0xc2, 0xcb, 0xbf, 0x4a, 0x9a, 0x9d, 0x93, 0x6a, 0xeb, 0xcc, 0x24, 0xb0, 0x24, 0xfd, 0xdb,
+	0x0d, 0x48, 0x03, 0x51, 0x7a, 0xee, 0xe8, 0x23, 0x80, 0x1d, 0x74, 0xfe, 0x41, 0xde, 0x86, 0x82,
+	0xb8, 0x46, 0x81, 0x7e, 0x13, 0xb2, 0xbe, 0x7c, 0xa0, 0xb8, 0x62, 0xa8, 0x8c, 0x0e, 0xaf, 0xab,
+	0x4a, 0x0a, 0x41, 0x08, 0x85, 0xcd, 0x35, 0x58, 0xf8, 0x1a, 0x53, 0xc6, 0xd3, 0x20, 0x82, 0x66,
+	0x19, 0x32, 0xfb, 0xb8, 0xcd, 0x50, 0x20, 0xbd, 0xb5, 0xd5, 0x8e, 0x27, 0x4d, 0x4c, 0x36, 0xaa,
+	0x8d, 0xb4, 0x98, 0x02, 0x45, 0x4d, 0x34, 0x95, 0xc9, 0xd7, 0x4a, 0x51, 0xf3, 0xa5, 0x06, 0x85,
+	0xaf, 0x70, 0xbb, 0x7d, 0xde, 0x20, 0x89, 0x9e, 0x84, 0x9b, 0xbc, 0xf3, 0xc8, 0xdc, 0x52, 0x3b,
+	0x9e, 0x8a, 0x4e, 0xbb, 0x2d, 0x32, 0x2a, 0x67, 0xf3, 0xa5, 0xf9, 0xaf, 0x06, 0x3a, 0x57, 0x7e,
+	0x07, 0x59, 0x12, 0xb5, 0xcd, 0xc4, 0xc9, 0x6d, 0x33, 0x39, 0xa6, 0x6d, 0xa6, 0xc6, 0xb6, 0xcd,
+	0xf4, 0x50, 0xdb, 0x2c, 0x43, 0x8a, 0xfa, 0xa8, 0x21, 0x1a, 0xed, 0xb8, 0xae, 0x27, 0x24, 0xe2,
+	0x28, 0x65, 0xc7, 0xa6, 0xd2, 0x65, 0x78, 0xef, 0xd8, 0xd3, 0x65, 0x64, 0xcd, 0x5f, 0x34, 0x58,
+	0xb0, 0x11, 0xc5, 0x3f, 0xa2, 0x5d, 0xd6, 0x3b, 0xf7, 0x50, 0x2d, 0x41, 0xfa, 0x39, 0x76, 0x59,
+	0x4b, 0x45, 0x4a, 0x6e, 0x38, 0x3a, 0x2d, 0x84, 0x9b, 0x2d, 0x59, 0xfd, 0xf3, 0xb6, 0xda, 0x99,
+	0x3f, 0xc1, 0xc5, 0xcd, 0x36, 0xa1, 0xa8, 0xf6, 0xe0, 0xff, 0x70, 0x4c, 0x86, 0x33, 0x29, 0xa2,
+	0x20, 0x37, 0xe6, 0x97, 0xb0, 0xb0, 0xeb, 0x74, 0xe9, 0x99, 0xfb, 0xe7, 0x0e, 0x2c, 0xda, 0x88,
+	0x76, 0x3b, 0x67, 0x36, 0xb4, 0x0d, 0x97, 0x78, 0x71, 0xee, 0x62, 0xf7, 0x2c, 0xc9, 0x6b, 0xda,
+	0xb2, 0x1f, 0x48, 0x33, 0xaa, 0xc4, 0xef, 0x41, 0x5e, 0xb5, 0x0b, 0x14, 0x96, 0xf9, 0xea, 0xa4,
+	0x32, 0xaf, 0x79, 0xfb, 0xc4, 0x1e, 0xa8, 0x98, 0xaf, 0x35, 0xb8, 0xbc, 0x19, 0x8d, 0xed, 0xb3,
+	0xd2, 0x98, 0x3a, 0x2c, 0xfa, 0x4e, 0x80, 0x3c, 0x56, 0x8f, 0x51, 0x07, 0x19, 0xbe, 0x2a, 0xef,
+	0xff, 0x7f, 0x1e, 0x95, 0xd6, 0x62, 0x84, 0x8c, 0xf8, 0xc8, 0x8b, 0xd4, 0x69, 0xa5, 0x49, 0xae,
+	0xb9, 0xb8, 0x89, 0x28, 0xb3, 0xb6, 0xc4, 0x7f, 0xf6, 0x82, 0x34, 0xb6, 0x79, 0x22, 0xad, 0x48,
+	0xce, 0x40, 0x2b, 0xcc, 0x27, 0xb0, 0x3c, 0xfc, 0xba, 0x08, 0xb8, 0xc2, 0x80, 0x2c, 0x9e, 0xd8,
+	0x21, 0x47, 0xf8, 0x4d, 0x5c, 0xc1, 0xfc, 0x2d, 0x01, 0x8b, 0xdf, 0xf8, 0xee, 0x3b, 0xe0, 0x7e,
+	0x55, 0xc8, 0x07, 0x88, 0x92, 0x6e, 0xd0, 0x40, 0x54, 0x80, 0x35, 0xee, 0x55, 0x03, 0x31, 0x7d,
+	0x0f, 0x0a, 0x8e, 0xe7, 0x11, 0xe6, 0x84, 0x58, 0x70, 0xef, 0xbf, 0xb0, 0x26, 0xfd, 0x0e, 0xb0,
+	0x46, 0xbc, 0xb5, 0xd6, 0x07, 0x26, 0xb6, 0x3d, 0x16, 0xf4, 0xec, 0xb8, 0x51, 0xe3, 0x1e, 0x2c,
+	0x0c, 0x0b, 0xf0, 0xe6, 0x7c, 0x80, 0x7a, 0x6a, 0xf6, 0xf0, 0x25, 0x2f, 0xc1, 0x43, 0xa7, 0xdd,
+	0x45, 0x61, 0x47, 0x15, 0x9b, 0xdb, 0x89, 0x5b, 0x9a, 0xb9, 0x06, 0x17, 0xef, 0x4b, 0x22, 0x1f,
+	0xa2, 0x53, 0x84, 0xac, 0x1c, 0x57, 0x12, 0xef, 0xbc, 0x1d, 0x6e, 0x79, 0x85, 0x44, 0xb2, 0xd1,
+	0xf0, 0xca, 0xaa, 0xdf, 0x01, 0x2a, 0x38, 0xc5, 0x13, 0x38, 0xb1, 0x10, 0xb0, 0x43, 0x41, 0x73,
+	0x1f, 0x0a, 0xdf, 0x39, 0xf8, 0xfc, 0x07, 0x7c, 0x00, 0x73, 0xf2, 0x1e, 0xe5, 0xeb, 0x10, 0x59,
+	0xd2, 0x26, 0x93, 0xa5, 0xc4, 0xdb, 0x90, 0xa5, 0xea, 0xeb, 0x39, 0x48, 0x8b, 0xf1, 0xae, 0x1f,
+	0x40, 0x46, 0x12, 0x61, 0xbd, 0x32, 0x39, 0xe2, 0x23, 0xbf, 0x4d, 0x8c, 0xcf, 0x66, 0x57, 0x50,
+	0x4f, 0xfb, 0x01, 0xd2, 0x82, 0xb0, 0xea, 0x6b, 0x93, 0x55, 0xe3, 0xf4, 0xd9, 0xf8, 0x64, 0x26,
+	0x59, 0x75, 0x43, 0x13, 0x32, 0x92, 0x05, 0x4e, 0x7b, 0xce, 0x08, 0x2b, 0x36, 0x3e, 0x9d, 0x45,
+	0x21, 0xba, 0xe8, 0x19, 0xcc, 0x1f, 0xa3, 0x9b, 0x7a, 0x75, 0x16, 0xf5, 0xe3, 0xac, 0xe3, 0x94,
+	0x57, 0x3e, 0x85, 0xe4, 0x0e, 0x62, 0x7a, 0x79, 0xb2, 0xd2, 0x80, 0x93, 0x1a, 0x1f, 0xcf, 0x20,
+	0x19, 0xe1, 0x96, 0xe2, 0xe3, 0x40, 0xb7, 0x26, 0xab, 0x0c, 0x53, 0x48, 0xa3, 0x32, 0xb3, 0xbc,
+	0xba, 0xa8, 0x06, 0x29, 0xce, 0x08, 0xf5, 0x29, 0xbe, 0xc5, 0x58, 0xa3, 0xb1, 0x3c, 0x92, 0xdc,
+	0xdb, 0x1d, 0x9f, 0xf5, 0xf4, 0x5d, 0x48, 0xf1, 0x52, 0xd2, 0xa7, 0xe4, 0xe1, 0x28, 0xdb, 0x1b,
+	0x6b, 0xf1, 0x11, 0xe4, 0x23, 0x22, 0x34, 0x0d, 0x8a, 0x61, 0xc6, 0x34, 0xd6, 0xe8, 0x03, 0xc8,
+	0x2a, 0x0a, 0xa3, 0x4f, 0x89, 0xf7, 0x71, 0xa6, 0x33, 0xc1, 0x60, 0x5a, 0x50, 0x92, 0x69, 0x1e,
+	0x0e, 0xf3, 0x96, 0xb1, 0x06, 0x1f, 0x42, 0x46, 0x72, 0x93, 0x69, 0x45, 0x33, 0xc2, 0x60, 0xc6,
+	0x9a, 0xc4, 0x90, 0x0b, 0xe9, 0x85, 0x7e, 0x6d, 0x7a, 0x8e, 0xc4, 0xd8, 0x8c, 0x61, 0xcd, 0x2a,
+	0xae, 0x32, 0xea, 0x39, 0x40, 0x6c, 0xa8, 0xdf, 0x98, 0x02, 0xf1, 0x49, 0xf4, 0xc4, 0xf8, 0xfc,
+	0x74, 0x4a, 0xea, 0xe2, 0x87, 0x90, 0x91, 0x63, 0x70, 0x1a, 0x6c, 0x23, 0xc3, 0x72, 0x2c, 0x6c,
+	0xfb, 0x90, 0x55, 0xa3, 0x6b, 0x5a, 0xae, 0x1c, 0x9f, 0x86, 0xc6, 0xb5, 0x19, 0xa5, 0x95, 0xeb,
+	0xdf, 0x43, 0x8a, 0xcf, 0x9c, 0x69, 0x55, 0x18, 0x9b, 0x7f, 0xc6, 0xda, 0x2c, 0xa2, 0xd2, 0xfc,
+	0xc6, 0xb7, 0xaf, 0xde, 0xac, 0x5c, 0xf8, 0xe3, 0xcd, 0xca, 0x85, 0x9f, 0xfb, 0x2b, 0xda, 0xab,
+	0xfe, 0x8a, 0xf6, 0x7b, 0x7f, 0x45, 0xfb, 0xbb, 0xbf, 0xa2, 0x3d, 0xbd, 0xfb, 0x76, 0x7f, 0xa1,
+	0xbc, 0x23, 0x16, 0x4f, 0x12, 0x7b, 0x19, 0x01, 0xd8, 0x8d, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff,
+	0xc7, 0x3c, 0xaa, 0x56, 0xea, 0x14, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -1946,6 +1949,13 @@ func (m *CreateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 		i -= len(m.XXX_unrecognized)
 		copy(dAtA[i:], m.XXX_unrecognized)
 	}
+	if len(m.RuntimePath) > 0 {
+		i -= len(m.RuntimePath)
+		copy(dAtA[i:], m.RuntimePath)
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.RuntimePath)))
+		i--
+		dAtA[i] = 0x52
+	}
 	if m.Options != nil {
 		{
 			size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
@@ -3198,6 +3208,10 @@ func (m *CreateTaskRequest) Size() (n int) {
 		l = m.Options.Size()
 		n += 1 + l + sovTasks(uint64(l))
 	}
+	l = len(m.RuntimePath)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
 	if m.XXX_unrecognized != nil {
 		n += len(m.XXX_unrecognized)
 	}
@@ -3747,6 +3761,7 @@ func (this *CreateTaskRequest) String() string {
 		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
 		`Checkpoint:` + strings.Replace(fmt.Sprintf("%v", this.Checkpoint), "Descriptor", "types.Descriptor", 1) + `,`,
 		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`,
+		`RuntimePath:` + fmt.Sprintf("%v", this.RuntimePath) + `,`,
 		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
 		`}`,
 	}, "")
@@ -4385,6 +4400,38 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RuntimePath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RuntimePath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipTasks(dAtA[iNdEx:])

+ 2 - 0
vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto

@@ -88,6 +88,8 @@ message CreateTaskRequest {
 	containerd.types.Descriptor checkpoint = 8;
 
 	google.protobuf.Any options = 9;
+
+	string runtime_path = 10;
 }
 
 message CreateTaskResponse {

+ 18 - 0
vendor/github.com/containerd/containerd/api/services/version/v1/doc.go

@@ -0,0 +1,18 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package version defines the version service.
+package version

+ 18 - 0
vendor/github.com/containerd/containerd/api/types/task/doc.go

@@ -0,0 +1,18 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package task defines the task service.
+package task

+ 45 - 9
vendor/github.com/containerd/containerd/archive/compression/compression.go

@@ -21,15 +21,16 @@ import (
 	"bytes"
 	"compress/gzip"
 	"context"
+	"encoding/binary"
 	"fmt"
 	"io"
 	"os"
-	"os/exec"
 	"strconv"
 	"sync"
 
 	"github.com/containerd/containerd/log"
 	"github.com/klauspost/compress/zstd"
+	exec "golang.org/x/sys/execabs"
 )
 
 type (
@@ -125,17 +126,52 @@ func (r *bufferedReader) Peek(n int) ([]byte, error) {
 	return r.buf.Peek(n)
 }
 
+const (
+	zstdMagicSkippableStart = 0x184D2A50
+	zstdMagicSkippableMask  = 0xFFFFFFF0
+)
+
+var (
+	gzipMagic = []byte{0x1F, 0x8B, 0x08}
+	zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
+)
+
+type matcher = func([]byte) bool
+
+func magicNumberMatcher(m []byte) matcher {
+	return func(source []byte) bool {
+		return bytes.HasPrefix(source, m)
+	}
+}
+
+// zstdMatcher detects zstd compression algorithm.
+// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
+// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.
+func zstdMatcher() matcher {
+	return func(source []byte) bool {
+		if bytes.HasPrefix(source, zstdMagic) {
+			// Zstandard frame
+			return true
+		}
+		// skippable frame
+		if len(source) < 8 {
+			return false
+		}
+		// magic number from 0x184D2A50 to 0x184D2A5F.
+		if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart {
+			return true
+		}
+		return false
+	}
+}
+
 // DetectCompression detects the compression algorithm of the source.
 func DetectCompression(source []byte) Compression {
-	for compression, m := range map[Compression][]byte{
-		Gzip: {0x1F, 0x8B, 0x08},
-		Zstd: {0x28, 0xb5, 0x2f, 0xfd},
+	for compression, fn := range map[Compression]matcher{
+		Gzip: magicNumberMatcher(gzipMagic),
+		Zstd: zstdMatcher(),
 	} {
-		if len(source) < len(m) {
-			// Len too short
-			continue
-		}
-		if bytes.Equal(m, source[:len(m)]) {
+		if fn(source) {
 			return compression
 		}
 	}

+ 53 - 32
vendor/github.com/containerd/containerd/archive/tar.go

@@ -19,6 +19,8 @@ package archive
 import (
 	"archive/tar"
 	"context"
+	"errors"
+	"fmt"
 	"io"
 	"os"
 	"path/filepath"
@@ -30,7 +32,6 @@ import (
 
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/continuity/fs"
-	"github.com/pkg/errors"
 )
 
 var bufPool = &sync.Pool{
@@ -48,12 +49,15 @@ var errInvalidArchive = errors.New("invalid archive")
 // Produces a tar using OCI style file markers for deletions. Deleted
 // files will be prepended with the prefix ".wh.". This style is
 // based off AUFS whiteouts.
-// See https://github.com/opencontainers/image-spec/blob/master/layer.md
+// See https://github.com/opencontainers/image-spec/blob/main/layer.md
 func Diff(ctx context.Context, a, b string) io.ReadCloser {
 	r, w := io.Pipe()
 
 	go func() {
 		err := WriteDiff(ctx, w, a, b)
+		if err != nil {
+			log.G(ctx).WithError(err).Debugf("write diff failed")
+		}
 		if err = w.CloseWithError(err); err != nil {
 			log.G(ctx).WithError(err).Debugf("closing tar pipe failed")
 		}
@@ -68,12 +72,12 @@ func Diff(ctx context.Context, a, b string) io.ReadCloser {
 // Produces a tar using OCI style file markers for deletions. Deleted
 // files will be prepended with the prefix ".wh.". This style is
 // based off AUFS whiteouts.
-// See https://github.com/opencontainers/image-spec/blob/master/layer.md
+// See https://github.com/opencontainers/image-spec/blob/main/layer.md
 func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffOpt) error {
 	var options WriteDiffOptions
 	for _, opt := range opts {
 		if err := opt(&options); err != nil {
-			return errors.Wrap(err, "failed to apply option")
+			return fmt.Errorf("failed to apply option: %w", err)
 		}
 	}
 	if options.writeDiffFunc == nil {
@@ -89,12 +93,12 @@ func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffO
 // Produces a tar using OCI style file markers for deletions. Deleted
 // files will be prepended with the prefix ".wh.". This style is
 // based off AUFS whiteouts.
-// See https://github.com/opencontainers/image-spec/blob/master/layer.md
+// See https://github.com/opencontainers/image-spec/blob/main/layer.md
 func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, _ WriteDiffOptions) error {
-	cw := newChangeWriter(w, b)
+	cw := NewChangeWriter(w, b)
 	err := fs.Changes(ctx, a, b, cw.HandleChange)
 	if err != nil {
-		return errors.Wrap(err, "failed to create diff tar stream")
+		return fmt.Errorf("failed to create diff tar stream: %w", err)
 	}
 	return cw.Close()
 }
@@ -102,7 +106,7 @@ func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, _ WriteDiffOp
 const (
 	// whiteoutPrefix prefix means file is a whiteout. If this is followed by a
 	// filename this means that file has been removed from the base layer.
-	// See https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
+	// See https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts
 	whiteoutPrefix = ".wh."
 
 	// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not
@@ -118,14 +122,14 @@ const (
 )
 
 // Apply applies a tar stream of an OCI style diff tar.
-// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
+// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets
 func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int64, error) {
 	root = filepath.Clean(root)
 
 	var options ApplyOptions
 	for _, opt := range opts {
 		if err := opt(&options); err != nil {
-			return 0, errors.Wrap(err, "failed to apply option")
+			return 0, fmt.Errorf("failed to apply option: %w", err)
 		}
 	}
 	if options.Filter == nil {
@@ -140,7 +144,7 @@ func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int
 
 // applyNaive applies a tar stream of an OCI style diff tar to a directory
 // applying each file as either a whole file or whiteout.
-// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
+// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets
 func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOptions) (size int64, err error) {
 	var (
 		dirs []*tar.Header
@@ -233,7 +237,7 @@ func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOpti
 		ppath, base := filepath.Split(hdr.Name)
 		ppath, err = fs.RootPath(root, ppath)
 		if err != nil {
-			return 0, errors.Wrap(err, "failed to get root path")
+			return 0, fmt.Errorf("failed to get root path: %w", err)
 		}
 
 		// Join to root before joining to parent path to ensure relative links are
@@ -263,7 +267,7 @@ func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOpti
 		}
 		writeFile, err := convertWhiteout(hdr, path)
 		if err != nil {
-			return 0, errors.Wrapf(err, "failed to convert whiteout file %q", hdr.Name)
+			return 0, fmt.Errorf("failed to convert whiteout file %q: %w", hdr.Name, err)
 		}
 		if !writeFile {
 			continue
@@ -370,7 +374,7 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header
 		return nil
 
 	default:
-		return errors.Errorf("unhandled tar header type %d\n", hdr.Typeflag)
+		return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
 	}
 
 	// Lchown is not supported on Windows.
@@ -461,7 +465,17 @@ func mkparent(ctx context.Context, path, root string, parents []string) error {
 	return nil
 }
 
-type changeWriter struct {
+// ChangeWriter provides tar stream from filesystem change information.
+// The privided tar stream is styled as an OCI layer. Change information
+// (add/modify/delete/unmodified) for each file needs to be passed to this
+// writer through HandleChange method.
+//
+// This should be used combining with continuity's diff computing functionality
+// (e.g. `fs.Change` of github.com/containerd/continuity/fs).
+//
+// See also https://github.com/opencontainers/image-spec/blob/main/layer.md for details
+// about OCI layers
+type ChangeWriter struct {
 	tw        *tar.Writer
 	source    string
 	whiteoutT time.Time
@@ -470,8 +484,11 @@ type changeWriter struct {
 	addedDirs map[string]struct{}
 }
 
-func newChangeWriter(w io.Writer, source string) *changeWriter {
-	return &changeWriter{
+// NewChangeWriter returns ChangeWriter that writes tar stream of the source directory
+// to the provided writer. Change information (add/modify/delete/unmodified) for each
+// file needs to be passed through HandleChange method.
+func NewChangeWriter(w io.Writer, source string) *ChangeWriter {
+	return &ChangeWriter{
 		tw:        tar.NewWriter(w),
 		source:    source,
 		whiteoutT: time.Now(),
@@ -481,7 +498,10 @@ func newChangeWriter(w io.Writer, source string) *changeWriter {
 	}
 }
 
-func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
+// HandleChange receives filesystem change information and reflect that information to
+// the result tar stream. This function implements `fs.ChangeFunc` of continuity
+// (github.com/containerd/continuity/fs) and should be used with that package.
+func (cw *ChangeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
 	if err != nil {
 		return err
 	}
@@ -501,7 +521,7 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
 			return err
 		}
 		if err := cw.tw.WriteHeader(hdr); err != nil {
-			return errors.Wrap(err, "failed to write whiteout header")
+			return fmt.Errorf("failed to write whiteout header: %w", err)
 		}
 	} else {
 		var (
@@ -536,12 +556,12 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
 		if strings.HasPrefix(name, string(filepath.Separator)) {
 			name, err = filepath.Rel(string(filepath.Separator), name)
 			if err != nil {
-				return errors.Wrap(err, "failed to make path relative")
+				return fmt.Errorf("failed to make path relative: %w", err)
 			}
 		}
 		name, err = tarName(name)
 		if err != nil {
-			return errors.Wrap(err, "cannot canonicalize path")
+			return fmt.Errorf("cannot canonicalize path: %w", err)
 		}
 		// suffix with '/' for directories
 		if f.IsDir() && !strings.HasSuffix(name, "/") {
@@ -550,7 +570,7 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
 		hdr.Name = name
 
 		if err := setHeaderForSpecialDevice(hdr, name, f); err != nil {
-			return errors.Wrap(err, "failed to set device headers")
+			return fmt.Errorf("failed to set device headers: %w", err)
 		}
 
 		// additionalLinks stores file names which must be linked to
@@ -578,8 +598,8 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
 		}
 
 		if capability, err := getxattr(source, "security.capability"); err != nil {
-			return errors.Wrap(err, "failed to get capabilities xattr")
-		} else if capability != nil {
+			return fmt.Errorf("failed to get capabilities xattr: %w", err)
+		} else if len(capability) > 0 {
 			if hdr.PAXRecords == nil {
 				hdr.PAXRecords = map[string]string{}
 			}
@@ -590,19 +610,19 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
 			return err
 		}
 		if err := cw.tw.WriteHeader(hdr); err != nil {
-			return errors.Wrap(err, "failed to write file header")
+			return fmt.Errorf("failed to write file header: %w", err)
 		}
 
 		if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
 			file, err := open(source)
 			if err != nil {
-				return errors.Wrapf(err, "failed to open path: %v", source)
+				return fmt.Errorf("failed to open path: %v: %w", source, err)
 			}
 			defer file.Close()
 
 			n, err := copyBuffered(context.TODO(), cw.tw, file)
 			if err != nil {
-				return errors.Wrap(err, "failed to copy")
+				return fmt.Errorf("failed to copy: %w", err)
 			}
 			if n != hdr.Size {
 				return errors.New("short write copying file")
@@ -621,7 +641,7 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
 					return err
 				}
 				if err := cw.tw.WriteHeader(hdr); err != nil {
-					return errors.Wrap(err, "failed to write file header")
+					return fmt.Errorf("failed to write file header: %w", err)
 				}
 			}
 		}
@@ -629,14 +649,15 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
 	return nil
 }
 
-func (cw *changeWriter) Close() error {
+// Close closes this writer.
+func (cw *ChangeWriter) Close() error {
 	if err := cw.tw.Close(); err != nil {
-		return errors.Wrap(err, "failed to close tar writer")
+		return fmt.Errorf("failed to close tar writer: %w", err)
 	}
 	return nil
 }
 
-func (cw *changeWriter) includeParents(hdr *tar.Header) error {
+func (cw *ChangeWriter) includeParents(hdr *tar.Header) error {
 	if cw.addedDirs == nil {
 		return nil
 	}
@@ -744,7 +765,7 @@ func validateWhiteout(path string) error {
 			dir += string(filepath.Separator)
 		}
 		if !strings.HasPrefix(originalPath, dir) {
-			return errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
+			return fmt.Errorf("invalid whiteout name: %v: %w", base, errInvalidArchive)
 		}
 	}
 	return nil

+ 0 - 2
vendor/github.com/containerd/containerd/archive/tar_freebsd.go

@@ -1,5 +1,3 @@
-// +build freebsd
-
 /*
    Copyright The containerd Authors.
 

+ 1 - 0
vendor/github.com/containerd/containerd/archive/tar_mostunix.go

@@ -1,3 +1,4 @@
+//go:build !windows && !freebsd
 // +build !windows,!freebsd
 
 /*

+ 0 - 2
vendor/github.com/containerd/containerd/archive/tar_opts_linux.go

@@ -1,5 +1,3 @@
-// +build linux
-
 /*
    Copyright The containerd Authors.
 

+ 2 - 4
vendor/github.com/containerd/containerd/archive/tar_opts_windows.go

@@ -1,5 +1,3 @@
-// +build windows
-
 /*
    Copyright The containerd Authors.
 
@@ -26,7 +24,7 @@ import (
 )
 
 // applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows layer
-// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
+// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets
 func applyWindowsLayer(ctx context.Context, root string, r io.Reader, options ApplyOptions) (size int64, err error) {
 	return ociwclayer.ImportLayerFromTar(ctx, r, root, options.Parents)
 }
@@ -47,7 +45,7 @@ func AsWindowsContainerLayer() ApplyOpt {
 // Produces a tar using OCI style file markers for deletions. Deleted
 // files will be prepended with the prefix ".wh.". This style is
 // based off AUFS whiteouts.
-// See https://github.com/opencontainers/image-spec/blob/master/layer.md
+// See https://github.com/opencontainers/image-spec/blob/main/layer.md
 func writeDiffWindowsLayers(ctx context.Context, w io.Writer, _, layer string, options WriteDiffOptions) error {
 	return ociwclayer.ExportLayerToTar(ctx, w, layer, options.ParentLayers)
 }

+ 26 - 8
vendor/github.com/containerd/containerd/archive/tar_unix.go

@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 /*
@@ -20,14 +21,16 @@ package archive
 
 import (
 	"archive/tar"
+	"errors"
+	"fmt"
 	"os"
+	"runtime"
 	"strings"
 	"syscall"
 
 	"github.com/containerd/containerd/pkg/userns"
 	"github.com/containerd/continuity/fs"
 	"github.com/containerd/continuity/sysx"
-	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 )
 
@@ -40,6 +43,20 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
 }
 
 func setHeaderForSpecialDevice(hdr *tar.Header, name string, fi os.FileInfo) error {
+	// Devmajor and Devminor are only needed for special devices.
+
+	// In FreeBSD, RDev for regular files is -1 (unless overridden by FS):
+	// https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531
+	// (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241).
+
+	// ZFS in particular does not override the default:
+	// https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027
+
+	// Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1).
+	// Such large values cannot be encoded in a tar header.
+	if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar {
+		return nil
+	}
 	s, ok := fi.Sys().(*syscall.Stat_t)
 	if !ok {
 		return errors.New("unsupported stat type")
@@ -69,6 +86,7 @@ func openFile(name string, flag int, perm os.FileMode) (*os.File, error) {
 	}
 	// Call chmod to avoid permission mask
 	if err := os.Chmod(name, perm); err != nil {
+		f.Close()
 		return nil, err
 	}
 	return f, err
@@ -122,7 +140,7 @@ func getxattr(path, attr string) ([]byte, error) {
 func setxattr(path, key, value string) error {
 	// Do not set trusted attributes
 	if strings.HasPrefix(key, "trusted.") {
-		return errors.Wrap(unix.ENOTSUP, "admin attributes from archive not supported")
+		return fmt.Errorf("admin attributes from archive not supported: %w", unix.ENOTSUP)
 	}
 	return unix.Lsetxattr(path, key, []byte(value), 0)
 }
@@ -142,12 +160,12 @@ func copyDirInfo(fi os.FileInfo, path string) error {
 			}
 		}
 		if err != nil {
-			return errors.Wrapf(err, "failed to chown %s", path)
+			return fmt.Errorf("failed to chown %s: %w", path, err)
 		}
 	}
 
 	if err := os.Chmod(path, fi.Mode()); err != nil {
-		return errors.Wrapf(err, "failed to chmod %s", path)
+		return fmt.Errorf("failed to chmod %s: %w", path, err)
 	}
 
 	timespec := []unix.Timespec{
@@ -155,7 +173,7 @@ func copyDirInfo(fi os.FileInfo, path string) error {
 		unix.NsecToTimespec(syscall.TimespecToNsec(fs.StatMtime(st))),
 	}
 	if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
-		return errors.Wrapf(err, "failed to utime %s", path)
+		return fmt.Errorf("failed to utime %s: %w", path, err)
 	}
 
 	return nil
@@ -167,7 +185,7 @@ func copyUpXAttrs(dst, src string) error {
 		if err == unix.ENOTSUP || err == sysx.ENODATA {
 			return nil
 		}
-		return errors.Wrapf(err, "failed to list xattrs on %s", src)
+		return fmt.Errorf("failed to list xattrs on %s: %w", src, err)
 	}
 	for _, xattr := range xattrKeys {
 		// Do not copy up trusted attributes
@@ -179,10 +197,10 @@ func copyUpXAttrs(dst, src string) error {
 			if err == unix.ENOTSUP || err == sysx.ENODATA {
 				continue
 			}
-			return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
+			return fmt.Errorf("failed to get xattr %q on %s: %w", xattr, src, err)
 		}
 		if err := lsetxattrCreate(dst, xattr, data); err != nil {
-			return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
+			return fmt.Errorf("failed to set xattr %q on %s: %w", xattr, dst, err)
 		}
 	}
 

+ 2 - 4
vendor/github.com/containerd/containerd/archive/tar_windows.go

@@ -1,5 +1,3 @@
-// +build windows
-
 /*
    Copyright The containerd Authors.
 
@@ -20,12 +18,12 @@ package archive
 
 import (
 	"archive/tar"
+	"errors"
 	"fmt"
 	"os"
 	"strings"
 
 	"github.com/containerd/containerd/sys"
-	"github.com/pkg/errors"
 )
 
 // tarName returns platform-specific filepath
@@ -114,7 +112,7 @@ func setxattr(path, key, value string) error {
 
 func copyDirInfo(fi os.FileInfo, path string) error {
 	if err := os.Chmod(path, fi.Mode()); err != nil {
-		return errors.Wrapf(err, "failed to chmod %s", path)
+		return fmt.Errorf("failed to chmod %s: %w", path, err)
 	}
 	return nil
 }

+ 3 - 3
vendor/github.com/containerd/containerd/archive/time_unix.go

@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 /*
@@ -19,11 +20,10 @@
 package archive
 
 import (
+	"fmt"
 	"time"
 
 	"golang.org/x/sys/unix"
-
-	"github.com/pkg/errors"
 )
 
 func chtimes(path string, atime, mtime time.Time) error {
@@ -32,7 +32,7 @@ func chtimes(path string, atime, mtime time.Time) error {
 	utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
 
 	if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil {
-		return errors.Wrapf(err, "failed call to UtimesNanoAt for %s", path)
+		return fmt.Errorf("failed call to UtimesNanoAt for %s: %w", path, err)
 	}
 
 	return nil

+ 6 - 6
vendor/github.com/containerd/containerd/cio/io_unix.go

@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 /*
@@ -20,15 +21,14 @@ package cio
 
 import (
 	"context"
+	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sync"
 	"syscall"
 
 	"github.com/containerd/fifo"
-	"github.com/pkg/errors"
 )
 
 // NewFIFOSetInDir returns a new FIFOSet with paths in a temporary directory under root
@@ -38,7 +38,7 @@ func NewFIFOSetInDir(root, id string, terminal bool) (*FIFOSet, error) {
 			return nil, err
 		}
 	}
-	dir, err := ioutil.TempDir(root, "")
+	dir, err := os.MkdirTemp(root, "")
 	if err != nil {
 		return nil, err
 	}
@@ -112,7 +112,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) {
 
 	if fifos.Stdin != "" {
 		if f.Stdin, retErr = fifo.OpenFifo(ctx, fifos.Stdin, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil {
-			return f, errors.Wrapf(retErr, "failed to open stdin fifo")
+			return f, fmt.Errorf("failed to open stdin fifo: %w", retErr)
 		}
 		defer func() {
 			if retErr != nil && f.Stdin != nil {
@@ -122,7 +122,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) {
 	}
 	if fifos.Stdout != "" {
 		if f.Stdout, retErr = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil {
-			return f, errors.Wrapf(retErr, "failed to open stdout fifo")
+			return f, fmt.Errorf("failed to open stdout fifo: %w", retErr)
 		}
 		defer func() {
 			if retErr != nil && f.Stdout != nil {
@@ -132,7 +132,7 @@ func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) {
 	}
 	if !fifos.Terminal && fifos.Stderr != "" {
 		if f.Stderr, retErr = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil {
-			return f, errors.Wrapf(retErr, "failed to open stderr fifo")
+			return f, fmt.Errorf("failed to open stderr fifo: %w", retErr)
 		}
 	}
 	return f, nil

+ 3 - 4
vendor/github.com/containerd/containerd/cio/io_windows.go

@@ -23,7 +23,6 @@ import (
 
 	winio "github.com/Microsoft/go-winio"
 	"github.com/containerd/containerd/log"
-	"github.com/pkg/errors"
 )
 
 const pipeRoot = `\\.\pipe`
@@ -54,7 +53,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) {
 	if fifos.Stdin != "" {
 		l, err := winio.ListenPipe(fifos.Stdin, nil)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to create stdin pipe %s", fifos.Stdin)
+			return nil, fmt.Errorf("failed to create stdin pipe %s: %w", fifos.Stdin, err)
 		}
 		cios.closers = append(cios.closers, l)
 
@@ -77,7 +76,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) {
 	if fifos.Stdout != "" {
 		l, err := winio.ListenPipe(fifos.Stdout, nil)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to create stdout pipe %s", fifos.Stdout)
+			return nil, fmt.Errorf("failed to create stdout pipe %s: %w", fifos.Stdout, err)
 		}
 		cios.closers = append(cios.closers, l)
 
@@ -100,7 +99,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) {
 	if fifos.Stderr != "" {
 		l, err := winio.ListenPipe(fifos.Stderr, nil)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to create stderr pipe %s", fifos.Stderr)
+			return nil, fmt.Errorf("failed to create stderr pipe %s: %w", fifos.Stderr, err)
 		}
 		cios.closers = append(cios.closers, l)
 

+ 25 - 26
vendor/github.com/containerd/containerd/client.go

@@ -21,7 +21,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"net/http"
 	"runtime"
 	"strconv"
 	"strings"
@@ -62,10 +61,10 @@ import (
 	ptypes "github.com/gogo/protobuf/types"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/pkg/errors"
 	"golang.org/x/sync/semaphore"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/health/grpc_health_v1"
 )
 
@@ -119,31 +118,33 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
 		}
 		gopts := []grpc.DialOption{
 			grpc.WithBlock(),
-			grpc.WithInsecure(),
+			grpc.WithTransportCredentials(insecure.NewCredentials()),
 			grpc.FailOnNonTempDialError(true),
 			grpc.WithConnectParams(connParams),
 			grpc.WithContextDialer(dialer.ContextDialer),
-
-			// TODO(stevvooe): We may need to allow configuration of this on the client.
-			grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
-			grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
+			grpc.WithReturnConnectionError(),
 		}
 		if len(copts.dialOptions) > 0 {
 			gopts = copts.dialOptions
 		}
+		gopts = append(gopts, grpc.WithDefaultCallOptions(
+			grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize),
+			grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)))
+		if len(copts.callOptions) > 0 {
+			gopts = append(gopts, grpc.WithDefaultCallOptions(copts.callOptions...))
+		}
 		if copts.defaultns != "" {
 			unary, stream := newNSInterceptors(copts.defaultns)
-			gopts = append(gopts,
-				grpc.WithUnaryInterceptor(unary),
-				grpc.WithStreamInterceptor(stream),
-			)
+			gopts = append(gopts, grpc.WithChainUnaryInterceptor(unary))
+			gopts = append(gopts, grpc.WithChainStreamInterceptor(stream))
 		}
+
 		connector := func() (*grpc.ClientConn, error) {
 			ctx, cancel := context.WithTimeout(context.Background(), copts.timeout)
 			defer cancel()
 			conn, err := grpc.DialContext(ctx, dialer.DialAddress(address), gopts...)
 			if err != nil {
-				return nil, errors.Wrapf(err, "failed to dial %q", address)
+				return nil, fmt.Errorf("failed to dial %q: %w", address, err)
 			}
 			return conn, nil
 		}
@@ -154,7 +155,7 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
 		c.conn, c.connector = conn, connector
 	}
 	if copts.services == nil && c.conn == nil {
-		return nil, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection or services is available")
+		return nil, fmt.Errorf("no grpc connection or services is available: %w", errdefs.ErrUnavailable)
 	}
 
 	// check namespace labels for default runtime
@@ -214,7 +215,7 @@ type Client struct {
 // Reconnect re-establishes the GRPC connection to the containerd daemon
 func (c *Client) Reconnect() error {
 	if c.connector == nil {
-		return errors.Wrap(errdefs.ErrUnavailable, "unable to reconnect to containerd, no connector available")
+		return fmt.Errorf("unable to reconnect to containerd, no connector available: %w", errdefs.ErrUnavailable)
 	}
 	c.connMu.Lock()
 	defer c.connMu.Unlock()
@@ -242,7 +243,7 @@ func (c *Client) IsServing(ctx context.Context) (bool, error) {
 	c.connMu.Lock()
 	if c.conn == nil {
 		c.connMu.Unlock()
-		return false, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
+		return false, fmt.Errorf("no grpc connection available: %w", errdefs.ErrUnavailable)
 	}
 	c.connMu.Unlock()
 	r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.WaitForReady(true))
@@ -265,8 +266,8 @@ func (c *Client) Containers(ctx context.Context, filters ...string) ([]Container
 	return out, nil
 }
 
-// NewContainer will create a new container in container with the provided id
-// the id must be unique within the namespace
+// NewContainer will create a new container with the provided id.
+// The id must be unique within the namespace.
 func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) {
 	ctx, done, err := c.WithLease(ctx)
 	if err != nil {
@@ -369,9 +370,7 @@ type RemoteContext struct {
 
 func defaultRemoteContext() *RemoteContext {
 	return &RemoteContext{
-		Resolver: docker.NewResolver(docker.ResolverOptions{
-			Client: http.DefaultClient,
-		}),
+		Resolver: docker.NewResolver(docker.ResolverOptions{}),
 	}
 }
 
@@ -386,7 +385,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag
 	}
 
 	if fetchCtx.Unpack {
-		return images.Image{}, errors.Wrap(errdefs.ErrNotImplemented, "unpack on fetch not supported, try pull")
+		return images.Image{}, fmt.Errorf("unpack on fetch not supported, try pull: %w", errdefs.ErrNotImplemented)
 	}
 
 	if fetchCtx.PlatformMatcher == nil {
@@ -397,7 +396,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag
 			for _, s := range fetchCtx.Platforms {
 				p, err := platforms.Parse(s)
 				if err != nil {
-					return images.Image{}, errors.Wrapf(err, "invalid platform %s", s)
+					return images.Image{}, fmt.Errorf("invalid platform %s: %w", s, err)
 				}
 				ps = append(ps, p)
 			}
@@ -433,7 +432,7 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor,
 			for _, platform := range pushCtx.Platforms {
 				p, err := platforms.Parse(platform)
 				if err != nil {
-					return errors.Wrapf(err, "invalid platform %s", platform)
+					return fmt.Errorf("invalid platform %s: %w", platform, err)
 				}
 				ps = append(ps, p)
 			}
@@ -716,7 +715,7 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
 	c.connMu.Lock()
 	if c.conn == nil {
 		c.connMu.Unlock()
-		return Version{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
+		return Version{}, fmt.Errorf("no grpc connection available: %w", errdefs.ErrUnavailable)
 	}
 	c.connMu.Unlock()
 	response, err := c.VersionService().Version(ctx, &ptypes.Empty{})
@@ -739,7 +738,7 @@ func (c *Client) Server(ctx context.Context) (ServerInfo, error) {
 	c.connMu.Lock()
 	if c.conn == nil {
 		c.connMu.Unlock()
-		return ServerInfo{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
+		return ServerInfo{}, fmt.Errorf("no grpc connection available: %w", errdefs.ErrUnavailable)
 	}
 	c.connMu.Unlock()
 
@@ -777,7 +776,7 @@ func (c *Client) getSnapshotter(ctx context.Context, name string) (snapshots.Sna
 
 	s := c.SnapshotService(name)
 	if s == nil {
-		return nil, errors.Wrapf(errdefs.ErrNotFound, "snapshotter %s was not found", name)
+		return nil, fmt.Errorf("snapshotter %s was not found: %w", name, errdefs.ErrNotFound)
 	}
 
 	return s, nil

+ 9 - 0
vendor/github.com/containerd/containerd/client_opts.go

@@ -34,6 +34,7 @@ type clientOpts struct {
 	defaultPlatform platforms.MatchComparer
 	services        *services
 	dialOptions     []grpc.DialOption
+	callOptions     []grpc.CallOption
 	timeout         time.Duration
 }
 
@@ -75,6 +76,14 @@ func WithDialOpts(opts []grpc.DialOption) ClientOpt {
 	}
 }
 
+// WithCallOpts allows grpc.CallOptions to be set on the connection
+func WithCallOpts(opts []grpc.CallOption) ClientOpt {
+	return func(c *clientOpts) error {
+		c.callOptions = opts
+		return nil
+	}
+}
+
 // WithServices sets services used by the client.
 func WithServices(opts ...ServicesOpt) ClientOpt {
 	return func(c *clientOpts) error {

+ 6 - 6
vendor/github.com/containerd/containerd/container.go

@@ -19,6 +19,7 @@ package containerd
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"os"
 	"path/filepath"
 	"strings"
@@ -38,7 +39,6 @@ import (
 	ver "github.com/opencontainers/image-spec/specs-go"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/opencontainers/selinux/go-selinux/label"
-	"github.com/pkg/errors"
 )
 
 const (
@@ -173,7 +173,7 @@ func (c *container) Spec(ctx context.Context) (*oci.Spec, error) {
 // an error is returned if the container has running tasks
 func (c *container) Delete(ctx context.Context, opts ...DeleteOpts) error {
 	if _, err := c.loadTask(ctx, nil); err == nil {
-		return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot delete running task %v", c.id)
+		return fmt.Errorf("cannot delete running task %v: %w", c.id, errdefs.ErrFailedPrecondition)
 	}
 	r, err := c.get(ctx)
 	if err != nil {
@@ -198,11 +198,11 @@ func (c *container) Image(ctx context.Context) (Image, error) {
 		return nil, err
 	}
 	if r.Image == "" {
-		return nil, errors.Wrap(errdefs.ErrNotFound, "container not created from an image")
+		return nil, fmt.Errorf("container not created from an image: %w", errdefs.ErrNotFound)
 	}
 	i, err := c.client.ImageService().Get(ctx, r.Image)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to get image %s for container", r.Image)
+		return nil, fmt.Errorf("failed to get image %s for container: %w", r.Image, err)
 	}
 	return NewImage(c.client, i), nil
 }
@@ -232,7 +232,7 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N
 	}
 	if r.SnapshotKey != "" {
 		if r.Snapshotter == "" {
-			return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "unable to resolve rootfs mounts without snapshotter on container")
+			return nil, fmt.Errorf("unable to resolve rootfs mounts without snapshotter on container: %w", errdefs.ErrInvalidArgument)
 		}
 
 		// get the rootfs from the snapshotter and add it to the request
@@ -391,7 +391,7 @@ func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, er
 	if err != nil {
 		err = errdefs.FromGRPC(err)
 		if errdefs.IsNotFound(err) {
-			return nil, errors.Wrapf(err, "no running task found")
+			return nil, fmt.Errorf("no running task found: %w", err)
 		}
 		return nil, err
 	}

+ 1 - 1
vendor/github.com/containerd/containerd/container_checkpoint_opts.go

@@ -19,6 +19,7 @@ package containerd
 import (
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"runtime"
 
@@ -31,7 +32,6 @@ import (
 	"github.com/containerd/containerd/runtime/v2/runc/options"
 	"github.com/containerd/typeurl"
 	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 var (

+ 5 - 5
vendor/github.com/containerd/containerd/container_opts.go

@@ -19,6 +19,7 @@ package containerd
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 
 	"github.com/containerd/containerd/containers"
@@ -31,7 +32,6 @@ import (
 	"github.com/gogo/protobuf/types"
 	"github.com/opencontainers/image-spec/identity"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 // DeleteOpts allows the caller to set options for the deletion of a container
@@ -227,7 +227,7 @@ func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts
 func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Container) error {
 	if c.SnapshotKey != "" {
 		if c.Snapshotter == "" {
-			return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set to cleanup rootfs snapshot")
+			return fmt.Errorf("container.Snapshotter must be set to cleanup rootfs snapshot: %w", errdefs.ErrInvalidArgument)
 		}
 		s, err := client.getSnapshotter(ctx, c.Snapshotter)
 		if err != nil {
@@ -276,15 +276,15 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer
 func WithContainerExtension(name string, extension interface{}) NewContainerOpts {
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
 		if name == "" {
-			return errors.Wrapf(errdefs.ErrInvalidArgument, "extension key must not be zero-length")
+			return fmt.Errorf("extension key must not be zero-length: %w", errdefs.ErrInvalidArgument)
 		}
 
 		any, err := typeurl.MarshalAny(extension)
 		if err != nil {
 			if errors.Is(err, typeurl.ErrNotFound) {
-				return errors.Wrapf(err, "extension %q is not registered with the typeurl package, see `typeurl.Register`", name)
+				return fmt.Errorf("extension %q is not registered with the typeurl package, see `typeurl.Register`: %w", name, err)
 			}
-			return errors.Wrap(err, "error marshalling extension")
+			return fmt.Errorf("error marshalling extension: %w", err)
 		}
 
 		if c.Extensions == nil {

+ 1 - 0
vendor/github.com/containerd/containerd/container_opts_unix.go

@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 /*

+ 5 - 4
vendor/github.com/containerd/containerd/container_restore_opts.go

@@ -18,6 +18,8 @@ package containerd
 
 import (
 	"context"
+	"errors"
+	"fmt"
 
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/content"
@@ -26,7 +28,6 @@ import (
 	ptypes "github.com/gogo/protobuf/types"
 	"github.com/opencontainers/image-spec/identity"
 	imagespec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 var (
@@ -46,7 +47,7 @@ func WithRestoreImage(ctx context.Context, id string, client *Client, checkpoint
 	return func(ctx context.Context, client *Client, c *containers.Container) error {
 		name, ok := index.Annotations[checkpointImageNameLabel]
 		if !ok || name == "" {
-			return ErrRuntimeNameNotFoundInIndex
+			return ErrImageNameNotFoundInIndex
 		}
 		snapshotter, ok := index.Annotations[checkpointSnapshotterNameLabel]
 		if !ok || name == "" {
@@ -92,7 +93,7 @@ func WithRestoreRuntime(ctx context.Context, id string, client *Client, checkpoi
 			store := client.ContentStore()
 			data, err := content.ReadBlob(ctx, store, *m)
 			if err != nil {
-				return errors.Wrap(err, "unable to read checkpoint runtime")
+				return fmt.Errorf("unable to read checkpoint runtime: %w", err)
 			}
 			if err := proto.Unmarshal(data, &options); err != nil {
 				return err
@@ -117,7 +118,7 @@ func WithRestoreSpec(ctx context.Context, id string, client *Client, checkpoint
 		store := client.ContentStore()
 		data, err := content.ReadBlob(ctx, store, *m)
 		if err != nil {
-			return errors.Wrap(err, "unable to read checkpoint config")
+			return fmt.Errorf("unable to read checkpoint config: %w", err)
 		}
 		var any ptypes.Any
 		if err := proto.Unmarshal(data, &any); err != nil {

+ 19 - 16
vendor/github.com/containerd/containerd/content/helpers.go

@@ -18,8 +18,9 @@ package content
 
 import (
 	"context"
+	"errors"
+	"fmt"
 	"io"
-	"io/ioutil"
 	"math/rand"
 	"sync"
 	"time"
@@ -27,7 +28,6 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 var bufPool = sync.Pool{
@@ -77,7 +77,7 @@ func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc o
 	cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc))
 	if err != nil {
 		if !errdefs.IsAlreadyExists(err) {
-			return errors.Wrap(err, "failed to open writer")
+			return fmt.Errorf("failed to open writer: %w", err)
 		}
 
 		return nil // all ready present
@@ -134,28 +134,28 @@ func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, er
 func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
 	ws, err := cw.Status()
 	if err != nil {
-		return errors.Wrap(err, "failed to get status")
+		return fmt.Errorf("failed to get status: %w", err)
 	}
 
 	if ws.Offset > 0 {
 		r, err = seekReader(r, ws.Offset, size)
 		if err != nil {
-			return errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
+			return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
 		}
 	}
 
 	copied, err := copyWithBuffer(cw, r)
 	if err != nil {
-		return errors.Wrap(err, "failed to copy")
+		return fmt.Errorf("failed to copy: %w", err)
 	}
 	if size != 0 && copied < size-ws.Offset {
 		// Short writes would return its own error, this indicates a read failure
-		return errors.Wrapf(io.ErrUnexpectedEOF, "failed to read expected number of bytes")
+		return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
 	}
 
 	if err := cw.Commit(ctx, size, expected, opts...); err != nil {
 		if !errdefs.IsAlreadyExists(err) {
-			return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
+			return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err)
 		}
 	}
 
@@ -172,11 +172,11 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
 
 	copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n))
 	if err != nil {
-		return errors.Wrap(err, "failed to copy")
+		return fmt.Errorf("failed to copy: %w", err)
 	}
 	if copied < n {
 		// Short writes would return its own error, this indicates a read failure
-		return errors.Wrap(io.ErrUnexpectedEOF, "failed to read expected number of bytes")
+		return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
 	}
 	return nil
 }
@@ -190,13 +190,13 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
 func CopyReader(cw Writer, r io.Reader) (int64, error) {
 	ws, err := cw.Status()
 	if err != nil {
-		return 0, errors.Wrap(err, "failed to get status")
+		return 0, fmt.Errorf("failed to get status: %w", err)
 	}
 
 	if ws.Offset > 0 {
 		r, err = seekReader(r, ws.Offset, 0)
 		if err != nil {
-			return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
+			return 0, fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
 		}
 	}
 
@@ -212,7 +212,10 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
 	if ok {
 		nn, err := seeker.Seek(offset, io.SeekStart)
 		if nn != offset {
-			return nil, errors.Wrapf(err, "failed to seek to offset %v", offset)
+			if err == nil {
+				err = fmt.Errorf("unexpected seek location without seek error")
+			}
+			return nil, fmt.Errorf("failed to seek to offset %v: %w", offset, err)
 		}
 
 		if err != nil {
@@ -230,12 +233,12 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
 	}
 
 	// well then, let's just discard up to the offset
-	n, err := copyWithBuffer(ioutil.Discard, io.LimitReader(r, offset))
+	n, err := copyWithBuffer(io.Discard, io.LimitReader(r, offset))
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to discard to offset")
+		return nil, fmt.Errorf("failed to discard to offset: %w", err)
 	}
 	if n != offset {
-		return nil, errors.Errorf("unable to discard to offset")
+		return nil, errors.New("unable to discard to offset")
 	}
 
 	return r, nil

+ 8 - 2
vendor/github.com/containerd/containerd/content/local/locks.go

@@ -17,11 +17,11 @@
 package local
 
 import (
+	"fmt"
 	"sync"
 	"time"
 
 	"github.com/containerd/containerd/errdefs"
-	"github.com/pkg/errors"
 )
 
 // Handles locking references
@@ -41,7 +41,13 @@ func tryLock(ref string) error {
 	defer locksMu.Unlock()
 
 	if v, ok := locks[ref]; ok {
-		return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked since %s", ref, v.since)
+		// Returning the duration may help developers distinguish dead locks (long duration) from
+		// lock contentions (short duration).
+		now := time.Now()
+		return fmt.Errorf(
+			"ref %s locked for %s (since %s): %w", ref, now.Sub(v.since), v.since,
+			errdefs.ErrUnavailable,
+		)
 	}
 
 	locks[ref] = &lock{time.Now()}

+ 3 - 4
vendor/github.com/containerd/containerd/content/local/readerat.go

@@ -17,10 +17,9 @@
 package local
 
 import (
+	"fmt"
 	"os"
 
-	"github.com/pkg/errors"
-
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 )
@@ -40,7 +39,7 @@ func OpenReader(p string) (content.ReaderAt, error) {
 			return nil, err
 		}
 
-		return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found")
+		return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound)
 	}
 
 	fp, err := os.Open(p)
@@ -49,7 +48,7 @@ func OpenReader(p string) (content.ReaderAt, error) {
 			return nil, err
 		}
 
-		return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found")
+		return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound)
 	}
 
 	return sizeReaderAt{size: fi.Size(), fp: fp}, nil

+ 40 - 37
vendor/github.com/containerd/containerd/content/local/store.go

@@ -20,7 +20,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"math/rand"
 	"os"
 	"path/filepath"
@@ -37,7 +36,6 @@ import (
 
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 var bufPool = sync.Pool{
@@ -94,13 +92,13 @@ func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
 func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
 	p, err := s.blobPath(dgst)
 	if err != nil {
-		return content.Info{}, errors.Wrapf(err, "calculating blob info path")
+		return content.Info{}, fmt.Errorf("calculating blob info path: %w", err)
 	}
 
 	fi, err := os.Stat(p)
 	if err != nil {
 		if os.IsNotExist(err) {
-			err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
+			err = fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound)
 		}
 
 		return content.Info{}, err
@@ -129,12 +127,12 @@ func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]strin
 func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
 	p, err := s.blobPath(desc.Digest)
 	if err != nil {
-		return nil, errors.Wrapf(err, "calculating blob path for ReaderAt")
+		return nil, fmt.Errorf("calculating blob path for ReaderAt: %w", err)
 	}
 
 	reader, err := OpenReader(p)
 	if err != nil {
-		return nil, errors.Wrapf(err, "blob %s expected at %s", desc.Digest, p)
+		return nil, fmt.Errorf("blob %s expected at %s: %w", desc.Digest, p, err)
 	}
 
 	return reader, nil
@@ -147,7 +145,7 @@ func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.
 func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
 	bp, err := s.blobPath(dgst)
 	if err != nil {
-		return errors.Wrapf(err, "calculating blob path for delete")
+		return fmt.Errorf("calculating blob path for delete: %w", err)
 	}
 
 	if err := os.RemoveAll(bp); err != nil {
@@ -155,7 +153,7 @@ func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
 			return err
 		}
 
-		return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
+		return fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound)
 	}
 
 	return nil
@@ -163,18 +161,18 @@ func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
 
 func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
 	if s.ls == nil {
-		return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store")
+		return content.Info{}, fmt.Errorf("update not supported on immutable content store: %w", errdefs.ErrFailedPrecondition)
 	}
 
 	p, err := s.blobPath(info.Digest)
 	if err != nil {
-		return content.Info{}, errors.Wrapf(err, "calculating blob path for update")
+		return content.Info{}, fmt.Errorf("calculating blob path for update: %w", err)
 	}
 
 	fi, err := os.Stat(p)
 	if err != nil {
 		if os.IsNotExist(err) {
-			err = errors.Wrapf(errdefs.ErrNotFound, "content %v", info.Digest)
+			err = fmt.Errorf("content %v: %w", info.Digest, errdefs.ErrNotFound)
 		}
 
 		return content.Info{}, err
@@ -201,7 +199,7 @@ func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...str
 				all = true
 				labels = info.Labels
 			default:
-				return content.Info{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest)
+				return content.Info{}, fmt.Errorf("cannot update %q field on content info %q: %w", path, info.Digest, errdefs.ErrInvalidArgument)
 			}
 		}
 	} else {
@@ -378,7 +376,7 @@ func (s *store) status(ingestPath string) (content.Status, error) {
 	fi, err := os.Stat(dp)
 	if err != nil {
 		if os.IsNotExist(err) {
-			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+			err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound)
 		}
 		return content.Status{}, err
 	}
@@ -386,19 +384,19 @@ func (s *store) status(ingestPath string) (content.Status, error) {
 	ref, err := readFileString(filepath.Join(ingestPath, "ref"))
 	if err != nil {
 		if os.IsNotExist(err) {
-			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+			err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound)
 		}
 		return content.Status{}, err
 	}
 
 	startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat"))
 	if err != nil {
-		return content.Status{}, errors.Wrapf(err, "could not read startedat")
+		return content.Status{}, fmt.Errorf("could not read startedat: %w", err)
 	}
 
 	updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat"))
 	if err != nil {
-		return content.Status{}, errors.Wrapf(err, "could not read updatedat")
+		return content.Status{}, fmt.Errorf("could not read updatedat: %w", err)
 	}
 
 	// because we don't write updatedat on every write, the mod time may
@@ -461,7 +459,7 @@ func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.
 	// TODO(AkihiroSuda): we could create a random string or one calculated based on the context
 	// https://github.com/containerd/containerd/issues/2129#issuecomment-380255019
 	if wOpts.Ref == "" {
-		return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
+		return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument)
 	}
 	var lockErr error
 	for count := uint64(0); count < 10; count++ {
@@ -495,16 +493,16 @@ func (s *store) resumeStatus(ref string, total int64, digester digest.Digester)
 	path, _, data := s.ingestPaths(ref)
 	status, err := s.status(path)
 	if err != nil {
-		return status, errors.Wrap(err, "failed reading status of resume write")
+		return status, fmt.Errorf("failed reading status of resume write: %w", err)
 	}
 	if ref != status.Ref {
 		// NOTE(stevvooe): This is fairly catastrophic. Either we have some
 		// layout corruption or a hash collision for the ref key.
-		return status, errors.Errorf("ref key does not match: %v != %v", ref, status.Ref)
+		return status, fmt.Errorf("ref key does not match: %v != %v", ref, status.Ref)
 	}
 
 	if total > 0 && status.Total > 0 && total != status.Total {
-		return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
+		return status, fmt.Errorf("provided total differs from status: %v != %v", total, status.Total)
 	}
 
 	// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
@@ -528,10 +526,10 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
 	if expected != "" {
 		p, err := s.blobPath(expected)
 		if err != nil {
-			return nil, errors.Wrap(err, "calculating expected blob path for writer")
+			return nil, fmt.Errorf("calculating expected blob path for writer: %w", err)
 		}
 		if _, err := os.Stat(p); err == nil {
-			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected)
+			return nil, fmt.Errorf("content %v: %w", expected, errdefs.ErrAlreadyExists)
 		}
 	}
 
@@ -568,7 +566,7 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
 
 		// the ingest is new, we need to setup the target location.
 		// write the ref to a file for later use
-		if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil {
+		if err := os.WriteFile(refp, []byte(ref), 0666); err != nil {
 			return nil, err
 		}
 
@@ -581,7 +579,7 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
 		}
 
 		if total > 0 {
-			if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
+			if err := os.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
 				return nil, err
 			}
 		}
@@ -589,11 +587,12 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
 
 	fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to open data file")
+		return nil, fmt.Errorf("failed to open data file: %w", err)
 	}
 
 	if _, err := fp.Seek(offset, io.SeekStart); err != nil {
-		return nil, errors.Wrap(err, "could not seek to current write offset")
+		fp.Close()
+		return nil, fmt.Errorf("could not seek to current write offset: %w", err)
 	}
 
 	return &writer{
@@ -615,7 +614,7 @@ func (s *store) Abort(ctx context.Context, ref string) error {
 	root := s.ingestRoot(ref)
 	if err := os.RemoveAll(root); err != nil {
 		if os.IsNotExist(err) {
-			return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref)
+			return fmt.Errorf("ingest ref %q: %w", ref, errdefs.ErrNotFound)
 		}
 
 		return err
@@ -626,7 +625,7 @@ func (s *store) Abort(ctx context.Context, ref string) error {
 
 func (s *store) blobPath(dgst digest.Digest) (string, error) {
 	if err := dgst.Validate(); err != nil {
-		return "", errors.Wrapf(errdefs.ErrInvalidArgument, "cannot calculate blob path from invalid digest: %v", err)
+		return "", fmt.Errorf("cannot calculate blob path from invalid digest: %v: %w", err, errdefs.ErrInvalidArgument)
 	}
 
 	return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()), nil
@@ -656,23 +655,23 @@ func (s *store) ingestPaths(ref string) (string, string, string) {
 }
 
 func readFileString(path string) (string, error) {
-	p, err := ioutil.ReadFile(path)
+	p, err := os.ReadFile(path)
 	return string(p), err
 }
 
 // readFileTimestamp reads a file with just a timestamp present.
 func readFileTimestamp(p string) (time.Time, error) {
-	b, err := ioutil.ReadFile(p)
+	b, err := os.ReadFile(p)
 	if err != nil {
 		if os.IsNotExist(err) {
-			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+			err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound)
 		}
 		return time.Time{}, err
 	}
 
 	var t time.Time
 	if err := t.UnmarshalText(b); err != nil {
-		return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p)
+		return time.Time{}, fmt.Errorf("could not parse timestamp file %v: %w", p, err)
 	}
 
 	return t, nil
@@ -683,19 +682,23 @@ func writeTimestampFile(p string, t time.Time) error {
 	if err != nil {
 		return err
 	}
-	return atomicWrite(p, b, 0666)
+	return writeToCompletion(p, b, 0666)
 }
 
-func atomicWrite(path string, data []byte, mode os.FileMode) error {
+func writeToCompletion(path string, data []byte, mode os.FileMode) error {
 	tmp := fmt.Sprintf("%s.tmp", path)
 	f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode)
 	if err != nil {
-		return errors.Wrap(err, "create tmp file")
+		return fmt.Errorf("create tmp file: %w", err)
 	}
 	_, err = f.Write(data)
 	f.Close()
 	if err != nil {
-		return errors.Wrap(err, "write atomic data")
+		return fmt.Errorf("write tmp file: %w", err)
 	}
-	return os.Rename(tmp, path)
+	err = os.Rename(tmp, path)
+	if err != nil {
+		return fmt.Errorf("rename tmp file: %w", err)
+	}
+	return nil
 }

+ 2 - 1
vendor/github.com/containerd/containerd/content/local/store_bsd.go

@@ -1,3 +1,4 @@
+//go:build darwin || freebsd || netbsd
 // +build darwin freebsd netbsd
 
 /*
@@ -26,7 +27,7 @@ import (
 
 func getATime(fi os.FileInfo) time.Time {
 	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
-		return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well.
+		return time.Unix(st.Atimespec.Unix())
 	}
 
 	return fi.ModTime()

+ 2 - 1
vendor/github.com/containerd/containerd/content/local/store_openbsd.go

@@ -1,3 +1,4 @@
+//go:build openbsd
 // +build openbsd
 
 /*
@@ -26,7 +27,7 @@ import (
 
 func getATime(fi os.FileInfo) time.Time {
 	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
-		return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well.
+		return time.Unix(st.Atim.Unix())
 	}
 
 	return fi.ModTime()

+ 2 - 1
vendor/github.com/containerd/containerd/content/local/store_unix.go

@@ -1,3 +1,4 @@
+//go:build linux || solaris
 // +build linux solaris
 
 /*
@@ -26,7 +27,7 @@ import (
 
 func getATime(fi os.FileInfo) time.Time {
 	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
-		return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well.
+		return time.Unix(st.Atim.Unix())
 	}
 
 	return fi.ModTime()

+ 14 - 13
vendor/github.com/containerd/containerd/content/local/writer.go

@@ -18,6 +18,8 @@ package local
 
 import (
 	"context"
+	"errors"
+	"fmt"
 	"io"
 	"os"
 	"path/filepath"
@@ -28,7 +30,6 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/log"
 	"github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
 )
 
 // writer represents a write transaction against the blob store.
@@ -88,30 +89,30 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
 	w.fp = nil
 
 	if fp == nil {
-		return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
+		return fmt.Errorf("cannot commit on closed writer: %w", errdefs.ErrFailedPrecondition)
 	}
 
 	if err := fp.Sync(); err != nil {
 		fp.Close()
-		return errors.Wrap(err, "sync failed")
+		return fmt.Errorf("sync failed: %w", err)
 	}
 
 	fi, err := fp.Stat()
 	closeErr := fp.Close()
 	if err != nil {
-		return errors.Wrap(err, "stat on ingest file failed")
+		return fmt.Errorf("stat on ingest file failed: %w", err)
 	}
 	if closeErr != nil {
-		return errors.Wrap(err, "failed to close ingest file")
+		return fmt.Errorf("failed to close ingest file: %w", closeErr)
 	}
 
 	if size > 0 && size != fi.Size() {
-		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fi.Size(), size)
+		return fmt.Errorf("unexpected commit size %d, expected %d: %w", fi.Size(), size, errdefs.ErrFailedPrecondition)
 	}
 
 	dgst := w.digester.Digest()
 	if expected != "" && expected != dgst {
-		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
+		return fmt.Errorf("unexpected commit digest %s, expected %s: %w", dgst, expected, errdefs.ErrFailedPrecondition)
 	}
 
 	var (
@@ -127,9 +128,9 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
 	if _, err := os.Stat(target); err == nil {
 		// collision with the target file!
 		if err := os.RemoveAll(w.path); err != nil {
-			log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
+			log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory")
 		}
-		return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst)
+		return fmt.Errorf("content %v: %w", dgst, errdefs.ErrAlreadyExists)
 	}
 
 	if err := os.Rename(ingest, target); err != nil {
@@ -142,17 +143,17 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
 
 	commitTime := time.Now()
 	if err := os.Chtimes(target, commitTime, commitTime); err != nil {
-		log.G(ctx).WithField("digest", dgst).Errorf("failed to change file time to commit time")
+		log.G(ctx).WithField("digest", dgst).Error("failed to change file time to commit time")
 	}
 
 	// clean up!!
 	if err := os.RemoveAll(w.path); err != nil {
-		log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
+		log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory")
 	}
 
 	if w.s.ls != nil && base.Labels != nil {
 		if err := w.s.ls.Set(dgst, base.Labels); err != nil {
-			log.G(ctx).WithField("digest", dgst).Errorf("failed to set labels")
+			log.G(ctx).WithField("digest", dgst).Error("failed to set labels")
 		}
 	}
 
@@ -165,7 +166,7 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
 	// NOTE: Windows does not support this operation
 	if runtime.GOOS != "windows" {
 		if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil {
-			log.G(ctx).WithField("ref", w.ref).Errorf("failed to make readonly")
+			log.G(ctx).WithField("ref", w.ref).Error("failed to make readonly")
 		}
 	}
 

+ 6 - 6
vendor/github.com/containerd/containerd/content/proxy/content_writer.go

@@ -18,13 +18,13 @@ package proxy
 
 import (
 	"context"
+	"fmt"
 	"io"
 
 	contentapi "github.com/containerd/containerd/api/services/content/v1"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 	digest "github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
 )
 
 type remoteWriter struct {
@@ -57,7 +57,7 @@ func (rw *remoteWriter) Status() (content.Status, error) {
 		Action: contentapi.WriteActionStat,
 	})
 	if err != nil {
-		return content.Status{}, errors.Wrap(errdefs.FromGRPC(err), "error getting writer status")
+		return content.Status{}, fmt.Errorf("error getting writer status: %w", errdefs.FromGRPC(err))
 	}
 
 	return content.Status{
@@ -82,7 +82,7 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) {
 		Data:   p,
 	})
 	if err != nil {
-		return 0, errors.Wrap(errdefs.FromGRPC(err), "failed to send write")
+		return 0, fmt.Errorf("failed to send write: %w", errdefs.FromGRPC(err))
 	}
 
 	n = int(resp.Offset - offset)
@@ -119,15 +119,15 @@ func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.
 		Labels:   base.Labels,
 	})
 	if err != nil {
-		return errors.Wrap(errdefs.FromGRPC(err), "commit failed")
+		return fmt.Errorf("commit failed: %w", errdefs.FromGRPC(err))
 	}
 
 	if size != 0 && resp.Offset != size {
-		return errors.Errorf("unexpected size: %v != %v", resp.Offset, size)
+		return fmt.Errorf("unexpected size: %v != %v", resp.Offset, size)
 	}
 
 	if expected != "" && resp.Digest != expected {
-		return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
+		return fmt.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
 	}
 
 	rw.digest = resp.Digest

+ 11 - 1
vendor/github.com/containerd/containerd/contrib/nvidia/nvidia.go

@@ -20,13 +20,13 @@ import (
 	"context"
 	"fmt"
 	"os"
-	"os/exec"
 	"strconv"
 	"strings"
 
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/oci"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
+	exec "golang.org/x/sys/execabs"
 )
 
 // NvidiaCLI is the path to the Nvidia helper binary
@@ -111,6 +111,7 @@ type config struct {
 	LDConfig     string
 	Requirements []string
 	OCIHookPath  string
+	NoCgroups    bool
 }
 
 func (c *config) args() []string {
@@ -137,6 +138,9 @@ func (c *config) args() []string {
 	for _, r := range c.Requirements {
 		args = append(args, fmt.Sprintf("--require=%s", r))
 	}
+	if c.NoCgroups {
+		args = append(args, "--no-cgroups")
+	}
 	args = append(args, "--pid={{pid}}", "{{rootfs}}")
 	return args
 }
@@ -209,3 +213,9 @@ func WithLookupOCIHookPath(name string) Opts {
 		return nil
 	}
 }
+
+// WithNoCgroups passes --no-cgroups option to nvidia-container-cli.
+func WithNoCgroups(c *config) error {
+	c.NoCgroups = true
+	return nil
+}

+ 37 - 0
vendor/github.com/containerd/containerd/defaults/defaults_darwin.go

@@ -0,0 +1,37 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package defaults
+
+const (
+	// DefaultRootDir is the default location used by containerd to store
+	// persistent data
+	DefaultRootDir = "/var/lib/containerd"
+	// DefaultStateDir is the default location used by containerd to store
+	// transient data
+	DefaultStateDir = "/var/run/containerd"
+	// DefaultAddress is the default unix socket address
+	DefaultAddress = "/var/run/containerd/containerd.sock"
+	// DefaultDebugAddress is the default unix socket address for pprof data
+	DefaultDebugAddress = "/var/run/containerd/debug.sock"
+	// DefaultFIFODir is the default location used by client-side cio library
+	// to store FIFOs.
+	DefaultFIFODir = "/var/run/containerd/fifo"
+	// DefaultRuntime would be a multiple of choices, thus empty
+	DefaultRuntime = ""
+	// DefaultConfigDir is the default location for config files.
+	DefaultConfigDir = "/etc/containerd"
+)

+ 2 - 1
vendor/github.com/containerd/containerd/defaults/defaults_unix.go

@@ -1,4 +1,5 @@
-// +build !windows
+//go:build !windows && !darwin
+// +build !windows,!darwin
 
 /*
    Copyright The containerd Authors.

+ 0 - 2
vendor/github.com/containerd/containerd/defaults/defaults_windows.go

@@ -1,5 +1,3 @@
-// +build windows
-
 /*
    Copyright The containerd Authors.
 

+ 15 - 0
vendor/github.com/containerd/containerd/diff/diff.go

@@ -18,6 +18,7 @@ package diff
 
 import (
 	"context"
+	"io"
 
 	"github.com/containerd/containerd/mount"
 	"github.com/gogo/protobuf/types"
@@ -37,6 +38,12 @@ type Config struct {
 
 	// Labels are the labels to apply to the generated content
 	Labels map[string]string
+
+	// Compressor is a function to compress the diff stream
+	// instead of the default gzip compressor. Differ passes
+	// the MediaType of the target diff content to the compressor.
+	// When using this config, MediaType must be specified as well.
+	Compressor func(dest io.Writer, mediaType string) (io.WriteCloser, error)
 }
 
 // Opt is used to configure a diff operation
@@ -71,6 +78,14 @@ type Applier interface {
 	Apply(ctx context.Context, desc ocispec.Descriptor, mount []mount.Mount, opts ...ApplyOpt) (ocispec.Descriptor, error)
 }
 
+// WithCompressor sets the function to be used to compress the diff stream.
+func WithCompressor(f func(dest io.Writer, mediaType string) (io.WriteCloser, error)) Opt {
+	return func(c *Config) error {
+		c.Compressor = f
+		return nil
+	}
+}
+
 // WithMediaType sets the media type to use for creating the diff, without
 // specifying the differ will choose a default.
 func WithMediaType(m string) Opt {

+ 1 - 1
vendor/github.com/containerd/containerd/diff/stream.go

@@ -18,6 +18,7 @@ package diff
 
 import (
 	"context"
+	"errors"
 	"io"
 	"os"
 
@@ -25,7 +26,6 @@ import (
 	"github.com/containerd/containerd/images"
 	"github.com/gogo/protobuf/types"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 var (

+ 3 - 2
vendor/github.com/containerd/containerd/diff/stream_unix.go

@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 /*
@@ -21,15 +22,15 @@ package diff
 import (
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"os"
-	"os/exec"
 	"sync"
 
 	"github.com/gogo/protobuf/proto"
 	"github.com/gogo/protobuf/types"
-	"github.com/pkg/errors"
+	exec "golang.org/x/sys/execabs"
 )
 
 // NewBinaryProcessor returns a binary processor for use with processing content streams

+ 3 - 6
vendor/github.com/containerd/containerd/diff/stream_windows.go

@@ -1,5 +1,3 @@
-// +build windows
-
 /*
    Copyright The containerd Authors.
 
@@ -21,19 +19,18 @@ package diff
 import (
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
-	"os/exec"
 	"path/filepath"
 	"sync"
 
 	winio "github.com/Microsoft/go-winio"
 	"github.com/gogo/protobuf/proto"
 	"github.com/gogo/protobuf/types"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	exec "golang.org/x/sys/execabs"
 )
 
 const processorPipe = "STREAM_PROCESSOR_PIPE"
@@ -157,7 +154,7 @@ func (c *binaryProcessor) Close() error {
 }
 
 func getUiqPath() (string, error) {
-	dir, err := ioutil.TempDir("", "")
+	dir, err := os.MkdirTemp("", "")
 	if err != nil {
 		return "", err
 	}

+ 2 - 3
vendor/github.com/containerd/containerd/errdefs/errors.go

@@ -17,7 +17,7 @@
 // Package errdefs defines the common errors used throughout containerd
 // packages.
 //
-// Use with errors.Wrap and error.Wrapf to add context to an error.
+// Use with fmt.Errorf to add context to an error.
 //
 // To detect an error class, use the IsXXX functions to tell whether an error
 // is of a certain type.
@@ -28,8 +28,7 @@ package errdefs
 
 import (
 	"context"
-
-	"github.com/pkg/errors"
+	"errors"
 )
 
 // Definitions of common error types used throughout containerd. All containerd

+ 5 - 5
vendor/github.com/containerd/containerd/errdefs/grpc.go

@@ -18,9 +18,9 @@ package errdefs
 
 import (
 	"context"
+	"fmt"
 	"strings"
 
-	"github.com/pkg/errors"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -68,9 +68,9 @@ func ToGRPC(err error) error {
 // ToGRPCf maps the error to grpc error codes, assembling the formatting string
 // and combining it with the target error string.
 //
-// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
+// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
 func ToGRPCf(err error, format string, args ...interface{}) error {
-	return ToGRPC(errors.Wrapf(err, format, args...))
+	return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
 }
 
 // FromGRPC returns the underlying error from a grpc service based on the grpc error code
@@ -104,9 +104,9 @@ func FromGRPC(err error) error {
 
 	msg := rebaseMessage(cls, err)
 	if msg != "" {
-		err = errors.Wrap(cls, msg)
+		err = fmt.Errorf("%s: %w", msg, cls)
 	} else {
-		err = errors.WithStack(cls)
+		err = cls
 	}
 
 	return err

+ 12 - 12
vendor/github.com/containerd/containerd/events/exchange/exchange.go

@@ -18,6 +18,7 @@ package exchange
 
 import (
 	"context"
+	"fmt"
 	"strings"
 	"time"
 
@@ -30,7 +31,6 @@ import (
 	"github.com/containerd/typeurl"
 	goevents "github.com/docker/go-events"
 	"github.com/gogo/protobuf/types"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
 
@@ -88,10 +88,10 @@ func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event
 
 	namespace, err = namespaces.NamespaceRequired(ctx)
 	if err != nil {
-		return errors.Wrapf(err, "failed publishing event")
+		return fmt.Errorf("failed publishing event: %w", err)
 	}
 	if err := validateTopic(topic); err != nil {
-		return errors.Wrapf(err, "envelope topic %q", topic)
+		return fmt.Errorf("envelope topic %q: %w", topic, err)
 	}
 
 	encoded, err = typeurl.MarshalAny(event)
@@ -150,7 +150,7 @@ func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *even
 	if len(fs) > 0 {
 		filter, err := filters.ParseAll(fs...)
 		if err != nil {
-			errq <- errors.Wrapf(err, "failed parsing subscription filters")
+			errq <- fmt.Errorf("failed parsing subscription filters: %w", err)
 			closeAll()
 			return
 		}
@@ -175,7 +175,7 @@ func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *even
 					// TODO(stevvooe): For the most part, we are well protected
 					// from this condition. Both Forward and Publish protect
 					// from this.
-					err = errors.Errorf("invalid envelope encountered %#v; please file a bug", ev)
+					err = fmt.Errorf("invalid envelope encountered %#v; please file a bug", ev)
 					break
 				}
 
@@ -203,21 +203,21 @@ func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *even
 
 func validateTopic(topic string) error {
 	if topic == "" {
-		return errors.Wrap(errdefs.ErrInvalidArgument, "must not be empty")
+		return fmt.Errorf("must not be empty: %w", errdefs.ErrInvalidArgument)
 	}
 
 	if topic[0] != '/' {
-		return errors.Wrapf(errdefs.ErrInvalidArgument, "must start with '/'")
+		return fmt.Errorf("must start with '/': %w", errdefs.ErrInvalidArgument)
 	}
 
 	if len(topic) == 1 {
-		return errors.Wrapf(errdefs.ErrInvalidArgument, "must have at least one component")
+		return fmt.Errorf("must have at least one component: %w", errdefs.ErrInvalidArgument)
 	}
 
 	components := strings.Split(topic[1:], "/")
 	for _, component := range components {
 		if err := identifiers.Validate(component); err != nil {
-			return errors.Wrapf(err, "failed validation on component %q", component)
+			return fmt.Errorf("failed validation on component %q: %w", component, err)
 		}
 	}
 
@@ -226,15 +226,15 @@ func validateTopic(topic string) error {
 
 func validateEnvelope(envelope *events.Envelope) error {
 	if err := identifiers.Validate(envelope.Namespace); err != nil {
-		return errors.Wrapf(err, "event envelope has invalid namespace")
+		return fmt.Errorf("event envelope has invalid namespace: %w", err)
 	}
 
 	if err := validateTopic(envelope.Topic); err != nil {
-		return errors.Wrapf(err, "envelope topic %q", envelope.Topic)
+		return fmt.Errorf("envelope topic %q: %w", envelope.Topic, err)
 	}
 
 	if envelope.Timestamp.IsZero() {
-		return errors.Wrapf(errdefs.ErrInvalidArgument, "timestamp must be set on forwarded event")
+		return fmt.Errorf("timestamp must be set on forwarded event: %w", errdefs.ErrInvalidArgument)
 	}
 
 	return nil

+ 4 - 5
vendor/github.com/containerd/containerd/filters/parser.go

@@ -21,7 +21,6 @@ import (
 	"io"
 
 	"github.com/containerd/containerd/errdefs"
-	"github.com/pkg/errors"
 )
 
 /*
@@ -71,7 +70,7 @@ func ParseAll(ss ...string) (Filter, error) {
 	for _, s := range ss {
 		f, err := Parse(s)
 		if err != nil {
-			return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error())
+			return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument)
 		}
 
 		fs = append(fs, f)
@@ -90,7 +89,7 @@ func (p *parser) parse() (Filter, error) {
 
 	ss, err := p.selectors()
 	if err != nil {
-		return nil, errors.Wrap(err, "filters")
+		return nil, fmt.Errorf("filters: %w", err)
 	}
 
 	return ss, nil
@@ -284,9 +283,9 @@ func (pe parseError) Error() string {
 }
 
 func (p *parser) mkerr(pos int, format string, args ...interface{}) error {
-	return errors.Wrap(parseError{
+	return fmt.Errorf("parse error: %w", parseError{
 		input: p.input,
 		pos:   pos,
 		msg:   fmt.Sprintf(format, args...),
-	}, "parse error")
+	})
 }

+ 1 - 2
vendor/github.com/containerd/containerd/filters/quote.go

@@ -17,9 +17,8 @@
 package filters
 
 import (
+	"errors"
 	"unicode/utf8"
-
-	"github.com/pkg/errors"
 )
 
 // NOTE(stevvooe): Most of this code in this file is copied from the stdlib

+ 2 - 0
vendor/github.com/containerd/containerd/gc/gc.go

@@ -59,6 +59,8 @@ type Stats interface {
 //
 // We can probably use this to inform a design for incremental GC by injecting
 // callbacks to the set modification algorithms.
+//
+// https://en.wikipedia.org/wiki/Tracing_garbage_collection#Tri-color_marking
 func Tricolor(roots []Node, refs func(ref Node) ([]Node, error)) (map[Node]struct{}, error) {
 	var (
 		grays     []Node                // maintain a gray "stack"

+ 4 - 4
vendor/github.com/containerd/containerd/identifiers/validate.go

@@ -25,10 +25,10 @@
 package identifiers
 
 import (
+	"fmt"
 	"regexp"
 
 	"github.com/containerd/containerd/errdefs"
-	"github.com/pkg/errors"
 )
 
 const (
@@ -51,15 +51,15 @@ var (
 // In general identifiers that pass this validation should be safe for use as filesystem path components.
 func Validate(s string) error {
 	if len(s) == 0 {
-		return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier must not be empty")
+		return fmt.Errorf("identifier must not be empty: %w", errdefs.ErrInvalidArgument)
 	}
 
 	if len(s) > maxLength {
-		return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q greater than maximum length (%d characters)", s, maxLength)
+		return fmt.Errorf("identifier %q greater than maximum length (%d characters): %w", s, maxLength, errdefs.ErrInvalidArgument)
 	}
 
 	if !identifierRe.MatchString(s) {
-		return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q must match %v", s, identifierRe)
+		return fmt.Errorf("identifier %q must match %v: %w", s, identifierRe, errdefs.ErrInvalidArgument)
 	}
 	return nil
 }

+ 9 - 3
vendor/github.com/containerd/containerd/image.go

@@ -19,6 +19,7 @@ package containerd
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"strings"
 	"sync/atomic"
@@ -33,7 +34,6 @@ import (
 	"github.com/opencontainers/go-digest"
 	"github.com/opencontainers/image-spec/identity"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 	"golang.org/x/sync/semaphore"
 )
 
@@ -61,6 +61,8 @@ type Image interface {
 	ContentStore() content.Store
 	// Metadata returns the underlying image metadata
 	Metadata() images.Image
+	// Platform returns the platform match comparer. Can be nil.
+	Platform() platforms.MatchComparer
 }
 
 type usageOptions struct {
@@ -397,10 +399,10 @@ func (i *image) getLayers(ctx context.Context, platform platforms.MatchComparer,
 	cs := i.ContentStore()
 	diffIDs, err := i.i.RootFS(ctx, cs, platform)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to resolve rootfs")
+		return nil, fmt.Errorf("failed to resolve rootfs: %w", err)
 	}
 	if len(diffIDs) != len(manifest.Layers) {
-		return nil, errors.Errorf("mismatched image rootfs and manifest layers")
+		return nil, errors.New("mismatched image rootfs and manifest layers")
 	}
 	layers := make([]rootfs.Layer, len(diffIDs))
 	for i := range diffIDs {
@@ -448,3 +450,7 @@ func (i *image) checkSnapshotterSupport(ctx context.Context, snapshotterName str
 func (i *image) ContentStore() content.Store {
 	return i.client.ContentStore()
 }
+
+func (i *image) Platform() platforms.MatchComparer {
+	return i.platform
+}

+ 10 - 10
vendor/github.com/containerd/containerd/images/archive/exporter.go

@@ -20,6 +20,7 @@ import (
 	"archive/tar"
 	"context"
 	"encoding/json"
+	"fmt"
 	"io"
 	"path"
 	"sort"
@@ -31,7 +32,6 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	ocispecs "github.com/opencontainers/image-spec/specs-go"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 type exportOptions struct {
@@ -230,7 +230,7 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts
 							manifest: manifests[0],
 						}
 					} else if eo.platform != nil {
-						return errors.Wrap(errdefs.ErrNotFound, "no manifest found for platform")
+						return fmt.Errorf("no manifest found for platform: %w", errdefs.ErrNotFound)
 					}
 				}
 				resolvedIndex[desc.Digest] = d
@@ -243,14 +243,14 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts
 
 			}
 		default:
-			return errors.Wrap(errdefs.ErrInvalidArgument, "only manifests may be exported")
+			return fmt.Errorf("only manifests may be exported: %w", errdefs.ErrInvalidArgument)
 		}
 	}
 
 	if len(dManifests) > 0 {
 		tr, err := manifestsRecord(ctx, store, dManifests)
 		if err != nil {
-			return errors.Wrap(err, "unable to create manifests file")
+			return fmt.Errorf("unable to create manifests file: %w", err)
 		}
 
 		records = append(records, tr)
@@ -316,7 +316,7 @@ func blobRecord(cs content.Provider, desc ocispec.Descriptor, opts *blobRecordOp
 		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
 			r, err := cs.ReaderAt(ctx, desc)
 			if err != nil {
-				return 0, errors.Wrap(err, "failed to get reader")
+				return 0, fmt.Errorf("failed to get reader: %w", err)
 			}
 			defer r.Close()
 
@@ -325,10 +325,10 @@ func blobRecord(cs content.Provider, desc ocispec.Descriptor, opts *blobRecordOp
 
 			n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
 			if err != nil {
-				return 0, errors.Wrap(err, "failed to copy to tar")
+				return 0, fmt.Errorf("failed to copy to tar: %w", err)
 			}
 			if dgstr.Digest() != desc.Digest {
-				return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
+				return 0, fmt.Errorf("unexpected digest %s copied", dgstr.Digest())
 			}
 			return n, nil
 		},
@@ -424,7 +424,7 @@ func manifestsRecord(ctx context.Context, store content.Provider, manifests map[
 			return tarRecord{}, err
 		}
 		if err := manifest.Config.Digest.Validate(); err != nil {
-			return tarRecord{}, errors.Wrapf(err, "invalid manifest %q", m.manifest.Digest)
+			return tarRecord{}, fmt.Errorf("invalid manifest %q: %w", m.manifest.Digest, err)
 		}
 
 		dgst := manifest.Config.Digest
@@ -491,10 +491,10 @@ func writeTar(ctx context.Context, tw *tar.Writer, recordsWithEmpty []tarRecord)
 				return err
 			}
 			if n != record.Header.Size {
-				return errors.Errorf("unexpected copy size for %s", record.Header.Name)
+				return fmt.Errorf("unexpected copy size for %s", record.Header.Name)
 			}
 		} else if record.Header.Size > 0 {
-			return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
+			return fmt.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
 		}
 	}
 	return nil

+ 69 - 27
vendor/github.com/containerd/containerd/images/archive/importer.go

@@ -22,9 +22,9 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"path"
 
 	"github.com/containerd/containerd/archive/compression"
@@ -32,10 +32,10 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/platforms"
 	digest "github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/image-spec/specs-go"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 type importOpts struct {
@@ -104,16 +104,16 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt
 		hdrName := path.Clean(hdr.Name)
 		if hdrName == ocispec.ImageLayoutFile {
 			if err = onUntarJSON(tr, &ociLayout); err != nil {
-				return ocispec.Descriptor{}, errors.Wrapf(err, "untar oci layout %q", hdr.Name)
+				return ocispec.Descriptor{}, fmt.Errorf("untar oci layout %q: %w", hdr.Name, err)
 			}
 		} else if hdrName == "manifest.json" {
 			if err = onUntarJSON(tr, &mfsts); err != nil {
-				return ocispec.Descriptor{}, errors.Wrapf(err, "untar manifest %q", hdr.Name)
+				return ocispec.Descriptor{}, fmt.Errorf("untar manifest %q: %w", hdr.Name, err)
 			}
 		} else {
 			dgst, err := onUntarBlob(ctx, tr, store, hdr.Size, "tar-"+hdrName)
 			if err != nil {
-				return ocispec.Descriptor{}, errors.Wrapf(err, "failed to ingest %q", hdr.Name)
+				return ocispec.Descriptor{}, fmt.Errorf("failed to ingest %q: %w", hdr.Name, err)
 			}
 
 			blobs[hdrName] = ocispec.Descriptor{
@@ -128,12 +128,12 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt
 	// as Docker v1.1 or v1.2.
 	if ociLayout.Version != "" {
 		if ociLayout.Version != ocispec.ImageLayoutVersion {
-			return ocispec.Descriptor{}, errors.Errorf("unsupported OCI version %s", ociLayout.Version)
+			return ocispec.Descriptor{}, fmt.Errorf("unsupported OCI version %s", ociLayout.Version)
 		}
 
 		idx, ok := blobs["index.json"]
 		if !ok {
-			return ocispec.Descriptor{}, errors.Errorf("missing index.json in OCI layout %s", ocispec.ImageLayoutVersion)
+			return ocispec.Descriptor{}, fmt.Errorf("missing index.json in OCI layout %s", ocispec.ImageLayoutVersion)
 		}
 
 		idx.MediaType = ocispec.MediaTypeImageIndex
@@ -141,13 +141,13 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt
 	}
 
 	if mfsts == nil {
-		return ocispec.Descriptor{}, errors.Errorf("unrecognized image format")
+		return ocispec.Descriptor{}, errors.New("unrecognized image format")
 	}
 
 	for name, linkname := range symlinks {
 		desc, ok := blobs[linkname]
 		if !ok {
-			return ocispec.Descriptor{}, errors.Errorf("no target for symlink layer from %q to %q", name, linkname)
+			return ocispec.Descriptor{}, fmt.Errorf("no target for symlink layer from %q to %q", name, linkname)
 		}
 		blobs[name] = desc
 	}
@@ -160,13 +160,13 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt
 	for _, mfst := range mfsts {
 		config, ok := blobs[mfst.Config]
 		if !ok {
-			return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config)
+			return ocispec.Descriptor{}, fmt.Errorf("image config %q not found", mfst.Config)
 		}
 		config.MediaType = images.MediaTypeDockerSchema2Config
 
 		layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress)
 		if err != nil {
-			return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers")
+			return ocispec.Descriptor{}, fmt.Errorf("failed to resolve layers: %w", err)
 		}
 
 		manifest := struct {
@@ -183,18 +183,28 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt
 
 		desc, err := writeManifest(ctx, store, manifest, manifest.MediaType)
 		if err != nil {
-			return ocispec.Descriptor{}, errors.Wrap(err, "write docker manifest")
+			return ocispec.Descriptor{}, fmt.Errorf("write docker manifest: %w", err)
 		}
 
-		platforms, err := images.Platforms(ctx, store, desc)
+		imgPlatforms, err := images.Platforms(ctx, store, desc)
 		if err != nil {
-			return ocispec.Descriptor{}, errors.Wrap(err, "unable to resolve platform")
+			return ocispec.Descriptor{}, fmt.Errorf("unable to resolve platform: %w", err)
 		}
-		if len(platforms) > 0 {
+		if len(imgPlatforms) > 0 {
 			// Only one platform can be resolved from non-index manifest,
 			// The platform can only come from the config included above,
 			// if the config has no platform it can be safely omitted.
-			desc.Platform = &platforms[0]
+			desc.Platform = &imgPlatforms[0]
+
+			// If the image we've just imported is a Windows image without the OSVersion set,
+			// we could just assume it matches this host's OS Version. Without this, the
+			// children labels might not be set on the image content, leading to it being
+			// garbage collected, breaking the image.
+			// See: https://github.com/containerd/containerd/issues/5690
+			if desc.Platform.OS == "windows" && desc.Platform.OSVersion == "" {
+				platform := platforms.DefaultSpec()
+				desc.Platform.OSVersion = platform.OSVersion
+			}
 		}
 
 		if len(mfst.RepoTags) == 0 {
@@ -223,7 +233,7 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt
 }
 
 func onUntarJSON(r io.Reader, j interface{}) error {
-	b, err := ioutil.ReadAll(r)
+	b, err := io.ReadAll(r)
 	if err != nil {
 		return err
 	}
@@ -247,7 +257,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
 	for i, f := range layerFiles {
 		desc, ok := blobs[f]
 		if !ok {
-			return nil, errors.Errorf("layer %q not found", f)
+			return nil, fmt.Errorf("layer %q not found", f)
 		}
 		layers[i] = desc
 		descs[desc.Digest] = &layers[i]
@@ -259,15 +269,19 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
 		if ok {
 			desc := descs[digest.Digest(dgst)]
 			if desc != nil {
-				desc.MediaType = images.MediaTypeDockerSchema2LayerGzip
 				desc.Digest = info.Digest
 				desc.Size = info.Size
+				mediaType, err := detectLayerMediaType(ctx, store, *desc)
+				if err != nil {
+					return fmt.Errorf("failed to detect media type of layer: %w", err)
+				}
+				desc.MediaType = mediaType
 			}
 		}
 		return nil
 	}, filters...)
 	if err != nil {
-		return nil, errors.Wrap(err, "failure checking for compressed blobs")
+		return nil, fmt.Errorf("failure checking for compressed blobs: %w", err)
 	}
 
 	for i, desc := range layers {
@@ -277,11 +291,12 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
 		// Open blob, resolve media type
 		ra, err := store.ReaderAt(ctx, desc)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to open %q (%s)", layerFiles[i], desc.Digest)
+			return nil, fmt.Errorf("failed to open %q (%s): %w", layerFiles[i], desc.Digest, err)
 		}
 		s, err := compression.DecompressStream(content.NewReader(ra))
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i])
+			ra.Close()
+			return nil, fmt.Errorf("failed to detect compression for %q: %w", layerFiles[i], err)
 		}
 		if s.GetCompression() == compression.Uncompressed {
 			if compress {
@@ -292,6 +307,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
 				layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels))
 				if err != nil {
 					s.Close()
+					ra.Close()
 					return nil, err
 				}
 				layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
@@ -302,7 +318,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
 			layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
 		}
 		s.Close()
-
+		ra.Close()
 	}
 	return layers, nil
 }
@@ -310,7 +326,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
 func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) {
 	w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
 	if err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to open writer")
+		return ocispec.Descriptor{}, fmt.Errorf("failed to open writer: %w", err)
 	}
 
 	defer func() {
@@ -320,7 +336,7 @@ func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string
 		}
 	}()
 	if err := w.Truncate(0); err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to truncate writer")
+		return ocispec.Descriptor{}, fmt.Errorf("failed to truncate writer: %w", err)
 	}
 
 	cw, err := compression.CompressStream(w, compression.Gzip)
@@ -337,7 +353,7 @@ func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string
 
 	cst, err := w.Status()
 	if err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to get writer status")
+		return ocispec.Descriptor{}, fmt.Errorf("failed to get writer status: %w", err)
 	}
 
 	desc.Digest = w.Digest()
@@ -345,7 +361,7 @@ func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string
 
 	if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil {
 		if !errdefs.IsAlreadyExists(err) {
-			return ocispec.Descriptor{}, errors.Wrap(err, "failed to commit")
+			return ocispec.Descriptor{}, fmt.Errorf("failed to commit: %w", err)
 		}
 	}
 
@@ -369,3 +385,29 @@ func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{
 
 	return desc, nil
 }
+
+func detectLayerMediaType(ctx context.Context, store content.Store, desc ocispec.Descriptor) (string, error) {
+	var mediaType string
+	// need to parse existing blob to use the proper media type
+	bytes := make([]byte, 10)
+	ra, err := store.ReaderAt(ctx, desc)
+	if err != nil {
+		return "", fmt.Errorf("failed to read content store to detect layer media type: %w", err)
+	}
+	defer ra.Close()
+	_, err = ra.ReadAt(bytes, 0)
+	if err != nil && err != io.EOF {
+		return "", fmt.Errorf("failed to read header bytes from layer to detect media type: %w", err)
+	}
+	if err == io.EOF {
+		// in the case of an empty layer then the media type should be uncompressed
+		return images.MediaTypeDockerSchema2Layer, nil
+	}
+	switch c := compression.DetectCompression(bytes); c {
+	case compression.Uncompressed:
+		mediaType = images.MediaTypeDockerSchema2Layer
+	default:
+		mediaType = images.MediaTypeDockerSchema2LayerGzip
+	}
+	return mediaType, nil
+}

+ 3 - 3
vendor/github.com/containerd/containerd/images/archive/reference.go

@@ -17,12 +17,12 @@
 package archive
 
 import (
+	"fmt"
 	"strings"
 
 	"github.com/containerd/containerd/reference"
 	distref "github.com/containerd/containerd/reference/docker"
 	"github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
 )
 
 // FilterRefPrefix restricts references to having the given image
@@ -72,7 +72,7 @@ func normalizeReference(ref string) (string, error) {
 	// TODO: Replace this function to not depend on reference package
 	normalized, err := distref.ParseDockerRef(ref)
 	if err != nil {
-		return "", errors.Wrapf(err, "normalize image ref %q", ref)
+		return "", fmt.Errorf("normalize image ref %q: %w", ref, err)
 	}
 
 	return normalized.String(), nil
@@ -81,7 +81,7 @@ func normalizeReference(ref string) (string, error) {
 func familiarizeReference(ref string) (string, error) {
 	named, err := distref.ParseNormalizedNamed(ref)
 	if err != nil {
-		return "", errors.Wrapf(err, "failed to parse %q", ref)
+		return "", fmt.Errorf("failed to parse %q: %w", ref, err)
 	}
 	named = distref.TagNameOnly(named)
 

+ 38 - 4
vendor/github.com/containerd/containerd/images/handlers.go

@@ -18,6 +18,7 @@ package images
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"sort"
 
@@ -25,7 +26,6 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/platforms"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 	"golang.org/x/sync/errgroup"
 	"golang.org/x/sync/semaphore"
 )
@@ -33,13 +33,17 @@ import (
 var (
 	// ErrSkipDesc is used to skip processing of a descriptor and
 	// its descendants.
-	ErrSkipDesc = fmt.Errorf("skip descriptor")
+	ErrSkipDesc = errors.New("skip descriptor")
 
 	// ErrStopHandler is used to signify that the descriptor
 	// has been handled and should not be handled further.
 	// This applies only to a single descriptor in a handler
 	// chain and does not apply to descendant descriptors.
-	ErrStopHandler = fmt.Errorf("stop handler")
+	ErrStopHandler = errors.New("stop handler")
+
+	// ErrEmptyWalk is used when the WalkNotEmpty handlers return no
+	// children (e.g.: they were filtered out).
+	ErrEmptyWalk = errors.New("image might be filtered out")
 )
 
 // Handler handles image manifests
@@ -99,6 +103,36 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err
 			}
 		}
 	}
+	return nil
+}
+
+// WalkNotEmpty works the same way Walk does, with the exception that it ensures that
+// some children are still found by Walking the descriptors (for example, not all of
+// them have been filtered out by one of the handlers). If there are no children,
+// then an ErrEmptyWalk error is returned.
+func WalkNotEmpty(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
+	isEmpty := true
+	var notEmptyHandler HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		children, err := handler.Handle(ctx, desc)
+		if err != nil {
+			return children, err
+		}
+
+		if len(children) > 0 {
+			isEmpty = false
+		}
+
+		return children, nil
+	}
+
+	err := Walk(ctx, notEmptyHandler, descs...)
+	if err != nil {
+		return err
+	}
+
+	if isEmpty {
+		return ErrEmptyWalk
+	}
 
 	return nil
 }
@@ -274,7 +308,7 @@ func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc
 
 			if n > 0 {
 				if len(children) == 0 {
-					return children, errors.Wrap(errdefs.ErrNotFound, "no match for platform in manifest")
+					return children, fmt.Errorf("no match for platform in manifest: %w", errdefs.ErrNotFound)
 				}
 				if len(children) > n {
 					children = children[:n]

+ 10 - 11
vendor/github.com/containerd/containerd/images/image.go

@@ -29,7 +29,6 @@ import (
 	"github.com/containerd/containerd/platforms"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 // Image provides the model for how containerd views container images.
@@ -115,7 +114,7 @@ func (image *Image) Size(ctx context.Context, provider content.Provider, platfor
 	var size int64
 	return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
 		if desc.Size < 0 {
-			return nil, errors.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType)
+			return nil, fmt.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType)
 		}
 		size += desc.Size
 		return nil, nil
@@ -156,7 +155,7 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
 			}
 
 			if err := validateMediaType(p, desc.MediaType); err != nil {
-				return nil, errors.Wrapf(err, "manifest: invalid desc %s", desc.Digest)
+				return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err)
 			}
 
 			var manifest ocispec.Manifest
@@ -200,7 +199,7 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
 			}
 
 			if err := validateMediaType(p, desc.MediaType); err != nil {
-				return nil, errors.Wrapf(err, "manifest: invalid desc %s", desc.Digest)
+				return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err)
 			}
 
 			var idx ocispec.Index
@@ -236,15 +235,15 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
 			}
 			return descs, nil
 		}
-		return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
+		return nil, fmt.Errorf("unexpected media type %v for %v: %w", desc.MediaType, desc.Digest, errdefs.ErrNotFound)
 	}), image); err != nil {
 		return ocispec.Manifest{}, err
 	}
 
 	if len(m) == 0 {
-		err := errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest)
+		err := fmt.Errorf("manifest %v: %w", image.Digest, errdefs.ErrNotFound)
 		if wasIndex {
-			err = errors.Wrapf(errdefs.ErrNotFound, "no match for platform in manifest %v", image.Digest)
+			err = fmt.Errorf("no match for platform in manifest %v: %w", image.Digest, errdefs.ErrNotFound)
 		}
 		return ocispec.Manifest{}, err
 	}
@@ -309,7 +308,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip
 			return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
 		}
 
-		return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest)
+		return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", image.Digest, err)
 	}
 
 	// TODO(stevvooe): It is possible that referenced conponents could have
@@ -324,7 +323,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip
 				missing = append(missing, desc)
 				continue
 			} else {
-				return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest)
+				return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", desc.Digest, err)
 			}
 		}
 		ra.Close()
@@ -346,7 +345,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
 		}
 
 		if err := validateMediaType(p, desc.MediaType); err != nil {
-			return nil, errors.Wrapf(err, "children: invalid desc %s", desc.Digest)
+			return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err)
 		}
 
 		// TODO(stevvooe): We just assume oci manifest, for now. There may be
@@ -365,7 +364,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
 		}
 
 		if err := validateMediaType(p, desc.MediaType); err != nil {
-			return nil, errors.Wrapf(err, "children: invalid desc %s", desc.Digest)
+			return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err)
 		}
 
 		var index ocispec.Index

+ 2 - 2
vendor/github.com/containerd/containerd/images/mediatypes.go

@@ -18,12 +18,12 @@ package images
 
 import (
 	"context"
+	"fmt"
 	"sort"
 	"strings"
 
 	"github.com/containerd/containerd/errdefs"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 // mediatype definitions for image components handled in containerd.
@@ -87,7 +87,7 @@ func DiffCompression(ctx context.Context, mediaType string) (string, error) {
 		}
 		return "", nil
 	default:
-		return "", errors.Wrapf(errdefs.ErrNotImplemented, "unrecognised mediatype %s", mediaType)
+		return "", fmt.Errorf("unrecognised mediatype %s: %w", mediaType, errdefs.ErrNotImplemented)
 	}
 }
 

+ 37 - 9
vendor/github.com/containerd/containerd/import.go

@@ -31,11 +31,13 @@ import (
 )
 
 type importOpts struct {
-	indexName    string
-	imageRefT    func(string) string
-	dgstRefT     func(digest.Digest) string
-	allPlatforms bool
-	compress     bool
+	indexName       string
+	imageRefT       func(string) string
+	dgstRefT        func(digest.Digest) string
+	skipDgstRef     func(string) bool
+	allPlatforms    bool
+	platformMatcher platforms.MatchComparer
+	compress        bool
 }
 
 // ImportOpt allows the caller to specify import specific options
@@ -59,6 +61,17 @@ func WithDigestRef(f func(digest.Digest) string) ImportOpt {
 	}
 }
 
+// WithSkipDigestRef is used to specify when to skip applying
+// WithDigestRef. The callback receives an image reference (or an empty
+// string if not specified in the image). When the callback returns true,
+// the skip occurs.
+func WithSkipDigestRef(f func(string) bool) ImportOpt {
+	return func(c *importOpts) error {
+		c.skipDgstRef = f
+		return nil
+	}
+}
+
 // WithIndexName creates a tag pointing to the imported index
 func WithIndexName(name string) ImportOpt {
 	return func(c *importOpts) error {
@@ -75,6 +88,14 @@ func WithAllPlatforms(allPlatforms bool) ImportOpt {
 	}
 }
 
+// WithImportPlatform is used to import content for specific platform.
+func WithImportPlatform(platformMacher platforms.MatchComparer) ImportOpt {
+	return func(c *importOpts) error {
+		c.platformMatcher = platformMacher
+		return nil
+	}
+}
+
 // WithImportCompression compresses uncompressed layers on import.
 // This is used for import formats which do not include the manifest.
 func WithImportCompression() ImportOpt {
@@ -123,9 +144,11 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 			Target: index,
 		})
 	}
-	var platformMatcher = platforms.All
-	if !iopts.allPlatforms {
-		platformMatcher = c.platform
+	var platformMatcher = c.platform
+	if iopts.allPlatforms {
+		platformMatcher = platforms.All
+	} else if iopts.platformMatcher != nil {
+		platformMatcher = iopts.platformMatcher
 	}
 
 	var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
@@ -152,6 +175,11 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 					Target: m,
 				})
 			}
+			if iopts.skipDgstRef != nil {
+				if iopts.skipDgstRef(name) {
+					continue
+				}
+			}
 			if iopts.dgstRefT != nil {
 				ref := iopts.dgstRefT(m.Digest)
 				if ref != "" {
@@ -168,7 +196,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
 
 	handler = images.FilterPlatforms(handler, platformMatcher)
 	handler = images.SetChildrenLabels(cs, handler)
-	if err := images.Walk(ctx, handler, index); err != nil {
+	if err := images.WalkNotEmpty(ctx, handler, index); err != nil {
 		return nil, err
 	}
 

+ 6 - 2
vendor/github.com/containerd/containerd/install.go

@@ -19,6 +19,8 @@ package containerd
 import (
 	"archive/tar"
 	"context"
+	"errors"
+	"fmt"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -28,7 +30,6 @@ import (
 	"github.com/containerd/containerd/archive/compression"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
-	"github.com/pkg/errors"
 )
 
 // Install a binary image into the opt service
@@ -66,6 +67,7 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts)
 		cr := content.NewReader(ra)
 		r, err := compression.DecompressStream(cr)
 		if err != nil {
+			ra.Close()
 			return err
 		}
 		if _, err := archive.Apply(ctx, path, r, archive.WithFilter(func(hdr *tar.Header) (bool, error) {
@@ -81,15 +83,17 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts)
 			}
 			if result && !config.Replace {
 				if _, err := os.Lstat(filepath.Join(path, hdr.Name)); err == nil {
-					return false, errors.Errorf("cannot replace %s in %s", hdr.Name, path)
+					return false, fmt.Errorf("cannot replace %s in %s", hdr.Name, path)
 				}
 			}
 			return result, nil
 		})); err != nil {
 			r.Close()
+			ra.Close()
 			return err
 		}
 		r.Close()
+		ra.Close()
 	}
 	return nil
 }

+ 3 - 2
vendor/github.com/containerd/containerd/labels/validate.go

@@ -17,8 +17,9 @@
 package labels
 
 import (
+	"fmt"
+
 	"github.com/containerd/containerd/errdefs"
-	"github.com/pkg/errors"
 )
 
 const (
@@ -31,7 +32,7 @@ func Validate(k, v string) error {
 		if len(k) > 10 {
 			k = k[:10]
 		}
-		return errors.Wrapf(errdefs.ErrInvalidArgument, "label key and value greater than maximum size (%d bytes), key: %s", maxSize, k)
+		return fmt.Errorf("label key and value greater than maximum size (%d bytes), key: %s: %w", maxSize, k, errdefs.ErrInvalidArgument)
 	}
 	return nil
 }

Některé soubory nejsou zobrazeny, neboť je v těchto rozdílových datech změněno mnoho souborů