瀏覽代碼

vendor: github.com/docker/swarmkit 616e8db4c3b0

Signed-off-by: Cory Snider <csnider@mirantis.com>
Cory Snider 3 年之前
父節點
當前提交
06c797f517
共有 100 個文件被更改,包括 19062 次插入和 6319 次刪除
  1. 1 1
      daemon/cluster/executor/container/executor.go
  2. 1 1
      daemon/oci_windows_test.go
  3. 15 4
      vendor.mod
  4. 75 7
      vendor.sum
  5. 1 2
      vendor/github.com/container-storage-interface/spec/LICENSE
  6. 6280 0
      vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go
  7. 0 5
      vendor/github.com/coreos/etcd/NOTICE
  8. 0 284
      vendor/github.com/coreos/etcd/raft/progress.go
  9. 0 1407
      vendor/github.com/coreos/etcd/raft/raft.go
  10. 0 95
      vendor/github.com/coreos/etcd/raft/raftpb/raft.proto
  11. 0 129
      vendor/github.com/coreos/etcd/raft/util.go
  12. 0 191
      vendor/github.com/coreos/go-systemd/LICENSE
  13. 0 5
      vendor/github.com/coreos/go-systemd/NOTICE
  14. 0 182
      vendor/github.com/coreos/go-systemd/journal/journal.go
  15. 0 5
      vendor/github.com/coreos/pkg/NOTICE
  16. 0 39
      vendor/github.com/coreos/pkg/capnslog/README.md
  17. 0 157
      vendor/github.com/coreos/pkg/capnslog/formatters.go
  18. 0 96
      vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
  19. 0 49
      vendor/github.com/coreos/pkg/capnslog/init.go
  20. 0 68
      vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
  21. 0 39
      vendor/github.com/coreos/pkg/capnslog/log_hijack.go
  22. 0 245
      vendor/github.com/coreos/pkg/capnslog/logmap.go
  23. 0 191
      vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
  24. 0 65
      vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
  25. 35 1
      vendor/github.com/docker/swarmkit/agent/agent.go
  26. 120 0
      vendor/github.com/docker/swarmkit/agent/csi/plugin/manager.go
  27. 459 0
      vendor/github.com/docker/swarmkit/agent/csi/plugin/plugin.go
  28. 227 0
      vendor/github.com/docker/swarmkit/agent/csi/volumes.go
  29. 18 2
      vendor/github.com/docker/swarmkit/agent/dependency.go
  30. 11 9
      vendor/github.com/docker/swarmkit/agent/exec/errors.go
  31. 44 0
      vendor/github.com/docker/swarmkit/agent/exec/executor.go
  32. 54 6
      vendor/github.com/docker/swarmkit/agent/reporter.go
  33. 18 0
      vendor/github.com/docker/swarmkit/agent/session.go
  34. 69 9
      vendor/github.com/docker/swarmkit/agent/worker.go
  35. 866 182
      vendor/github.com/docker/swarmkit/api/api.pb.txt
  36. 887 187
      vendor/github.com/docker/swarmkit/api/control.pb.go
  37. 82 0
      vendor/github.com/docker/swarmkit/api/control.proto
  38. 860 78
      vendor/github.com/docker/swarmkit/api/dispatcher.pb.go
  39. 34 1
      vendor/github.com/docker/swarmkit/api/dispatcher.proto
  40. 720 201
      vendor/github.com/docker/swarmkit/api/objects.pb.go
  41. 44 0
      vendor/github.com/docker/swarmkit/api/objects.proto
  42. 164 66
      vendor/github.com/docker/swarmkit/api/raft.pb.go
  43. 2 1
      vendor/github.com/docker/swarmkit/api/raft.proto
  44. 103 33
      vendor/github.com/docker/swarmkit/api/snapshot.pb.go
  45. 1 0
      vendor/github.com/docker/swarmkit/api/snapshot.proto
  46. 794 149
      vendor/github.com/docker/swarmkit/api/specs.pb.go
  47. 68 0
      vendor/github.com/docker/swarmkit/api/specs.proto
  48. 2715 1941
      vendor/github.com/docker/swarmkit/api/types.pb.go
  49. 507 0
      vendor/github.com/docker/swarmkit/api/types.proto
  50. 174 76
      vendor/github.com/docker/swarmkit/api/watch.pb.go
  51. 1 0
      vendor/github.com/docker/swarmkit/api/watch.proto
  52. 4 5
      vendor/github.com/docker/swarmkit/ca/certificates.go
  53. 1 2
      vendor/github.com/docker/swarmkit/ca/external.go
  54. 8 9
      vendor/github.com/docker/swarmkit/ca/keyreadwriter.go
  55. 1 2
      vendor/github.com/docker/swarmkit/ioutils/ioutils.go
  56. 1 0
      vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/drivers_unsupported.go
  57. 1 1
      vendor/github.com/docker/swarmkit/manager/controlapi/config.go
  58. 256 0
      vendor/github.com/docker/swarmkit/manager/controlapi/volume.go
  59. 138 0
      vendor/github.com/docker/swarmkit/manager/csi/convert.go
  60. 29 0
      vendor/github.com/docker/swarmkit/manager/csi/doc.go
  61. 481 0
      vendor/github.com/docker/swarmkit/manager/csi/manager.go
  62. 334 0
      vendor/github.com/docker/swarmkit/manager/csi/plugin.go
  63. 34 0
      vendor/github.com/docker/swarmkit/manager/csi/secret.go
  64. 138 10
      vendor/github.com/docker/swarmkit/manager/dispatcher/assignments.go
  65. 154 9
      vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
  66. 26 3
      vendor/github.com/docker/swarmkit/manager/manager.go
  67. 114 0
      vendor/github.com/docker/swarmkit/manager/orchestrator/volumeenforcer/volume_enforcer.go
  68. 61 0
      vendor/github.com/docker/swarmkit/manager/scheduler/filter.go
  69. 4 0
      vendor/github.com/docker/swarmkit/manager/scheduler/pipeline.go
  70. 210 3
      vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go
  71. 47 0
      vendor/github.com/docker/swarmkit/manager/scheduler/topology.go
  72. 319 0
      vendor/github.com/docker/swarmkit/manager/scheduler/volumes.go
  73. 1 1
      vendor/github.com/docker/swarmkit/manager/state/raft/membership/cluster.go
  74. 11 5
      vendor/github.com/docker/swarmkit/manager/state/raft/raft.go
  75. 2 2
      vendor/github.com/docker/swarmkit/manager/state/raft/storage.go
  76. 9 10
      vendor/github.com/docker/swarmkit/manager/state/raft/storage/snapwrap.go
  77. 11 9
      vendor/github.com/docker/swarmkit/manager/state/raft/storage/storage.go
  78. 13 14
      vendor/github.com/docker/swarmkit/manager/state/raft/storage/walwrap.go
  79. 2 2
      vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go
  80. 2 2
      vendor/github.com/docker/swarmkit/manager/state/raft/transport/transport.go
  81. 32 0
      vendor/github.com/docker/swarmkit/manager/state/store/by.go
  82. 36 15
      vendor/github.com/docker/swarmkit/manager/state/store/memory.go
  83. 26 1
      vendor/github.com/docker/swarmkit/manager/state/store/tasks.go
  84. 149 0
      vendor/github.com/docker/swarmkit/manager/state/store/volumes.go
  85. 4 5
      vendor/github.com/docker/swarmkit/node/node.go
  86. 9 0
      vendor/github.com/docker/swarmkit/template/getter.go
  87. 4 0
      vendor/github.com/docker/swarmkit/volumequeue/doc.go
  88. 215 0
      vendor/github.com/docker/swarmkit/volumequeue/queue.go
  89. 1 0
      vendor/github.com/docker/swarmkit/xnet/xnet_unix.go
  90. 1 0
      vendor/github.com/docker/swarmkit/xnet/xnet_windows.go
  91. 21 0
      vendor/github.com/dustin/go-humanize/.travis.yml
  92. 21 0
      vendor/github.com/dustin/go-humanize/LICENSE
  93. 124 0
      vendor/github.com/dustin/go-humanize/README.markdown
  94. 31 0
      vendor/github.com/dustin/go-humanize/big.go
  95. 173 0
      vendor/github.com/dustin/go-humanize/bigbytes.go
  96. 143 0
      vendor/github.com/dustin/go-humanize/bytes.go
  97. 116 0
      vendor/github.com/dustin/go-humanize/comma.go
  98. 40 0
      vendor/github.com/dustin/go-humanize/commaf.go
  99. 46 0
      vendor/github.com/dustin/go-humanize/ftoa.go
  100. 8 0
      vendor/github.com/dustin/go-humanize/humanize.go

+ 1 - 1
daemon/cluster/executor/container/executor.go

@@ -52,7 +52,7 @@ func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBac
 		pluginBackend: p,
 		imageBackend:  i,
 		volumeBackend: v,
-		dependencies:  agent.NewDependencyManager(),
+		dependencies:  agent.NewDependencyManager(b.PluginGetter()),
 	}
 }
 

+ 1 - 1
daemon/oci_windows_test.go

@@ -179,7 +179,7 @@ func TestSetWindowsCredentialSpecInSpec(t *testing.T) {
 	t.Run("happy path with a 'config://' option", func(t *testing.T) {
 		configID := "my-cred-spec"
 
-		dependencyManager := swarmagent.NewDependencyManager()
+		dependencyManager := swarmagent.NewDependencyManager(nil)
 		dependencyManager.Configs().Add(swarmapi.Config{
 			ID: configID,
 			Spec: swarmapi.ConfigSpec{

+ 15 - 4
vendor.mod

@@ -33,7 +33,7 @@ require (
 	github.com/docker/go-units v0.4.0
 	github.com/docker/libkv v0.2.2-0.20211217103745-e480589147e3
 	github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4
-	github.com/docker/swarmkit v1.12.1-0.20210726173615-3629f50980f6
+	github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0
 	github.com/fluent/fluent-logger-golang v1.9.0
 	github.com/fsnotify/fsnotify v1.5.1
 	github.com/godbus/dbus/v5 v5.0.6
@@ -89,17 +89,17 @@ require (
 require (
 	code.cloudfoundry.org/clock v1.0.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+	github.com/akutz/memconn v0.1.0 // indirect
 	github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/cilium/ebpf v0.7.0 // indirect
+	github.com/container-storage-interface/spec v1.5.0 // indirect
 	github.com/containerd/console v1.0.3 // indirect
 	github.com/containerd/go-runc v1.0.0 // indirect
 	github.com/containerd/ttrpc v1.1.0 // indirect
-	github.com/coreos/etcd v3.3.27+incompatible // indirect
-	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
-	github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
+	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e // indirect
 	github.com/gofrs/flock v0.7.3 // indirect
 	github.com/gogo/googleapis v1.4.0 // indirect
@@ -129,11 +129,19 @@ require (
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.10.0 // indirect
 	github.com/prometheus/procfs v0.6.0 // indirect
+	github.com/rexray/gocsi v1.2.2 // indirect
 	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
 	github.com/tinylib/msgp v1.1.0 // indirect
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
 	github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.2 // indirect
+	go.etcd.io/etcd/pkg/v3 v3.5.2 // indirect
+	go.etcd.io/etcd/raft/v3 v3.5.2 // indirect
+	go.etcd.io/etcd/server/v3 v3.5.2 // indirect
 	go.opencensus.io v0.23.0 // indirect
+	go.uber.org/atomic v1.7.0 // indirect
+	go.uber.org/multierr v1.6.0 // indirect
+	go.uber.org/zap v1.17.0 // indirect
 	golang.org/x/crypto v0.0.0-20211202192323-5770296d904e // indirect
 	golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
 	golang.org/x/mod v0.4.2 // indirect
@@ -170,5 +178,8 @@ replace (
 	google.golang.org/grpc => google.golang.org/grpc v1.27.1
 )
 
+// Removes etcd dependency
+replace github.com/rexray/gocsi => github.com/dperny/gocsi v1.2.3-pre
+
 // autogen/winresources/dockerd is generated a build time, this replacement is only for the purpose of `go mod vendor`
 replace github.com/docker/docker/autogen/winresources/dockerd => ./hack/make/.resources-windows

+ 75 - 7
vendor.sum

@@ -66,9 +66,14 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
 github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91 h1:vX+gnvBc56EbWYrmlhYbFYRaeikAke1GL84N4BEYOFE=
 github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91/go.mod h1:cDLGBht23g0XQdLjzn6xOGXDkLK182YfINAaZEQLCHQ=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/akutz/gosync v0.1.0 h1:naxPT/aDYDh79PMwM3XmencmNQeYmpNFSZy4ZE9zIW0=
+github.com/akutz/gosync v0.1.0/go.mod h1:I8I4aiqJI1nqaeYOOB1WS+CgRJVVPqhct9Y4njywM84=
+github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A=
+github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20150106224455-eb0af217e5e9 h1:j0r1R47jEcPk5M3GY3tFbv7q5J6j0Ppler3q4Guh6C0=
 github.com/armon/go-metrics v0.0.0-20150106224455-eb0af217e5e9/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -78,6 +83,7 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l
 github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
 github.com/aws/aws-sdk-go v1.31.6 h1:nKjQbpXhdImctBh1e0iLg9iQW/X297LPPuY/9f92R2k=
 github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
 github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
@@ -98,6 +104,9 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0Bsq
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
 github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -117,7 +126,16 @@ github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2u
 github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5 h1:PqZ3bA4yzwywivzk7PBQWngJp2/PAS0bWRZerKteicY=
 github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
+github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo=
+github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s=
 github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
 github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
 github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
@@ -222,20 +240,16 @@ github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.27+incompatible h1:QIudLb9KeBsE5zyYxd1mjzRSkzLg9Wf9QlRwFgd6oTA=
-github.com/coreos/etcd v3.3.27+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
 github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5tgDm3YN7+9dYrpK96E5wFilTFWIDZOM=
 github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw=
 github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@@ -289,10 +303,13 @@ github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNE
 github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 h1:k8TfKGeAcDQFFQOGCQMRN04N4a9YrPlRMMKnzAuvM9Q=
 github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/docker/swarmkit v1.12.1-0.20210726173615-3629f50980f6 h1:mFQcXSzzNXVKAnl0KltjSQ7rbgipTYcXJns4sucurKA=
-github.com/docker/swarmkit v1.12.1-0.20210726173615-3629f50980f6/go.mod h1:n3Z4lIEl7g261ptkGDBcYi/3qBMDl9csaAhwi2MPejs=
+github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0 h1:YehAv2BPLfTm58HW04wRnNy8Oo/CAzWji7mjJ6UJWgM=
+github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0/go.mod h1:n3Z4lIEl7g261ptkGDBcYi/3qBMDl9csaAhwi2MPejs=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dperny/gocsi v1.2.3-pre h1:GRTvl8G6yEXYPyul1h6YAqtyxzUHTrQHo6G3xZpb9oM=
+github.com/dperny/gocsi v1.2.3-pre/go.mod h1:qQw5mIunz1RqMUfZcGJ9/Lt9EDaL0N3wPNYxFTuyLQo=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
@@ -306,6 +323,7 @@ github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e/go.mod h1:2H9hjfb
 github.com/fluent/fluent-logger-golang v1.9.0 h1:zUdY44CHX2oIUc7VTNZc+4m+ORuO/mldQDA7czhWXEg=
 github.com/fluent/fluent-logger-golang v1.9.0/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -314,6 +332,8 @@ github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWp
 github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
 github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
 github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -423,6 +443,7 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
 github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok=
@@ -475,10 +496,12 @@ github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2
 github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
 github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -580,6 +603,7 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI
 github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
 github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -590,6 +614,7 @@ github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
@@ -665,6 +690,7 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
 github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI=
 github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -679,6 +705,7 @@ github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -689,9 +716,11 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
@@ -699,6 +728,7 @@ github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
 github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -726,10 +756,12 @@ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG
 github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
 github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
 github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/thecodeteam/gosync v0.1.0/go.mod h1:43QHsngcnWc8GE1aCmi7PEypslflHjCzXFleuWKEb00=
 github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=
 github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf/go.mod h1:lJAxK//iyZ3yGbQswdrPTxugZIDM7sd4bEsD0x3XMHk=
 github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274 h1:wbyZxD6IPFp0sl5uscMOJRsz5UKGFiNiD16e+MVfKZY=
 github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274/go.mod h1:oPAfvw32vlUJSjyDcQ3Bu0nb2ON2B+G0dtVN/SZNJiA=
@@ -776,16 +808,46 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
 go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
 go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
 go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
+go.etcd.io/etcd/client/pkg/v3 v3.5.2 h1:4hzqQ6hIb3blLyQ8usCU4h3NghkqcsohEQ3o3VetYxE=
+go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI=
+go.etcd.io/etcd/client/v3 v3.5.2/go.mod h1:kOOaWFFgHygyT0WlSmL8TJiXmMysO/nNUlEsSsN6W4o=
+go.etcd.io/etcd/pkg/v3 v3.5.2 h1:YZUojdoPhOyl5QILYnR8LTUbbNefu/sV4ma+ZMr2tto=
+go.etcd.io/etcd/pkg/v3 v3.5.2/go.mod h1:zsXz+9D/kijzRiG/UnFGDTyHKcVp0orwiO8iMLAi+k0=
+go.etcd.io/etcd/raft/v3 v3.5.2 h1:uCC37qOXqBvKqTGHGyhASsaCsnTuJugl1GvneJNwHWo=
+go.etcd.io/etcd/raft/v3 v3.5.2/go.mod h1:G6pCP1sFgbjod7/KnEHY0vHUViqxjkdt6AiKsD0GRr8=
+go.etcd.io/etcd/server/v3 v3.5.2 h1:B6ytJvS4Fmt8nkjzS2/8POf4tuPhFMluE0lWd4dx/7U=
+go.etcd.io/etcd/server/v3 v3.5.2/go.mod h1:mlG8znIEz4N/28GABrohZCBM11FqgGVQcpbcyJgh0j0=
 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
 go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
+go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
+go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
+go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
+go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
+go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
 go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
 go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
 golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -847,6 +909,7 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
@@ -874,6 +937,7 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -956,6 +1020,7 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M=
 golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -972,6 +1037,7 @@ golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgw
 golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
 golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -1035,14 +1101,16 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
 gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
 gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=

+ 1 - 2
vendor/github.com/coreos/pkg/LICENSE → vendor/github.com/container-storage-interface/spec/LICENSE

@@ -1,4 +1,4 @@
-Apache License
+                                 Apache License
                            Version 2.0, January 2004
                         http://www.apache.org/licenses/
 
@@ -199,4 +199,3 @@ Apache License
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
-

+ 6280 - 0
vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go

@@ -0,0 +1,6280 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/container-storage-interface/spec/csi.proto
+
+package csi
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	timestamp "github.com/golang/protobuf/ptypes/timestamp"
+	wrappers "github.com/golang/protobuf/ptypes/wrappers"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type PluginCapability_Service_Type int32
+
+const (
+	PluginCapability_Service_UNKNOWN PluginCapability_Service_Type = 0
+	// CONTROLLER_SERVICE indicates that the Plugin provides RPCs for
+	// the ControllerService. Plugins SHOULD provide this capability.
+	// In rare cases certain plugins MAY wish to omit the
+	// ControllerService entirely from their implementation, but such
+	// SHOULD NOT be the common case.
+	// The presence of this capability determines whether the CO will
+	// attempt to invoke the REQUIRED ControllerService RPCs, as well
+	// as specific RPCs as indicated by ControllerGetCapabilities.
+	PluginCapability_Service_CONTROLLER_SERVICE PluginCapability_Service_Type = 1
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for
+	// this plugin MAY NOT be equally accessible by all nodes in the
+	// cluster. The CO MUST use the topology information returned by
+	// CreateVolumeRequest along with the topology information
+	// returned by NodeGetInfo to ensure that a given volume is
+	// accessible from a given node when scheduling workloads.
+	PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS PluginCapability_Service_Type = 2
+)
+
+var PluginCapability_Service_Type_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "CONTROLLER_SERVICE",
+	2: "VOLUME_ACCESSIBILITY_CONSTRAINTS",
+}
+
+var PluginCapability_Service_Type_value = map[string]int32{
+	"UNKNOWN":                          0,
+	"CONTROLLER_SERVICE":               1,
+	"VOLUME_ACCESSIBILITY_CONSTRAINTS": 2,
+}
+
+func (x PluginCapability_Service_Type) String() string {
+	return proto.EnumName(PluginCapability_Service_Type_name, int32(x))
+}
+
+func (PluginCapability_Service_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{4, 0, 0}
+}
+
+type PluginCapability_VolumeExpansion_Type int32
+
+const (
+	PluginCapability_VolumeExpansion_UNKNOWN PluginCapability_VolumeExpansion_Type = 0
+	// ONLINE indicates that volumes may be expanded when published to
+	// a node. When a Plugin implements this capability it MUST
+	// implement either the EXPAND_VOLUME controller capability or the
+	// EXPAND_VOLUME node capability or both. When a plugin supports
+	// ONLINE volume expansion and also has the EXPAND_VOLUME
+	// controller capability then the plugin MUST support expansion of
+	// volumes currently published and available on a node. When a
+	// plugin supports ONLINE volume expansion and also has the
+	// EXPAND_VOLUME node capability then the plugin MAY support
+	// expansion of node-published volume via NodeExpandVolume.
+	//
+	// Example 1: Given a shared filesystem volume (e.g. GlusterFs),
+	//   the Plugin may set the ONLINE volume expansion capability and
+	//   implement ControllerExpandVolume but not NodeExpandVolume.
+	//
+	// Example 2: Given a block storage volume type (e.g. EBS), the
+	//   Plugin may set the ONLINE volume expansion capability and
+	//   implement both ControllerExpandVolume and NodeExpandVolume.
+	//
+	// Example 3: Given a Plugin that supports volume expansion only
+	//   upon a node, the Plugin may set the ONLINE volume
+	//   expansion capability and implement NodeExpandVolume but not
+	//   ControllerExpandVolume.
+	PluginCapability_VolumeExpansion_ONLINE PluginCapability_VolumeExpansion_Type = 1
+	// OFFLINE indicates that volumes currently published and
+	// available on a node SHALL NOT be expanded via
+	// ControllerExpandVolume. When a plugin supports OFFLINE volume
+	// expansion it MUST implement either the EXPAND_VOLUME controller
+	// capability or both the EXPAND_VOLUME controller capability and
+	// the EXPAND_VOLUME node capability.
+	//
+	// Example 1: Given a block storage volume type (e.g. Azure Disk)
+	//   that does not support expansion of "node-attached" (i.e.
+	//   controller-published) volumes, the Plugin may indicate
+	//   OFFLINE volume expansion support and implement both
+	//   ControllerExpandVolume and NodeExpandVolume.
+	PluginCapability_VolumeExpansion_OFFLINE PluginCapability_VolumeExpansion_Type = 2
+)
+
+var PluginCapability_VolumeExpansion_Type_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "ONLINE",
+	2: "OFFLINE",
+}
+
+var PluginCapability_VolumeExpansion_Type_value = map[string]int32{
+	"UNKNOWN": 0,
+	"ONLINE":  1,
+	"OFFLINE": 2,
+}
+
+func (x PluginCapability_VolumeExpansion_Type) String() string {
+	return proto.EnumName(PluginCapability_VolumeExpansion_Type_name, int32(x))
+}
+
+func (PluginCapability_VolumeExpansion_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{4, 1, 0}
+}
+
+type VolumeCapability_AccessMode_Mode int32
+
+const (
+	VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0
+	// Can only be published once as read/write on a single node, at
+	// any given time.
+	VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1
+	// Can only be published once as readonly on a single node, at
+	// any given time.
+	VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2
+	// Can be published as readonly at multiple nodes simultaneously.
+	VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3
+	// Can be published at multiple nodes simultaneously. Only one of
+	// the node can be used as read/write. The rest will be readonly.
+	VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 4
+	// Can be published as read/write at multiple nodes
+	// simultaneously.
+	VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 5
+	// Can only be published once as read/write at a single workload
+	// on a single node, at any given time. SHOULD be used instead of
+	// SINGLE_NODE_WRITER for COs using the experimental
+	// SINGLE_NODE_MULTI_WRITER capability.
+	VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 6
+	// Can be published as read/write at multiple workloads on a
+	// single node simultaneously. SHOULD be used instead of
+	// SINGLE_NODE_WRITER for COs using the experimental
+	// SINGLE_NODE_MULTI_WRITER capability.
+	VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 7
+)
+
+var VolumeCapability_AccessMode_Mode_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "SINGLE_NODE_WRITER",
+	2: "SINGLE_NODE_READER_ONLY",
+	3: "MULTI_NODE_READER_ONLY",
+	4: "MULTI_NODE_SINGLE_WRITER",
+	5: "MULTI_NODE_MULTI_WRITER",
+	6: "SINGLE_NODE_SINGLE_WRITER",
+	7: "SINGLE_NODE_MULTI_WRITER",
+}
+
+var VolumeCapability_AccessMode_Mode_value = map[string]int32{
+	"UNKNOWN":                   0,
+	"SINGLE_NODE_WRITER":        1,
+	"SINGLE_NODE_READER_ONLY":   2,
+	"MULTI_NODE_READER_ONLY":    3,
+	"MULTI_NODE_SINGLE_WRITER":  4,
+	"MULTI_NODE_MULTI_WRITER":   5,
+	"SINGLE_NODE_SINGLE_WRITER": 6,
+	"SINGLE_NODE_MULTI_WRITER":  7,
+}
+
+func (x VolumeCapability_AccessMode_Mode) String() string {
+	return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x))
+}
+
+func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{10, 2, 0}
+}
+
+type ControllerServiceCapability_RPC_Type int32
+
+const (
+	ControllerServiceCapability_RPC_UNKNOWN                  ControllerServiceCapability_RPC_Type = 0
+	ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME     ControllerServiceCapability_RPC_Type = 1
+	ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME ControllerServiceCapability_RPC_Type = 2
+	ControllerServiceCapability_RPC_LIST_VOLUMES             ControllerServiceCapability_RPC_Type = 3
+	ControllerServiceCapability_RPC_GET_CAPACITY             ControllerServiceCapability_RPC_Type = 4
+	// Currently the only way to consume a snapshot is to create
+	// a volume from it. Therefore plugins supporting
+	// CREATE_DELETE_SNAPSHOT MUST support creating volume from
+	// snapshot.
+	ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT ControllerServiceCapability_RPC_Type = 5
+	ControllerServiceCapability_RPC_LIST_SNAPSHOTS         ControllerServiceCapability_RPC_Type = 6
+	// Plugins supporting volume cloning at the storage level MAY
+	// report this capability. The source volume MUST be managed by
+	// the same plugin. Not all volume sources and parameters
+	// combinations MAY work.
+	ControllerServiceCapability_RPC_CLONE_VOLUME ControllerServiceCapability_RPC_Type = 7
+	// Indicates the SP supports ControllerPublishVolume.readonly
+	// field.
+	ControllerServiceCapability_RPC_PUBLISH_READONLY ControllerServiceCapability_RPC_Type = 8
+	// See VolumeExpansion for details.
+	ControllerServiceCapability_RPC_EXPAND_VOLUME ControllerServiceCapability_RPC_Type = 9
+	// Indicates the SP supports the
+	// ListVolumesResponse.entry.published_nodes field
+	ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES ControllerServiceCapability_RPC_Type = 10
+	// Indicates that the Controller service can report volume
+	// conditions.
+	// An SP MAY implement `VolumeCondition` in only the Controller
+	// Plugin, only the Node Plugin, or both.
+	// If `VolumeCondition` is implemented in both the Controller and
+	// Node Plugins, it SHALL report from different perspectives.
+	// If for some reason Controller and Node Plugins report
+	// misaligned volume conditions, CO SHALL assume the worst case
+	// is the truth.
+	// Note that, for alpha, `VolumeCondition` is intended be
+	// informative for humans only, not for automation.
+	ControllerServiceCapability_RPC_VOLUME_CONDITION ControllerServiceCapability_RPC_Type = 11
+	// Indicates the SP supports the ControllerGetVolume RPC.
+	// This enables COs to, for example, fetch per volume
+	// condition after a volume is provisioned.
+	ControllerServiceCapability_RPC_GET_VOLUME ControllerServiceCapability_RPC_Type = 12
+	// Indicates the SP supports the SINGLE_NODE_SINGLE_WRITER and/or
+	// SINGLE_NODE_MULTI_WRITER access modes.
+	// These access modes are intended to replace the
+	// SINGLE_NODE_WRITER access mode to clarify the number of writers
+	// for a volume on a single node. Plugins MUST accept and allow
+	// use of the SINGLE_NODE_WRITER access mode when either
+	// SINGLE_NODE_SINGLE_WRITER and/or SINGLE_NODE_MULTI_WRITER are
+	// supported, in order to permit older COs to continue working.
+	ControllerServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER ControllerServiceCapability_RPC_Type = 13
+)
+
+var ControllerServiceCapability_RPC_Type_name = map[int32]string{
+	0:  "UNKNOWN",
+	1:  "CREATE_DELETE_VOLUME",
+	2:  "PUBLISH_UNPUBLISH_VOLUME",
+	3:  "LIST_VOLUMES",
+	4:  "GET_CAPACITY",
+	5:  "CREATE_DELETE_SNAPSHOT",
+	6:  "LIST_SNAPSHOTS",
+	7:  "CLONE_VOLUME",
+	8:  "PUBLISH_READONLY",
+	9:  "EXPAND_VOLUME",
+	10: "LIST_VOLUMES_PUBLISHED_NODES",
+	11: "VOLUME_CONDITION",
+	12: "GET_VOLUME",
+	13: "SINGLE_NODE_MULTI_WRITER",
+}
+
+var ControllerServiceCapability_RPC_Type_value = map[string]int32{
+	"UNKNOWN":                      0,
+	"CREATE_DELETE_VOLUME":         1,
+	"PUBLISH_UNPUBLISH_VOLUME":     2,
+	"LIST_VOLUMES":                 3,
+	"GET_CAPACITY":                 4,
+	"CREATE_DELETE_SNAPSHOT":       5,
+	"LIST_SNAPSHOTS":               6,
+	"CLONE_VOLUME":                 7,
+	"PUBLISH_READONLY":             8,
+	"EXPAND_VOLUME":                9,
+	"LIST_VOLUMES_PUBLISHED_NODES": 10,
+	"VOLUME_CONDITION":             11,
+	"GET_VOLUME":                   12,
+	"SINGLE_NODE_MULTI_WRITER":     13,
+}
+
+func (x ControllerServiceCapability_RPC_Type) String() string {
+	return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x))
+}
+
+func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{31, 0, 0}
+}
+
+type VolumeUsage_Unit int32
+
+const (
+	VolumeUsage_UNKNOWN VolumeUsage_Unit = 0
+	VolumeUsage_BYTES   VolumeUsage_Unit = 1
+	VolumeUsage_INODES  VolumeUsage_Unit = 2
+)
+
+var VolumeUsage_Unit_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "BYTES",
+	2: "INODES",
+}
+
+var VolumeUsage_Unit_value = map[string]int32{
+	"UNKNOWN": 0,
+	"BYTES":   1,
+	"INODES":  2,
+}
+
+func (x VolumeUsage_Unit) String() string {
+	return proto.EnumName(VolumeUsage_Unit_name, int32(x))
+}
+
+func (VolumeUsage_Unit) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{51, 0}
+}
+
+type NodeServiceCapability_RPC_Type int32
+
+const (
+	NodeServiceCapability_RPC_UNKNOWN              NodeServiceCapability_RPC_Type = 0
+	NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME NodeServiceCapability_RPC_Type = 1
+	// If Plugin implements GET_VOLUME_STATS capability
+	// then it MUST implement NodeGetVolumeStats RPC
+	// call for fetching volume statistics.
+	NodeServiceCapability_RPC_GET_VOLUME_STATS NodeServiceCapability_RPC_Type = 2
+	// See VolumeExpansion for details.
+	NodeServiceCapability_RPC_EXPAND_VOLUME NodeServiceCapability_RPC_Type = 3
+	// Indicates that the Node service can report volume conditions.
+	// An SP MAY implement `VolumeCondition` in only the Node
+	// Plugin, only the Controller Plugin, or both.
+	// If `VolumeCondition` is implemented in both the Node and
+	// Controller Plugins, it SHALL report from different
+	// perspectives.
+	// If for some reason Node and Controller Plugins report
+	// misaligned volume conditions, CO SHALL assume the worst case
+	// is the truth.
+	// Note that, for alpha, `VolumeCondition` is intended to be
+	// informative for humans only, not for automation.
+	NodeServiceCapability_RPC_VOLUME_CONDITION NodeServiceCapability_RPC_Type = 4
+	// Indicates the SP supports the SINGLE_NODE_SINGLE_WRITER and/or
+	// SINGLE_NODE_MULTI_WRITER access modes.
+	// These access modes are intended to replace the
+	// SINGLE_NODE_WRITER access mode to clarify the number of writers
+	// for a volume on a single node. Plugins MUST accept and allow
+	// use of the SINGLE_NODE_WRITER access mode (subject to the
+	// processing rules for NodePublishVolume), when either
+	// SINGLE_NODE_SINGLE_WRITER and/or SINGLE_NODE_MULTI_WRITER are
+	// supported, in order to permit older COs to continue working.
+	NodeServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER NodeServiceCapability_RPC_Type = 5
+	// Indicates that Node service supports mounting volumes
+	// with provided volume group identifier during node stage
+	// or node publish RPC calls.
+	NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP NodeServiceCapability_RPC_Type = 6
+)
+
+var NodeServiceCapability_RPC_Type_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "STAGE_UNSTAGE_VOLUME",
+	2: "GET_VOLUME_STATS",
+	3: "EXPAND_VOLUME",
+	4: "VOLUME_CONDITION",
+	5: "SINGLE_NODE_MULTI_WRITER",
+	6: "VOLUME_MOUNT_GROUP",
+}
+
+var NodeServiceCapability_RPC_Type_value = map[string]int32{
+	"UNKNOWN":                  0,
+	"STAGE_UNSTAGE_VOLUME":     1,
+	"GET_VOLUME_STATS":         2,
+	"EXPAND_VOLUME":            3,
+	"VOLUME_CONDITION":         4,
+	"SINGLE_NODE_MULTI_WRITER": 5,
+	"VOLUME_MOUNT_GROUP":       6,
+}
+
+func (x NodeServiceCapability_RPC_Type) String() string {
+	return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x))
+}
+
+func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{55, 0, 0}
+}
+
+type GetPluginInfoRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GetPluginInfoRequest) Reset()         { *m = GetPluginInfoRequest{} }
+func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPluginInfoRequest) ProtoMessage()    {}
+func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{0}
+}
+
+func (m *GetPluginInfoRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetPluginInfoRequest.Unmarshal(m, b)
+}
+func (m *GetPluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetPluginInfoRequest.Marshal(b, m, deterministic)
+}
+func (m *GetPluginInfoRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetPluginInfoRequest.Merge(m, src)
+}
+func (m *GetPluginInfoRequest) XXX_Size() int {
+	return xxx_messageInfo_GetPluginInfoRequest.Size(m)
+}
+func (m *GetPluginInfoRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetPluginInfoRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetPluginInfoRequest proto.InternalMessageInfo
+
+type GetPluginInfoResponse struct {
+	// The name MUST follow domain name notation format
+	// (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD
+	// include the plugin's host company name and the plugin name,
+	// to minimize the possibility of collisions. It MUST be 63
+	// characters or less, beginning and ending with an alphanumeric
+	// character ([a-z0-9A-Z]) with dashes (-), dots (.), and
+	// alphanumerics between. This field is REQUIRED.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// This field is REQUIRED. Value of this field is opaque to the CO.
+	VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion,proto3" json:"vendor_version,omitempty"`
+	// This field is OPTIONAL. Values are opaque to the CO.
+	Manifest             map[string]string `protobuf:"bytes,3,rep,name=manifest,proto3" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *GetPluginInfoResponse) Reset()         { *m = GetPluginInfoResponse{} }
+func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) }
+func (*GetPluginInfoResponse) ProtoMessage()    {}
+func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{1}
+}
+
+func (m *GetPluginInfoResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetPluginInfoResponse.Unmarshal(m, b)
+}
+func (m *GetPluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetPluginInfoResponse.Marshal(b, m, deterministic)
+}
+func (m *GetPluginInfoResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetPluginInfoResponse.Merge(m, src)
+}
+func (m *GetPluginInfoResponse) XXX_Size() int {
+	return xxx_messageInfo_GetPluginInfoResponse.Size(m)
+}
+func (m *GetPluginInfoResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetPluginInfoResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetPluginInfoResponse proto.InternalMessageInfo
+
+func (m *GetPluginInfoResponse) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *GetPluginInfoResponse) GetVendorVersion() string {
+	if m != nil {
+		return m.VendorVersion
+	}
+	return ""
+}
+
+func (m *GetPluginInfoResponse) GetManifest() map[string]string {
+	if m != nil {
+		return m.Manifest
+	}
+	return nil
+}
+
+type GetPluginCapabilitiesRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GetPluginCapabilitiesRequest) Reset()         { *m = GetPluginCapabilitiesRequest{} }
+func (m *GetPluginCapabilitiesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPluginCapabilitiesRequest) ProtoMessage()    {}
+func (*GetPluginCapabilitiesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{2}
+}
+
+func (m *GetPluginCapabilitiesRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetPluginCapabilitiesRequest.Unmarshal(m, b)
+}
+func (m *GetPluginCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetPluginCapabilitiesRequest.Marshal(b, m, deterministic)
+}
+func (m *GetPluginCapabilitiesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetPluginCapabilitiesRequest.Merge(m, src)
+}
+func (m *GetPluginCapabilitiesRequest) XXX_Size() int {
+	return xxx_messageInfo_GetPluginCapabilitiesRequest.Size(m)
+}
+func (m *GetPluginCapabilitiesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetPluginCapabilitiesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetPluginCapabilitiesRequest proto.InternalMessageInfo
+
+type GetPluginCapabilitiesResponse struct {
+	// All the capabilities that the controller service supports. This
+	// field is OPTIONAL.
+	Capabilities         []*PluginCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *GetPluginCapabilitiesResponse) Reset()         { *m = GetPluginCapabilitiesResponse{} }
+func (m *GetPluginCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetPluginCapabilitiesResponse) ProtoMessage()    {}
+func (*GetPluginCapabilitiesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{3}
+}
+
+func (m *GetPluginCapabilitiesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetPluginCapabilitiesResponse.Unmarshal(m, b)
+}
+func (m *GetPluginCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetPluginCapabilitiesResponse.Marshal(b, m, deterministic)
+}
+func (m *GetPluginCapabilitiesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetPluginCapabilitiesResponse.Merge(m, src)
+}
+func (m *GetPluginCapabilitiesResponse) XXX_Size() int {
+	return xxx_messageInfo_GetPluginCapabilitiesResponse.Size(m)
+}
+func (m *GetPluginCapabilitiesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetPluginCapabilitiesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetPluginCapabilitiesResponse proto.InternalMessageInfo
+
+func (m *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability {
+	if m != nil {
+		return m.Capabilities
+	}
+	return nil
+}
+
+// Specifies a capability of the plugin.
+type PluginCapability struct {
+	// Types that are valid to be assigned to Type:
+	//	*PluginCapability_Service_
+	//	*PluginCapability_VolumeExpansion_
+	Type                 isPluginCapability_Type `protobuf_oneof:"type"`
+	XXX_NoUnkeyedLiteral struct{}                `json:"-"`
+	XXX_unrecognized     []byte                  `json:"-"`
+	XXX_sizecache        int32                   `json:"-"`
+}
+
+func (m *PluginCapability) Reset()         { *m = PluginCapability{} }
+func (m *PluginCapability) String() string { return proto.CompactTextString(m) }
+func (*PluginCapability) ProtoMessage()    {}
+func (*PluginCapability) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{4}
+}
+
+func (m *PluginCapability) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PluginCapability.Unmarshal(m, b)
+}
+func (m *PluginCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PluginCapability.Marshal(b, m, deterministic)
+}
+func (m *PluginCapability) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PluginCapability.Merge(m, src)
+}
+func (m *PluginCapability) XXX_Size() int {
+	return xxx_messageInfo_PluginCapability.Size(m)
+}
+func (m *PluginCapability) XXX_DiscardUnknown() {
+	xxx_messageInfo_PluginCapability.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PluginCapability proto.InternalMessageInfo
+
+type isPluginCapability_Type interface {
+	isPluginCapability_Type()
+}
+
+type PluginCapability_Service_ struct {
+	Service *PluginCapability_Service `protobuf:"bytes,1,opt,name=service,proto3,oneof"`
+}
+
+type PluginCapability_VolumeExpansion_ struct {
+	VolumeExpansion *PluginCapability_VolumeExpansion `protobuf:"bytes,2,opt,name=volume_expansion,json=volumeExpansion,proto3,oneof"`
+}
+
+func (*PluginCapability_Service_) isPluginCapability_Type() {}
+
+func (*PluginCapability_VolumeExpansion_) isPluginCapability_Type() {}
+
+func (m *PluginCapability) GetType() isPluginCapability_Type {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+func (m *PluginCapability) GetService() *PluginCapability_Service {
+	if x, ok := m.GetType().(*PluginCapability_Service_); ok {
+		return x.Service
+	}
+	return nil
+}
+
+func (m *PluginCapability) GetVolumeExpansion() *PluginCapability_VolumeExpansion {
+	if x, ok := m.GetType().(*PluginCapability_VolumeExpansion_); ok {
+		return x.VolumeExpansion
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*PluginCapability) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*PluginCapability_Service_)(nil),
+		(*PluginCapability_VolumeExpansion_)(nil),
+	}
+}
+
+type PluginCapability_Service struct {
+	Type                 PluginCapability_Service_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_Service_Type" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
+	XXX_unrecognized     []byte                        `json:"-"`
+	XXX_sizecache        int32                         `json:"-"`
+}
+
+func (m *PluginCapability_Service) Reset()         { *m = PluginCapability_Service{} }
+func (m *PluginCapability_Service) String() string { return proto.CompactTextString(m) }
+func (*PluginCapability_Service) ProtoMessage()    {}
+func (*PluginCapability_Service) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{4, 0}
+}
+
+func (m *PluginCapability_Service) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PluginCapability_Service.Unmarshal(m, b)
+}
+func (m *PluginCapability_Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PluginCapability_Service.Marshal(b, m, deterministic)
+}
+func (m *PluginCapability_Service) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PluginCapability_Service.Merge(m, src)
+}
+func (m *PluginCapability_Service) XXX_Size() int {
+	return xxx_messageInfo_PluginCapability_Service.Size(m)
+}
+func (m *PluginCapability_Service) XXX_DiscardUnknown() {
+	xxx_messageInfo_PluginCapability_Service.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PluginCapability_Service proto.InternalMessageInfo
+
+func (m *PluginCapability_Service) GetType() PluginCapability_Service_Type {
+	if m != nil {
+		return m.Type
+	}
+	return PluginCapability_Service_UNKNOWN
+}
+
+type PluginCapability_VolumeExpansion struct {
+	Type                 PluginCapability_VolumeExpansion_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_VolumeExpansion_Type" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                              `json:"-"`
+	XXX_unrecognized     []byte                                `json:"-"`
+	XXX_sizecache        int32                                 `json:"-"`
+}
+
+func (m *PluginCapability_VolumeExpansion) Reset()         { *m = PluginCapability_VolumeExpansion{} }
+func (m *PluginCapability_VolumeExpansion) String() string { return proto.CompactTextString(m) }
+func (*PluginCapability_VolumeExpansion) ProtoMessage()    {}
+func (*PluginCapability_VolumeExpansion) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{4, 1}
+}
+
+func (m *PluginCapability_VolumeExpansion) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PluginCapability_VolumeExpansion.Unmarshal(m, b)
+}
+func (m *PluginCapability_VolumeExpansion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PluginCapability_VolumeExpansion.Marshal(b, m, deterministic)
+}
+func (m *PluginCapability_VolumeExpansion) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PluginCapability_VolumeExpansion.Merge(m, src)
+}
+func (m *PluginCapability_VolumeExpansion) XXX_Size() int {
+	return xxx_messageInfo_PluginCapability_VolumeExpansion.Size(m)
+}
+func (m *PluginCapability_VolumeExpansion) XXX_DiscardUnknown() {
+	xxx_messageInfo_PluginCapability_VolumeExpansion.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PluginCapability_VolumeExpansion proto.InternalMessageInfo
+
+func (m *PluginCapability_VolumeExpansion) GetType() PluginCapability_VolumeExpansion_Type {
+	if m != nil {
+		return m.Type
+	}
+	return PluginCapability_VolumeExpansion_UNKNOWN
+}
+
+type ProbeRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ProbeRequest) Reset()         { *m = ProbeRequest{} }
+func (m *ProbeRequest) String() string { return proto.CompactTextString(m) }
+func (*ProbeRequest) ProtoMessage()    {}
+func (*ProbeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{5}
+}
+
+func (m *ProbeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ProbeRequest.Unmarshal(m, b)
+}
+func (m *ProbeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ProbeRequest.Marshal(b, m, deterministic)
+}
+func (m *ProbeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProbeRequest.Merge(m, src)
+}
+func (m *ProbeRequest) XXX_Size() int {
+	return xxx_messageInfo_ProbeRequest.Size(m)
+}
+func (m *ProbeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ProbeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProbeRequest proto.InternalMessageInfo
+
+type ProbeResponse struct {
+	// Readiness allows a plugin to report its initialization status back
+	// to the CO. Initialization for some plugins MAY be time consuming
+	// and it is important for a CO to distinguish between the following
+	// cases:
+	//
+	// 1) The plugin is in an unhealthy state and MAY need restarting. In
+	//    this case a gRPC error code SHALL be returned.
+	// 2) The plugin is still initializing, but is otherwise perfectly
+	//    healthy. In this case a successful response SHALL be returned
+	//    with a readiness value of `false`. Calls to the plugin's
+	//    Controller and/or Node services MAY fail due to an incomplete
+	//    initialization state.
+	// 3) The plugin has finished initializing and is ready to service
+	//    calls to its Controller and/or Node services. A successful
+	//    response is returned with a readiness value of `true`.
+	//
+	// This field is OPTIONAL. If not present, the caller SHALL assume
+	// that the plugin is in a ready state and is accepting calls to its
+	// Controller and/or Node services (according to the plugin's reported
+	// capabilities).
+	Ready                *wrappers.BoolValue `protobuf:"bytes,1,opt,name=ready,proto3" json:"ready,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *ProbeResponse) Reset()         { *m = ProbeResponse{} }
+func (m *ProbeResponse) String() string { return proto.CompactTextString(m) }
+func (*ProbeResponse) ProtoMessage()    {}
+func (*ProbeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{6}
+}
+
+func (m *ProbeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ProbeResponse.Unmarshal(m, b)
+}
+func (m *ProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ProbeResponse.Marshal(b, m, deterministic)
+}
+func (m *ProbeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProbeResponse.Merge(m, src)
+}
+func (m *ProbeResponse) XXX_Size() int {
+	return xxx_messageInfo_ProbeResponse.Size(m)
+}
+func (m *ProbeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ProbeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProbeResponse proto.InternalMessageInfo
+
+func (m *ProbeResponse) GetReady() *wrappers.BoolValue {
+	if m != nil {
+		return m.Ready
+	}
+	return nil
+}
+
+type CreateVolumeRequest struct {
+	// The suggested name for the storage space. This field is REQUIRED.
+	// It serves two purposes:
+	// 1) Idempotency - This name is generated by the CO to achieve
+	//    idempotency.  The Plugin SHOULD ensure that multiple
+	//    `CreateVolume` calls for the same name do not result in more
+	//    than one piece of storage provisioned corresponding to that
+	//    name. If a Plugin is unable to enforce idempotency, the CO's
+	//    error recovery logic could result in multiple (unused) volumes
+	//    being provisioned.
+	//    In the case of error, the CO MUST handle the gRPC error codes
+	//    per the recovery behavior defined in the "CreateVolume Errors"
+	//    section below.
+	//    The CO is responsible for cleaning up volumes it provisioned
+	//    that it no longer needs. If the CO is uncertain whether a volume
+	//    was provisioned or not when a `CreateVolume` call fails, the CO
+	//    MAY call `CreateVolume` again, with the same name, to ensure the
+	//    volume exists and to retrieve the volume's `volume_id` (unless
+	//    otherwise prohibited by "CreateVolume Errors").
+	// 2) Suggested name - Some storage systems allow callers to specify
+	//    an identifier by which to refer to the newly provisioned
+	//    storage. If a storage system supports this, it can optionally
+	//    use this name as the identifier for the new volume.
+	// Any Unicode string that conforms to the length limit is allowed
+	// except those containing the following banned characters:
+	// U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
+	// (These are control characters other than commonly used whitespace.)
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// This field is OPTIONAL. This allows the CO to specify the capacity
+	// requirement of the volume to be provisioned. If not specified, the
+	// Plugin MAY choose an implementation-defined capacity range. If
+	// specified it MUST always be honored, even when creating volumes
+	// from a source; which MAY force some backends to internally extend
+	// the volume after creating it.
+	CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"`
+	// The capabilities that the provisioned volume MUST have. SP MUST
+	// provision a volume that will satisfy ALL of the capabilities
+	// specified in this list. Otherwise SP MUST return the appropriate
+	// gRPC error code.
+	// The Plugin MUST assume that the CO MAY use the provisioned volume
+	// with ANY of the capabilities specified in this list.
+	// For example, a CO MAY specify two volume capabilities: one with
+	// access mode SINGLE_NODE_WRITER and another with access mode
+	// MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the
+	// provisioned volume can be used in either mode.
+	// This also enables the CO to do early validation: If ANY of the
+	// specified volume capabilities are not supported by the SP, the call
+	// MUST return the appropriate gRPC error code.
+	// This field is REQUIRED.
+	VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"`
+	// Plugin specific parameters passed in as opaque key-value pairs.
+	// This field is OPTIONAL. The Plugin is responsible for parsing and
+	// validating these parameters. COs will treat these as opaque.
+	Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Secrets required by plugin to complete volume creation request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// If specified, the new volume will be pre-populated with data from
+	// this source. This field is OPTIONAL.
+	VolumeContentSource *VolumeContentSource `protobuf:"bytes,6,opt,name=volume_content_source,json=volumeContentSource,proto3" json:"volume_content_source,omitempty"`
+	// Specifies where (regions, zones, racks, etc.) the provisioned
+	// volume MUST be accessible from.
+	// An SP SHALL advertise the requirements for topological
+	// accessibility information in documentation. COs SHALL only specify
+	// topological accessibility information supported by the SP.
+	// This field is OPTIONAL.
+	// This field SHALL NOT be specified unless the SP has the
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
+	// If this field is not specified and the SP has the
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY
+	// choose where the provisioned volume is accessible from.
+	AccessibilityRequirements *TopologyRequirement `protobuf:"bytes,7,opt,name=accessibility_requirements,json=accessibilityRequirements,proto3" json:"accessibility_requirements,omitempty"`
+	XXX_NoUnkeyedLiteral      struct{}             `json:"-"`
+	XXX_unrecognized          []byte               `json:"-"`
+	XXX_sizecache             int32                `json:"-"`
+}
+
+func (m *CreateVolumeRequest) Reset()         { *m = CreateVolumeRequest{} }
+func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateVolumeRequest) ProtoMessage()    {}
+func (*CreateVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{7}
+}
+
+func (m *CreateVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CreateVolumeRequest.Unmarshal(m, b)
+}
+func (m *CreateVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CreateVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateVolumeRequest.Merge(m, src)
+}
+func (m *CreateVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_CreateVolumeRequest.Size(m)
+}
+func (m *CreateVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateVolumeRequest proto.InternalMessageInfo
+
+func (m *CreateVolumeRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange {
+	if m != nil {
+		return m.CapacityRange
+	}
+	return nil
+}
+
+func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability {
+	if m != nil {
+		return m.VolumeCapabilities
+	}
+	return nil
+}
+
+func (m *CreateVolumeRequest) GetParameters() map[string]string {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+func (m *CreateVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *CreateVolumeRequest) GetVolumeContentSource() *VolumeContentSource {
+	if m != nil {
+		return m.VolumeContentSource
+	}
+	return nil
+}
+
+func (m *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequirement {
+	if m != nil {
+		return m.AccessibilityRequirements
+	}
+	return nil
+}
+
+// Specifies what source the volume will be created from. One of the
+// type fields MUST be specified.
+type VolumeContentSource struct {
+	// Types that are valid to be assigned to Type:
+	//	*VolumeContentSource_Snapshot
+	//	*VolumeContentSource_Volume
+	Type                 isVolumeContentSource_Type `protobuf_oneof:"type"`
+	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
+	XXX_unrecognized     []byte                     `json:"-"`
+	XXX_sizecache        int32                      `json:"-"`
+}
+
+func (m *VolumeContentSource) Reset()         { *m = VolumeContentSource{} }
+func (m *VolumeContentSource) String() string { return proto.CompactTextString(m) }
+func (*VolumeContentSource) ProtoMessage()    {}
+func (*VolumeContentSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{8}
+}
+
+func (m *VolumeContentSource) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeContentSource.Unmarshal(m, b)
+}
+func (m *VolumeContentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeContentSource.Marshal(b, m, deterministic)
+}
+func (m *VolumeContentSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeContentSource.Merge(m, src)
+}
+func (m *VolumeContentSource) XXX_Size() int {
+	return xxx_messageInfo_VolumeContentSource.Size(m)
+}
+func (m *VolumeContentSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeContentSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeContentSource proto.InternalMessageInfo
+
+type isVolumeContentSource_Type interface {
+	isVolumeContentSource_Type()
+}
+
+type VolumeContentSource_Snapshot struct {
+	Snapshot *VolumeContentSource_SnapshotSource `protobuf:"bytes,1,opt,name=snapshot,proto3,oneof"`
+}
+
+type VolumeContentSource_Volume struct {
+	Volume *VolumeContentSource_VolumeSource `protobuf:"bytes,2,opt,name=volume,proto3,oneof"`
+}
+
+func (*VolumeContentSource_Snapshot) isVolumeContentSource_Type() {}
+
+func (*VolumeContentSource_Volume) isVolumeContentSource_Type() {}
+
+func (m *VolumeContentSource) GetType() isVolumeContentSource_Type {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+func (m *VolumeContentSource) GetSnapshot() *VolumeContentSource_SnapshotSource {
+	if x, ok := m.GetType().(*VolumeContentSource_Snapshot); ok {
+		return x.Snapshot
+	}
+	return nil
+}
+
+func (m *VolumeContentSource) GetVolume() *VolumeContentSource_VolumeSource {
+	if x, ok := m.GetType().(*VolumeContentSource_Volume); ok {
+		return x.Volume
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*VolumeContentSource) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*VolumeContentSource_Snapshot)(nil),
+		(*VolumeContentSource_Volume)(nil),
+	}
+}
+
+type VolumeContentSource_SnapshotSource struct {
+	// Contains identity information for the existing source snapshot.
+	// This field is REQUIRED. Plugin is REQUIRED to support creating
+	// volume from snapshot if it supports the capability
+	// CREATE_DELETE_SNAPSHOT.
+	SnapshotId           string   `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *VolumeContentSource_SnapshotSource) Reset()         { *m = VolumeContentSource_SnapshotSource{} }
+func (m *VolumeContentSource_SnapshotSource) String() string { return proto.CompactTextString(m) }
+func (*VolumeContentSource_SnapshotSource) ProtoMessage()    {}
+func (*VolumeContentSource_SnapshotSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{8, 0}
+}
+
+func (m *VolumeContentSource_SnapshotSource) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeContentSource_SnapshotSource.Unmarshal(m, b)
+}
+func (m *VolumeContentSource_SnapshotSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeContentSource_SnapshotSource.Marshal(b, m, deterministic)
+}
+func (m *VolumeContentSource_SnapshotSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeContentSource_SnapshotSource.Merge(m, src)
+}
+func (m *VolumeContentSource_SnapshotSource) XXX_Size() int {
+	return xxx_messageInfo_VolumeContentSource_SnapshotSource.Size(m)
+}
+func (m *VolumeContentSource_SnapshotSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeContentSource_SnapshotSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeContentSource_SnapshotSource proto.InternalMessageInfo
+
+func (m *VolumeContentSource_SnapshotSource) GetSnapshotId() string {
+	if m != nil {
+		return m.SnapshotId
+	}
+	return ""
+}
+
+type VolumeContentSource_VolumeSource struct {
+	// Contains identity information for the existing source volume.
+	// This field is REQUIRED. Plugins reporting CLONE_VOLUME
+	// capability MUST support creating a volume from another volume.
+	VolumeId             string   `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *VolumeContentSource_VolumeSource) Reset()         { *m = VolumeContentSource_VolumeSource{} }
+func (m *VolumeContentSource_VolumeSource) String() string { return proto.CompactTextString(m) }
+func (*VolumeContentSource_VolumeSource) ProtoMessage()    {}
+func (*VolumeContentSource_VolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{8, 1}
+}
+
+func (m *VolumeContentSource_VolumeSource) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeContentSource_VolumeSource.Unmarshal(m, b)
+}
+func (m *VolumeContentSource_VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeContentSource_VolumeSource.Marshal(b, m, deterministic)
+}
+func (m *VolumeContentSource_VolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeContentSource_VolumeSource.Merge(m, src)
+}
+func (m *VolumeContentSource_VolumeSource) XXX_Size() int {
+	return xxx_messageInfo_VolumeContentSource_VolumeSource.Size(m)
+}
+func (m *VolumeContentSource_VolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeContentSource_VolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeContentSource_VolumeSource proto.InternalMessageInfo
+
+func (m *VolumeContentSource_VolumeSource) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+type CreateVolumeResponse struct {
+	// Contains all attributes of the newly created volume that are
+	// relevant to the CO along with information required by the Plugin
+	// to uniquely identify the volume. This field is REQUIRED.
+	Volume               *Volume  `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *CreateVolumeResponse) Reset()         { *m = CreateVolumeResponse{} }
+func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateVolumeResponse) ProtoMessage()    {}
+func (*CreateVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{9}
+}
+
+func (m *CreateVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CreateVolumeResponse.Unmarshal(m, b)
+}
+func (m *CreateVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CreateVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *CreateVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateVolumeResponse.Merge(m, src)
+}
+func (m *CreateVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_CreateVolumeResponse.Size(m)
+}
+func (m *CreateVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateVolumeResponse proto.InternalMessageInfo
+
+func (m *CreateVolumeResponse) GetVolume() *Volume {
+	if m != nil {
+		return m.Volume
+	}
+	return nil
+}
+
+// Specify a capability of a volume.
+type VolumeCapability struct {
+	// Specifies what API the volume will be accessed using. One of the
+	// following fields MUST be specified.
+	//
+	// Types that are valid to be assigned to AccessType:
+	//	*VolumeCapability_Block
+	//	*VolumeCapability_Mount
+	AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"`
+	// This is a REQUIRED field.
+	AccessMode           *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode,proto3" json:"access_mode,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *VolumeCapability) Reset()         { *m = VolumeCapability{} }
+func (m *VolumeCapability) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability) ProtoMessage()    {}
+func (*VolumeCapability) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{10}
+}
+
+func (m *VolumeCapability) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeCapability.Unmarshal(m, b)
+}
+func (m *VolumeCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeCapability.Marshal(b, m, deterministic)
+}
+func (m *VolumeCapability) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeCapability.Merge(m, src)
+}
+func (m *VolumeCapability) XXX_Size() int {
+	return xxx_messageInfo_VolumeCapability.Size(m)
+}
+func (m *VolumeCapability) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeCapability.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCapability proto.InternalMessageInfo
+
+type isVolumeCapability_AccessType interface {
+	isVolumeCapability_AccessType()
+}
+
+type VolumeCapability_Block struct {
+	Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,proto3,oneof"`
+}
+
+type VolumeCapability_Mount struct {
+	Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,proto3,oneof"`
+}
+
+func (*VolumeCapability_Block) isVolumeCapability_AccessType() {}
+
+func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {}
+
+func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType {
+	if m != nil {
+		return m.AccessType
+	}
+	return nil
+}
+
+func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume {
+	if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok {
+		return x.Block
+	}
+	return nil
+}
+
+func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume {
+	if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok {
+		return x.Mount
+	}
+	return nil
+}
+
+func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode {
+	if m != nil {
+		return m.AccessMode
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*VolumeCapability) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*VolumeCapability_Block)(nil),
+		(*VolumeCapability_Mount)(nil),
+	}
+}
+
+// Indicate that the volume will be accessed via the block device API.
+type VolumeCapability_BlockVolume struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *VolumeCapability_BlockVolume) Reset()         { *m = VolumeCapability_BlockVolume{} }
+func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability_BlockVolume) ProtoMessage()    {}
+func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{10, 0}
+}
+
+func (m *VolumeCapability_BlockVolume) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeCapability_BlockVolume.Unmarshal(m, b)
+}
+func (m *VolumeCapability_BlockVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeCapability_BlockVolume.Marshal(b, m, deterministic)
+}
+func (m *VolumeCapability_BlockVolume) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeCapability_BlockVolume.Merge(m, src)
+}
+func (m *VolumeCapability_BlockVolume) XXX_Size() int {
+	return xxx_messageInfo_VolumeCapability_BlockVolume.Size(m)
+}
+func (m *VolumeCapability_BlockVolume) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeCapability_BlockVolume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCapability_BlockVolume proto.InternalMessageInfo
+
+// Indicate that the volume will be accessed via the filesystem API.
+type VolumeCapability_MountVolume struct {
+	// The filesystem type. This field is OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType,proto3" json:"fs_type,omitempty"`
+	// The mount options that can be used for the volume. This field is
+	// OPTIONAL. `mount_flags` MAY contain sensitive information.
+	// Therefore, the CO and the Plugin MUST NOT leak this information
+	// to untrusted entities. The total size of this repeated field
+	// SHALL NOT exceed 4 KiB.
+	MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags,proto3" json:"mount_flags,omitempty"`
+	// If SP has VOLUME_MOUNT_GROUP node capability and CO provides
+	// this field then SP MUST ensure that the volume_mount_group
+	// parameter is passed as the group identifier to the underlying
+	// operating system mount system call, with the understanding
+	// that the set of available mount call parameters and/or
+	// mount implementations may vary across operating systems.
+	// Additionally, new file and/or directory entries written to
+	// the underlying filesystem SHOULD be permission-labeled in such a
+	// manner, unless otherwise modified by a workload, that they are
+	// both readable and writable by said mount group identifier.
+	// This is an OPTIONAL field.
+	VolumeMountGroup     string   `protobuf:"bytes,3,opt,name=volume_mount_group,json=volumeMountGroup,proto3" json:"volume_mount_group,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *VolumeCapability_MountVolume) Reset()         { *m = VolumeCapability_MountVolume{} }
+func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability_MountVolume) ProtoMessage()    {}
+func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{10, 1}
+}
+
+func (m *VolumeCapability_MountVolume) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeCapability_MountVolume.Unmarshal(m, b)
+}
+func (m *VolumeCapability_MountVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeCapability_MountVolume.Marshal(b, m, deterministic)
+}
+func (m *VolumeCapability_MountVolume) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeCapability_MountVolume.Merge(m, src)
+}
+func (m *VolumeCapability_MountVolume) XXX_Size() int {
+	return xxx_messageInfo_VolumeCapability_MountVolume.Size(m)
+}
+func (m *VolumeCapability_MountVolume) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeCapability_MountVolume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCapability_MountVolume proto.InternalMessageInfo
+
+func (m *VolumeCapability_MountVolume) GetFsType() string {
+	if m != nil {
+		return m.FsType
+	}
+	return ""
+}
+
+func (m *VolumeCapability_MountVolume) GetMountFlags() []string {
+	if m != nil {
+		return m.MountFlags
+	}
+	return nil
+}
+
+func (m *VolumeCapability_MountVolume) GetVolumeMountGroup() string {
+	if m != nil {
+		return m.VolumeMountGroup
+	}
+	return ""
+}
+
+// Specify how a volume can be accessed.
+type VolumeCapability_AccessMode struct {
+	// This field is REQUIRED.
+	Mode                 VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,proto3,enum=csi.v1.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                         `json:"-"`
+	XXX_unrecognized     []byte                           `json:"-"`
+	XXX_sizecache        int32                            `json:"-"`
+}
+
+func (m *VolumeCapability_AccessMode) Reset()         { *m = VolumeCapability_AccessMode{} }
+func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability_AccessMode) ProtoMessage()    {}
+func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{10, 2}
+}
+
+func (m *VolumeCapability_AccessMode) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeCapability_AccessMode.Unmarshal(m, b)
+}
+func (m *VolumeCapability_AccessMode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeCapability_AccessMode.Marshal(b, m, deterministic)
+}
+func (m *VolumeCapability_AccessMode) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeCapability_AccessMode.Merge(m, src)
+}
+func (m *VolumeCapability_AccessMode) XXX_Size() int {
+	return xxx_messageInfo_VolumeCapability_AccessMode.Size(m)
+}
+func (m *VolumeCapability_AccessMode) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeCapability_AccessMode.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCapability_AccessMode proto.InternalMessageInfo
+
+func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode {
+	if m != nil {
+		return m.Mode
+	}
+	return VolumeCapability_AccessMode_UNKNOWN
+}
+
+// The capacity of the storage space in bytes. To specify an exact size,
+// `required_bytes` and `limit_bytes` SHALL be set to the same value. At
+// least one of the these fields MUST be specified.
+type CapacityRange struct {
+	// Volume MUST be at least this big. This field is OPTIONAL.
+	// A value of 0 is equal to an unspecified field value.
+	// The value of this field MUST NOT be negative.
+	RequiredBytes int64 `protobuf:"varint,1,opt,name=required_bytes,json=requiredBytes,proto3" json:"required_bytes,omitempty"`
+	// Volume MUST not be bigger than this. This field is OPTIONAL.
+	// A value of 0 is equal to an unspecified field value.
+	// The value of this field MUST NOT be negative.
+	LimitBytes           int64    `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes,proto3" json:"limit_bytes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *CapacityRange) Reset()         { *m = CapacityRange{} }
+func (m *CapacityRange) String() string { return proto.CompactTextString(m) }
+func (*CapacityRange) ProtoMessage()    {}
+func (*CapacityRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{11}
+}
+
+func (m *CapacityRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CapacityRange.Unmarshal(m, b)
+}
+func (m *CapacityRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CapacityRange.Marshal(b, m, deterministic)
+}
+func (m *CapacityRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CapacityRange.Merge(m, src)
+}
+func (m *CapacityRange) XXX_Size() int {
+	return xxx_messageInfo_CapacityRange.Size(m)
+}
+func (m *CapacityRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_CapacityRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CapacityRange proto.InternalMessageInfo
+
+func (m *CapacityRange) GetRequiredBytes() int64 {
+	if m != nil {
+		return m.RequiredBytes
+	}
+	return 0
+}
+
+func (m *CapacityRange) GetLimitBytes() int64 {
+	if m != nil {
+		return m.LimitBytes
+	}
+	return 0
+}
+
+// Information about a specific volume.
+type Volume struct {
+	// The capacity of the volume in bytes. This field is OPTIONAL. If not
+	// set (value of 0), it indicates that the capacity of the volume is
+	// unknown (e.g., NFS share).
+	// The value of this field MUST NOT be negative.
+	CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"`
+	// The identifier for this volume, generated by the plugin.
+	// This field is REQUIRED.
+	// This field MUST contain enough information to uniquely identify
+	// this specific volume vs all other volumes supported by this plugin.
+	// This field SHALL be used by the CO in subsequent calls to refer to
+	// this volume.
+	// The SP is NOT responsible for global uniqueness of volume_id across
+	// multiple SPs.
+	VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// Opaque static properties of the volume. SP MAY use this field to
+	// ensure subsequent volume validation and publishing calls have
+	// contextual information.
+	// The contents of this field SHALL be opaque to a CO.
+	// The contents of this field SHALL NOT be mutable.
+	// The contents of this field SHALL be safe for the CO to cache.
+	// The contents of this field SHOULD NOT contain sensitive
+	// information.
+	// The contents of this field SHOULD NOT be used for uniquely
+	// identifying a volume. The `volume_id` alone SHOULD be sufficient to
+	// identify the volume.
+	// A volume uniquely identified by `volume_id` SHALL always report the
+	// same volume_context.
+	// This field is OPTIONAL and when present MUST be passed to volume
+	// validation and publishing calls.
+	VolumeContext map[string]string `protobuf:"bytes,3,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// If specified, indicates that the volume is not empty and is
+	// pre-populated with data from the specified source.
+	// This field is OPTIONAL.
+	ContentSource *VolumeContentSource `protobuf:"bytes,4,opt,name=content_source,json=contentSource,proto3" json:"content_source,omitempty"`
+	// Specifies where (regions, zones, racks, etc.) the provisioned
+	// volume is accessible from.
+	// A plugin that returns this field MUST also set the
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
+	// An SP MAY specify multiple topologies to indicate the volume is
+	// accessible from multiple locations.
+	// COs MAY use this information along with the topology information
+	// returned by NodeGetInfo to ensure that a given volume is accessible
+	// from a given node when scheduling workloads.
+	// This field is OPTIONAL. If it is not specified, the CO MAY assume
+	// the volume is equally accessible from all nodes in the cluster and
+	// MAY schedule workloads referencing the volume on any available
+	// node.
+	//
+	// Example 1:
+	//   accessible_topology = {"region": "R1", "zone": "Z2"}
+	// Indicates a volume accessible only from the "region" "R1" and the
+	// "zone" "Z2".
+	//
+	// Example 2:
+	//   accessible_topology =
+	//     {"region": "R1", "zone": "Z2"},
+	//     {"region": "R1", "zone": "Z3"}
+	// Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3"
+	// in the "region" "R1".
+	AccessibleTopology   []*Topology `protobuf:"bytes,5,rep,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *Volume) Reset()         { *m = Volume{} }
+func (m *Volume) String() string { return proto.CompactTextString(m) }
+func (*Volume) ProtoMessage()    {}
+func (*Volume) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{12}
+}
+
+func (m *Volume) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Volume.Unmarshal(m, b)
+}
+func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Volume.Marshal(b, m, deterministic)
+}
+func (m *Volume) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Volume.Merge(m, src)
+}
+func (m *Volume) XXX_Size() int {
+	return xxx_messageInfo_Volume.Size(m)
+}
+func (m *Volume) XXX_DiscardUnknown() {
+	xxx_messageInfo_Volume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Volume proto.InternalMessageInfo
+
+func (m *Volume) GetCapacityBytes() int64 {
+	if m != nil {
+		return m.CapacityBytes
+	}
+	return 0
+}
+
+func (m *Volume) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *Volume) GetVolumeContext() map[string]string {
+	if m != nil {
+		return m.VolumeContext
+	}
+	return nil
+}
+
+func (m *Volume) GetContentSource() *VolumeContentSource {
+	if m != nil {
+		return m.ContentSource
+	}
+	return nil
+}
+
+func (m *Volume) GetAccessibleTopology() []*Topology {
+	if m != nil {
+		return m.AccessibleTopology
+	}
+	return nil
+}
+
+type TopologyRequirement struct {
+	// Specifies the list of topologies the provisioned volume MUST be
+	// accessible from.
+	// This field is OPTIONAL. If TopologyRequirement is specified either
+	// requisite or preferred or both MUST be specified.
+	//
+	// If requisite is specified, the provisioned volume MUST be
+	// accessible from at least one of the requisite topologies.
+	//
+	// Given
+	//   x = number of topologies provisioned volume is accessible from
+	//   n = number of requisite topologies
+	// The CO MUST ensure n >= 1. The SP MUST ensure x >= 1
+	// If x==n, then the SP MUST make the provisioned volume available to
+	// all topologies from the list of requisite topologies. If it is
+	// unable to do so, the SP MUST fail the CreateVolume call.
+	// For example, if a volume should be accessible from a single zone,
+	// and requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2".
+	// Similarly, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and both "zone" "Z2" and "zone" "Z3".
+	//
+	// If x<n, then the SP SHALL choose x unique topologies from the list
+	// of requisite topologies. If it is unable to do so, the SP MUST fail
+	// the CreateVolume call.
+	// For example, if a volume should be accessible from a single zone,
+	// and requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the SP may choose to make the provisioned volume available in
+	// either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1".
+	// Similarly, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"}
+	// then the provisioned volume MUST be accessible from any combination
+	// of two unique topologies: e.g. "R1/Z2" and "R1/Z3", or "R1/Z2" and
+	//  "R1/Z4", or "R1/Z3" and "R1/Z4".
+	//
+	// If x>n, then the SP MUST make the provisioned volume available from
+	// all topologies from the list of requisite topologies and MAY choose
+	// the remaining x-n unique topologies from the list of all possible
+	// topologies. If it is unable to do so, the SP MUST fail the
+	// CreateVolume call.
+	// For example, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2" and the SP may select the second zone
+	// independently, e.g. "R1/Z4".
+	Requisite []*Topology `protobuf:"bytes,1,rep,name=requisite,proto3" json:"requisite,omitempty"`
+	// Specifies the list of topologies the CO would prefer the volume to
+	// be provisioned in.
+	//
+	// This field is OPTIONAL. If TopologyRequirement is specified either
+	// requisite or preferred or both MUST be specified.
+	//
+	// An SP MUST attempt to make the provisioned volume available using
+	// the preferred topologies in order from first to last.
+	//
+	// If requisite is specified, all topologies in preferred list MUST
+	// also be present in the list of requisite topologies.
+	//
+	// If the SP is unable to to make the provisioned volume available
+	// from any of the preferred topologies, the SP MAY choose a topology
+	// from the list of requisite topologies.
+	// If the list of requisite topologies is not specified, then the SP
+	// MAY choose from the list of all possible topologies.
+	// If the list of requisite topologies is specified and the SP is
+	// unable to to make the provisioned volume available from any of the
+	// requisite topologies it MUST fail the CreateVolume call.
+	//
+	// Example 1:
+	// Given a volume should be accessible from a single zone, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z3"}
+	// then the the SP SHOULD first attempt to make the provisioned volume
+	// available from "zone" "Z3" in the "region" "R1" and fall back to
+	// "zone" "Z2" in the "region" "R1" if that is not possible.
+	//
+	// Example 2:
+	// Given a volume should be accessible from a single zone, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z5"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z2"}
+	// then the the SP SHOULD first attempt to make the provisioned volume
+	// accessible from "zone" "Z4" in the "region" "R1" and fall back to
+	// "zone" "Z2" in the "region" "R1" if that is not possible. If that
+	// is not possible, the SP may choose between either the "zone"
+	// "Z3" or "Z5" in the "region" "R1".
+	//
+	// Example 3:
+	// Given a volume should be accessible from TWO zones (because an
+	// opaque parameter in CreateVolumeRequest, for example, specifies
+	// the volume is accessible from two zones, aka synchronously
+	// replicated), and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z5"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z5"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the the SP SHOULD first attempt to make the provisioned volume
+	// accessible from the combination of the two "zones" "Z5" and "Z3" in
+	// the "region" "R1". If that's not possible, it should fall back to
+	// a combination of "Z5" and other possibilities from the list of
+	// requisite. If that's not possible, it should fall back  to a
+	// combination of "Z3" and other possibilities from the list of
+	// requisite. If that's not possible, it should fall back  to a
+	// combination of other possibilities from the list of requisite.
+	Preferred            []*Topology `protobuf:"bytes,2,rep,name=preferred,proto3" json:"preferred,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *TopologyRequirement) Reset()         { *m = TopologyRequirement{} }
+func (m *TopologyRequirement) String() string { return proto.CompactTextString(m) }
+func (*TopologyRequirement) ProtoMessage()    {}
+func (*TopologyRequirement) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{13}
+}
+
+func (m *TopologyRequirement) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TopologyRequirement.Unmarshal(m, b)
+}
+func (m *TopologyRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TopologyRequirement.Marshal(b, m, deterministic)
+}
+func (m *TopologyRequirement) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TopologyRequirement.Merge(m, src)
+}
+func (m *TopologyRequirement) XXX_Size() int {
+	return xxx_messageInfo_TopologyRequirement.Size(m)
+}
+func (m *TopologyRequirement) XXX_DiscardUnknown() {
+	xxx_messageInfo_TopologyRequirement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TopologyRequirement proto.InternalMessageInfo
+
+func (m *TopologyRequirement) GetRequisite() []*Topology {
+	if m != nil {
+		return m.Requisite
+	}
+	return nil
+}
+
+func (m *TopologyRequirement) GetPreferred() []*Topology {
+	if m != nil {
+		return m.Preferred
+	}
+	return nil
+}
+
+// Topology is a map of topological domains to topological segments.
+// A topological domain is a sub-division of a cluster, like "region",
+// "zone", "rack", etc.
+// A topological segment is a specific instance of a topological domain,
+// like "zone3", "rack3", etc.
+// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
+// Valid keys have two segments: an OPTIONAL prefix and name, separated
+// by a slash (/), for example: "com.company.example/zone".
+// The key name segment is REQUIRED. The prefix is OPTIONAL.
+// The key name MUST be 63 characters or less, begin and end with an
+// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
+// underscores (_), dots (.), or alphanumerics in between, for example
+// "zone".
+// The key prefix MUST be 63 characters or less, begin and end with a
+// lower-case alphanumeric character ([a-z0-9]), contain only
+// dashes (-), dots (.), or lower-case alphanumerics in between, and
+// follow domain name notation format
+// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
+// The key prefix SHOULD include the plugin's host company name and/or
+// the plugin name, to minimize the possibility of collisions with keys
+// from other plugins.
+// If a key prefix is specified, it MUST be identical across all
+// topology keys returned by the SP (across all RPCs).
+// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
+// MUST not both exist.
+// Each value (topological segment) MUST contain 1 or more strings.
+// Each string MUST be 63 characters or less and begin and end with an
+// alphanumeric character with '-', '_', '.', or alphanumerics in
+// between.
+type Topology struct {
+	Segments             map[string]string `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *Topology) Reset()         { *m = Topology{} }
+func (m *Topology) String() string { return proto.CompactTextString(m) }
+func (*Topology) ProtoMessage()    {}
+func (*Topology) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{14}
+}
+
+func (m *Topology) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Topology.Unmarshal(m, b)
+}
+func (m *Topology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Topology.Marshal(b, m, deterministic)
+}
+func (m *Topology) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Topology.Merge(m, src)
+}
+func (m *Topology) XXX_Size() int {
+	return xxx_messageInfo_Topology.Size(m)
+}
+func (m *Topology) XXX_DiscardUnknown() {
+	xxx_messageInfo_Topology.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Topology proto.InternalMessageInfo
+
+func (m *Topology) GetSegments() map[string]string {
+	if m != nil {
+		return m.Segments
+	}
+	return nil
+}
+
+type DeleteVolumeRequest struct {
+	// The ID of the volume to be deprovisioned.
+	// This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// Secrets required by plugin to complete volume deletion request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets              map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *DeleteVolumeRequest) Reset()         { *m = DeleteVolumeRequest{} }
+func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteVolumeRequest) ProtoMessage()    {}
+func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{15}
+}
+
+func (m *DeleteVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeleteVolumeRequest.Unmarshal(m, b)
+}
+func (m *DeleteVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeleteVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *DeleteVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteVolumeRequest.Merge(m, src)
+}
+func (m *DeleteVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_DeleteVolumeRequest.Size(m)
+}
+func (m *DeleteVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteVolumeRequest proto.InternalMessageInfo
+
+func (m *DeleteVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *DeleteVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type DeleteVolumeResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeleteVolumeResponse) Reset()         { *m = DeleteVolumeResponse{} }
+func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteVolumeResponse) ProtoMessage()    {}
+func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{16}
+}
+
+func (m *DeleteVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeleteVolumeResponse.Unmarshal(m, b)
+}
+func (m *DeleteVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeleteVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *DeleteVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteVolumeResponse.Merge(m, src)
+}
+func (m *DeleteVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_DeleteVolumeResponse.Size(m)
+}
+func (m *DeleteVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteVolumeResponse proto.InternalMessageInfo
+
+type ControllerPublishVolumeRequest struct {
+	// The ID of the volume to be used on a node.
+	// This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The ID of the node. This field is REQUIRED. The CO SHALL set this
+	// field to match the node ID returned by `NodeGetInfo`.
+	NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+	// Volume capability describing how the CO intends to use this volume.
+	// SP MUST ensure the CO can use the published volume as described.
+	// Otherwise SP MUST return the appropriate gRPC error code.
+	// This is a REQUIRED field.
+	VolumeCapability *VolumeCapability `protobuf:"bytes,3,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"`
+	// Indicates SP MUST publish the volume in readonly mode.
+	// CO MUST set this field to false if SP does not have the
+	// PUBLISH_READONLY controller capability.
+	// This is a REQUIRED field.
+	Readonly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"`
+	// Secrets required by plugin to complete controller publish volume
+	// request. This field is OPTIONAL. Refer to the
+	// `Secrets Requirements` section on how to use this field.
+	Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Volume context as returned by SP in
+	// CreateVolumeResponse.Volume.volume_context.
+	// This field is OPTIONAL and MUST match the volume_context of the
+	// volume identified by `volume_id`.
+	VolumeContext        map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ControllerPublishVolumeRequest) Reset()         { *m = ControllerPublishVolumeRequest{} }
+func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*ControllerPublishVolumeRequest) ProtoMessage()    {}
+func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{17}
+}
+
+func (m *ControllerPublishVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerPublishVolumeRequest.Unmarshal(m, b)
+}
+func (m *ControllerPublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerPublishVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *ControllerPublishVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerPublishVolumeRequest.Merge(m, src)
+}
+func (m *ControllerPublishVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_ControllerPublishVolumeRequest.Size(m)
+}
+func (m *ControllerPublishVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerPublishVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerPublishVolumeRequest proto.InternalMessageInfo
+
+func (m *ControllerPublishVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *ControllerPublishVolumeRequest) GetNodeId() string {
+	if m != nil {
+		return m.NodeId
+	}
+	return ""
+}
+
+func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability {
+	if m != nil {
+		return m.VolumeCapability
+	}
+	return nil
+}
+
+func (m *ControllerPublishVolumeRequest) GetReadonly() bool {
+	if m != nil {
+		return m.Readonly
+	}
+	return false
+}
+
+func (m *ControllerPublishVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *ControllerPublishVolumeRequest) GetVolumeContext() map[string]string {
+	if m != nil {
+		return m.VolumeContext
+	}
+	return nil
+}
+
+type ControllerPublishVolumeResponse struct {
+	// Opaque static publish properties of the volume. SP MAY use this
+	// field to ensure subsequent `NodeStageVolume` or `NodePublishVolume`
+	// calls calls have contextual information.
+	// The contents of this field SHALL be opaque to a CO.
+	// The contents of this field SHALL NOT be mutable.
+	// The contents of this field SHALL be safe for the CO to cache.
+	// The contents of this field SHOULD NOT contain sensitive
+	// information.
+	// The contents of this field SHOULD NOT be used for uniquely
+	// identifying a volume. The `volume_id` alone SHOULD be sufficient to
+	// identify the volume.
+	// This field is OPTIONAL and when present MUST be passed to
+	// subsequent `NodeStageVolume` or `NodePublishVolume` calls
+	PublishContext       map[string]string `protobuf:"bytes,1,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ControllerPublishVolumeResponse) Reset()         { *m = ControllerPublishVolumeResponse{} }
+func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*ControllerPublishVolumeResponse) ProtoMessage()    {}
+func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{18}
+}
+
+func (m *ControllerPublishVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerPublishVolumeResponse.Unmarshal(m, b)
+}
+func (m *ControllerPublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerPublishVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *ControllerPublishVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerPublishVolumeResponse.Merge(m, src)
+}
+func (m *ControllerPublishVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_ControllerPublishVolumeResponse.Size(m)
+}
+func (m *ControllerPublishVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerPublishVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerPublishVolumeResponse proto.InternalMessageInfo
+
+func (m *ControllerPublishVolumeResponse) GetPublishContext() map[string]string {
+	if m != nil {
+		return m.PublishContext
+	}
+	return nil
+}
+
+type ControllerUnpublishVolumeRequest struct {
+	// The ID of the volume. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The ID of the node. This field is OPTIONAL. The CO SHOULD set this
+	// field to match the node ID returned by `NodeGetInfo` or leave it
+	// unset. If the value is set, the SP MUST unpublish the volume from
+	// the specified node. If the value is unset, the SP MUST unpublish
+	// the volume from all nodes it is published to.
+	NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+	// Secrets required by plugin to complete controller unpublish volume
+	// request. This SHOULD be the same secrets passed to the
+	// ControllerPublishVolume call for the specified volume.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets              map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ControllerUnpublishVolumeRequest) Reset()         { *m = ControllerUnpublishVolumeRequest{} }
+func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*ControllerUnpublishVolumeRequest) ProtoMessage()    {}
+func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{19}
+}
+
+func (m *ControllerUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerUnpublishVolumeRequest.Unmarshal(m, b)
+}
+func (m *ControllerUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerUnpublishVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *ControllerUnpublishVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerUnpublishVolumeRequest.Merge(m, src)
+}
+func (m *ControllerUnpublishVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_ControllerUnpublishVolumeRequest.Size(m)
+}
+func (m *ControllerUnpublishVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerUnpublishVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerUnpublishVolumeRequest proto.InternalMessageInfo
+
+func (m *ControllerUnpublishVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *ControllerUnpublishVolumeRequest) GetNodeId() string {
+	if m != nil {
+		return m.NodeId
+	}
+	return ""
+}
+
+func (m *ControllerUnpublishVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type ControllerUnpublishVolumeResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ControllerUnpublishVolumeResponse) Reset()         { *m = ControllerUnpublishVolumeResponse{} }
+func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*ControllerUnpublishVolumeResponse) ProtoMessage()    {}
+func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{20}
+}
+
+func (m *ControllerUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerUnpublishVolumeResponse.Unmarshal(m, b)
+}
+func (m *ControllerUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerUnpublishVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *ControllerUnpublishVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerUnpublishVolumeResponse.Merge(m, src)
+}
+func (m *ControllerUnpublishVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_ControllerUnpublishVolumeResponse.Size(m)
+}
+func (m *ControllerUnpublishVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerUnpublishVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerUnpublishVolumeResponse proto.InternalMessageInfo
+
+type ValidateVolumeCapabilitiesRequest struct {
+	// The ID of the volume to check. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// Volume context as returned by SP in
+	// CreateVolumeResponse.Volume.volume_context.
+	// This field is OPTIONAL and MUST match the volume_context of the
+	// volume identified by `volume_id`.
+	VolumeContext map[string]string `protobuf:"bytes,2,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// The capabilities that the CO wants to check for the volume. This
+	// call SHALL return "confirmed" only if all the volume capabilities
+	// specified below are supported. This field is REQUIRED.
+	VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"`
+	// See CreateVolumeRequest.parameters.
+	// This field is OPTIONAL.
+	Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Secrets required by plugin to complete volume validation request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets              map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ValidateVolumeCapabilitiesRequest) Reset()         { *m = ValidateVolumeCapabilitiesRequest{} }
+func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) }
+func (*ValidateVolumeCapabilitiesRequest) ProtoMessage()    {}
+func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{21}
+}
+
+func (m *ValidateVolumeCapabilitiesRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Unmarshal(m, b)
+}
+func (m *ValidateVolumeCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Marshal(b, m, deterministic)
+}
+func (m *ValidateVolumeCapabilitiesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Merge(m, src)
+}
+func (m *ValidateVolumeCapabilitiesRequest) XXX_Size() int {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Size(m)
+}
+func (m *ValidateVolumeCapabilitiesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ValidateVolumeCapabilitiesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValidateVolumeCapabilitiesRequest proto.InternalMessageInfo
+
+func (m *ValidateVolumeCapabilitiesRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *ValidateVolumeCapabilitiesRequest) GetVolumeContext() map[string]string {
+	if m != nil {
+		return m.VolumeContext
+	}
+	return nil
+}
+
+func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability {
+	if m != nil {
+		return m.VolumeCapabilities
+	}
+	return nil
+}
+
+func (m *ValidateVolumeCapabilitiesRequest) GetParameters() map[string]string {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+func (m *ValidateVolumeCapabilitiesRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type ValidateVolumeCapabilitiesResponse struct {
+	// Confirmed indicates to the CO the set of capabilities that the
+	// plugin has validated. This field SHALL only be set to a non-empty
+	// value for successful validation responses.
+	// For successful validation responses, the CO SHALL compare the
+	// fields of this message to the originally requested capabilities in
+	// order to guard against an older plugin reporting "valid" for newer
+	// capability fields that it does not yet understand.
+	// This field is OPTIONAL.
+	Confirmed *ValidateVolumeCapabilitiesResponse_Confirmed `protobuf:"bytes,1,opt,name=confirmed,proto3" json:"confirmed,omitempty"`
+	// Message to the CO if `confirmed` above is empty. This field is
+	// OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	Message              string   `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ValidateVolumeCapabilitiesResponse) Reset()         { *m = ValidateVolumeCapabilitiesResponse{} }
+func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
+func (*ValidateVolumeCapabilitiesResponse) ProtoMessage()    {}
+func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{22}
+}
+
+func (m *ValidateVolumeCapabilitiesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Unmarshal(m, b)
+}
+func (m *ValidateVolumeCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Marshal(b, m, deterministic)
+}
+func (m *ValidateVolumeCapabilitiesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Merge(m, src)
+}
+func (m *ValidateVolumeCapabilitiesResponse) XXX_Size() int {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Size(m)
+}
+func (m *ValidateVolumeCapabilitiesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ValidateVolumeCapabilitiesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValidateVolumeCapabilitiesResponse proto.InternalMessageInfo
+
+func (m *ValidateVolumeCapabilitiesResponse) GetConfirmed() *ValidateVolumeCapabilitiesResponse_Confirmed {
+	if m != nil {
+		return m.Confirmed
+	}
+	return nil
+}
+
+func (m *ValidateVolumeCapabilitiesResponse) GetMessage() string {
+	if m != nil {
+		return m.Message
+	}
+	return ""
+}
+
+type ValidateVolumeCapabilitiesResponse_Confirmed struct {
+	// Volume context validated by the plugin.
+	// This field is OPTIONAL.
+	VolumeContext map[string]string `protobuf:"bytes,1,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Volume capabilities supported by the plugin.
+	// This field is REQUIRED.
+	VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"`
+	// The volume creation parameters validated by the plugin.
+	// This field is OPTIONAL.
+	Parameters           map[string]string `protobuf:"bytes,3,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) Reset() {
+	*m = ValidateVolumeCapabilitiesResponse_Confirmed{}
+}
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) String() string {
+	return proto.CompactTextString(m)
+}
+func (*ValidateVolumeCapabilitiesResponse_Confirmed) ProtoMessage() {}
+func (*ValidateVolumeCapabilitiesResponse_Confirmed) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{22, 0}
+}
+
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Unmarshal(m, b)
+}
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Marshal(b, m, deterministic)
+}
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Merge(m, src)
+}
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Size() int {
+	return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Size(m)
+}
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_DiscardUnknown() {
+	xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed proto.InternalMessageInfo
+
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeContext() map[string]string {
+	if m != nil {
+		return m.VolumeContext
+	}
+	return nil
+}
+
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeCapabilities() []*VolumeCapability {
+	if m != nil {
+		return m.VolumeCapabilities
+	}
+	return nil
+}
+
+func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetParameters() map[string]string {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+type ListVolumesRequest struct {
+	// If specified (non-zero value), the Plugin MUST NOT return more
+	// entries than this number in the response. If the actual number of
+	// entries is more than this number, the Plugin MUST set `next_token`
+	// in the response which can be used to get the next page of entries
+	// in the subsequent `ListVolumes` call. This field is OPTIONAL. If
+	// not specified (zero value), it means there is no restriction on the
+	// number of entries that can be returned.
+	// The value of this field MUST NOT be negative.
+	MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"`
+	// A token to specify where to start paginating. Set this field to
+	// `next_token` returned by a previous `ListVolumes` call to get the
+	// next page of entries. This field is OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	StartingToken        string   `protobuf:"bytes,2,opt,name=starting_token,json=startingToken,proto3" json:"starting_token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ListVolumesRequest) Reset()         { *m = ListVolumesRequest{} }
+func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListVolumesRequest) ProtoMessage()    {}
+func (*ListVolumesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{23}
+}
+
+func (m *ListVolumesRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListVolumesRequest.Unmarshal(m, b)
+}
+func (m *ListVolumesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListVolumesRequest.Marshal(b, m, deterministic)
+}
+func (m *ListVolumesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListVolumesRequest.Merge(m, src)
+}
+func (m *ListVolumesRequest) XXX_Size() int {
+	return xxx_messageInfo_ListVolumesRequest.Size(m)
+}
+func (m *ListVolumesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListVolumesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListVolumesRequest proto.InternalMessageInfo
+
+func (m *ListVolumesRequest) GetMaxEntries() int32 {
+	if m != nil {
+		return m.MaxEntries
+	}
+	return 0
+}
+
+func (m *ListVolumesRequest) GetStartingToken() string {
+	if m != nil {
+		return m.StartingToken
+	}
+	return ""
+}
+
+type ListVolumesResponse struct {
+	Entries []*ListVolumesResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+	// This token allows you to get the next page of entries for
+	// `ListVolumes` request. If the number of entries is larger than
+	// `max_entries`, use the `next_token` as a value for the
+	// `starting_token` field in the next `ListVolumes` request. This
+	// field is OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	NextToken            string   `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ListVolumesResponse) Reset()         { *m = ListVolumesResponse{} }
+func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListVolumesResponse) ProtoMessage()    {}
+func (*ListVolumesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{24}
+}
+
+func (m *ListVolumesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListVolumesResponse.Unmarshal(m, b)
+}
+func (m *ListVolumesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListVolumesResponse.Marshal(b, m, deterministic)
+}
+func (m *ListVolumesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListVolumesResponse.Merge(m, src)
+}
+func (m *ListVolumesResponse) XXX_Size() int {
+	return xxx_messageInfo_ListVolumesResponse.Size(m)
+}
+func (m *ListVolumesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListVolumesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListVolumesResponse proto.InternalMessageInfo
+
+func (m *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry {
+	if m != nil {
+		return m.Entries
+	}
+	return nil
+}
+
+func (m *ListVolumesResponse) GetNextToken() string {
+	if m != nil {
+		return m.NextToken
+	}
+	return ""
+}
+
+type ListVolumesResponse_VolumeStatus struct {
+	// A list of all `node_id` of nodes that the volume in this entry
+	// is controller published on.
+	// This field is OPTIONAL. If it is not specified and the SP has
+	// the LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO
+	// MAY assume the volume is not controller published to any nodes.
+	// If the field is not specified and the SP does not have the
+	// LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO MUST
+	// not interpret this field.
+	// published_node_ids MAY include nodes not published to or
+	// reported by the SP. The CO MUST be resilient to that.
+	PublishedNodeIds []string `protobuf:"bytes,1,rep,name=published_node_ids,json=publishedNodeIds,proto3" json:"published_node_ids,omitempty"`
+	// Information about the current condition of the volume.
+	// This field is OPTIONAL.
+	// This field MUST be specified if the
+	// VOLUME_CONDITION controller capability is supported.
+	VolumeCondition      *VolumeCondition `protobuf:"bytes,2,opt,name=volume_condition,json=volumeCondition,proto3" json:"volume_condition,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *ListVolumesResponse_VolumeStatus) Reset()         { *m = ListVolumesResponse_VolumeStatus{} }
+func (m *ListVolumesResponse_VolumeStatus) String() string { return proto.CompactTextString(m) }
+func (*ListVolumesResponse_VolumeStatus) ProtoMessage()    {}
+func (*ListVolumesResponse_VolumeStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{24, 0}
+}
+
+func (m *ListVolumesResponse_VolumeStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Unmarshal(m, b)
+}
+func (m *ListVolumesResponse_VolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Marshal(b, m, deterministic)
+}
+func (m *ListVolumesResponse_VolumeStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListVolumesResponse_VolumeStatus.Merge(m, src)
+}
+func (m *ListVolumesResponse_VolumeStatus) XXX_Size() int {
+	return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Size(m)
+}
+func (m *ListVolumesResponse_VolumeStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListVolumesResponse_VolumeStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListVolumesResponse_VolumeStatus proto.InternalMessageInfo
+
+func (m *ListVolumesResponse_VolumeStatus) GetPublishedNodeIds() []string {
+	if m != nil {
+		return m.PublishedNodeIds
+	}
+	return nil
+}
+
+func (m *ListVolumesResponse_VolumeStatus) GetVolumeCondition() *VolumeCondition {
+	if m != nil {
+		return m.VolumeCondition
+	}
+	return nil
+}
+
+type ListVolumesResponse_Entry struct {
+	// This field is REQUIRED
+	Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"`
+	// This field is OPTIONAL. This field MUST be specified if the
+	// LIST_VOLUMES_PUBLISHED_NODES controller capability is
+	// supported.
+	Status               *ListVolumesResponse_VolumeStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                          `json:"-"`
+	XXX_unrecognized     []byte                            `json:"-"`
+	XXX_sizecache        int32                             `json:"-"`
+}
+
+func (m *ListVolumesResponse_Entry) Reset()         { *m = ListVolumesResponse_Entry{} }
+func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) }
+func (*ListVolumesResponse_Entry) ProtoMessage()    {}
+func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{24, 1}
+}
+
+func (m *ListVolumesResponse_Entry) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListVolumesResponse_Entry.Unmarshal(m, b)
+}
+func (m *ListVolumesResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListVolumesResponse_Entry.Marshal(b, m, deterministic)
+}
+func (m *ListVolumesResponse_Entry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListVolumesResponse_Entry.Merge(m, src)
+}
+func (m *ListVolumesResponse_Entry) XXX_Size() int {
+	return xxx_messageInfo_ListVolumesResponse_Entry.Size(m)
+}
+func (m *ListVolumesResponse_Entry) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListVolumesResponse_Entry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListVolumesResponse_Entry proto.InternalMessageInfo
+
+func (m *ListVolumesResponse_Entry) GetVolume() *Volume {
+	if m != nil {
+		return m.Volume
+	}
+	return nil
+}
+
+func (m *ListVolumesResponse_Entry) GetStatus() *ListVolumesResponse_VolumeStatus {
+	if m != nil {
+		return m.Status
+	}
+	return nil
+}
+
+type ControllerGetVolumeRequest struct {
+	// The ID of the volume to fetch current volume information for.
+	// This field is REQUIRED.
+	VolumeId             string   `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ControllerGetVolumeRequest) Reset()         { *m = ControllerGetVolumeRequest{} }
+func (m *ControllerGetVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*ControllerGetVolumeRequest) ProtoMessage()    {}
+func (*ControllerGetVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{25}
+}
+
+func (m *ControllerGetVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerGetVolumeRequest.Unmarshal(m, b)
+}
+func (m *ControllerGetVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerGetVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *ControllerGetVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerGetVolumeRequest.Merge(m, src)
+}
+func (m *ControllerGetVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_ControllerGetVolumeRequest.Size(m)
+}
+func (m *ControllerGetVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerGetVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerGetVolumeRequest proto.InternalMessageInfo
+
+func (m *ControllerGetVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+type ControllerGetVolumeResponse struct {
+	// This field is REQUIRED
+	Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"`
+	// This field is REQUIRED.
+	Status               *ControllerGetVolumeResponse_VolumeStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                                  `json:"-"`
+	XXX_unrecognized     []byte                                    `json:"-"`
+	XXX_sizecache        int32                                     `json:"-"`
+}
+
+func (m *ControllerGetVolumeResponse) Reset()         { *m = ControllerGetVolumeResponse{} }
+func (m *ControllerGetVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*ControllerGetVolumeResponse) ProtoMessage()    {}
+func (*ControllerGetVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{26}
+}
+
+func (m *ControllerGetVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerGetVolumeResponse.Unmarshal(m, b)
+}
+func (m *ControllerGetVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerGetVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *ControllerGetVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerGetVolumeResponse.Merge(m, src)
+}
+func (m *ControllerGetVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_ControllerGetVolumeResponse.Size(m)
+}
+func (m *ControllerGetVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerGetVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerGetVolumeResponse proto.InternalMessageInfo
+
+func (m *ControllerGetVolumeResponse) GetVolume() *Volume {
+	if m != nil {
+		return m.Volume
+	}
+	return nil
+}
+
+func (m *ControllerGetVolumeResponse) GetStatus() *ControllerGetVolumeResponse_VolumeStatus {
+	if m != nil {
+		return m.Status
+	}
+	return nil
+}
+
+type ControllerGetVolumeResponse_VolumeStatus struct {
+	// A list of all the `node_id` of nodes that this volume is
+	// controller published on.
+	// This field is OPTIONAL.
+	// This field MUST be specified if the PUBLISH_UNPUBLISH_VOLUME
+	// controller capability is supported.
+	// published_node_ids MAY include nodes not published to or
+	// reported by the SP. The CO MUST be resilient to that.
+	PublishedNodeIds []string `protobuf:"bytes,1,rep,name=published_node_ids,json=publishedNodeIds,proto3" json:"published_node_ids,omitempty"`
+	// Information about the current condition of the volume.
+	// This field is OPTIONAL.
+	// This field MUST be specified if the
+	// VOLUME_CONDITION controller capability is supported.
+	VolumeCondition      *VolumeCondition `protobuf:"bytes,2,opt,name=volume_condition,json=volumeCondition,proto3" json:"volume_condition,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *ControllerGetVolumeResponse_VolumeStatus) Reset() {
+	*m = ControllerGetVolumeResponse_VolumeStatus{}
+}
+func (m *ControllerGetVolumeResponse_VolumeStatus) String() string { return proto.CompactTextString(m) }
+func (*ControllerGetVolumeResponse_VolumeStatus) ProtoMessage()    {}
+func (*ControllerGetVolumeResponse_VolumeStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{26, 0}
+}
+
+func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.Unmarshal(m, b)
+}
+func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.Marshal(b, m, deterministic)
+}
+func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.Merge(m, src)
+}
+func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_Size() int {
+	return xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.Size(m)
+}
+func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus proto.InternalMessageInfo
+
+func (m *ControllerGetVolumeResponse_VolumeStatus) GetPublishedNodeIds() []string {
+	if m != nil {
+		return m.PublishedNodeIds
+	}
+	return nil
+}
+
+func (m *ControllerGetVolumeResponse_VolumeStatus) GetVolumeCondition() *VolumeCondition {
+	if m != nil {
+		return m.VolumeCondition
+	}
+	return nil
+}
+
+type GetCapacityRequest struct {
+	// If specified, the Plugin SHALL report the capacity of the storage
+	// that can be used to provision volumes that satisfy ALL of the
+	// specified `volume_capabilities`. These are the same
+	// `volume_capabilities` the CO will use in `CreateVolumeRequest`.
+	// This field is OPTIONAL.
+	VolumeCapabilities []*VolumeCapability `protobuf:"bytes,1,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"`
+	// If specified, the Plugin SHALL report the capacity of the storage
+	// that can be used to provision volumes with the given Plugin
+	// specific `parameters`. These are the same `parameters` the CO will
+	// use in `CreateVolumeRequest`. This field is OPTIONAL.
+	Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// If specified, the Plugin SHALL report the capacity of the storage
+	// that can be used to provision volumes that in the specified
+	// `accessible_topology`. This is the same as the
+	// `accessible_topology` the CO returns in a `CreateVolumeResponse`.
+	// This field is OPTIONAL. This field SHALL NOT be set unless the
+	// plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability.
+	AccessibleTopology   *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *GetCapacityRequest) Reset()         { *m = GetCapacityRequest{} }
+func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) }
+func (*GetCapacityRequest) ProtoMessage()    {}
+func (*GetCapacityRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{27}
+}
+
+func (m *GetCapacityRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetCapacityRequest.Unmarshal(m, b)
+}
+func (m *GetCapacityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetCapacityRequest.Marshal(b, m, deterministic)
+}
+func (m *GetCapacityRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetCapacityRequest.Merge(m, src)
+}
+func (m *GetCapacityRequest) XXX_Size() int {
+	return xxx_messageInfo_GetCapacityRequest.Size(m)
+}
+func (m *GetCapacityRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetCapacityRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetCapacityRequest proto.InternalMessageInfo
+
+func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability {
+	if m != nil {
+		return m.VolumeCapabilities
+	}
+	return nil
+}
+
+func (m *GetCapacityRequest) GetParameters() map[string]string {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+func (m *GetCapacityRequest) GetAccessibleTopology() *Topology {
+	if m != nil {
+		return m.AccessibleTopology
+	}
+	return nil
+}
+
+type GetCapacityResponse struct {
+	// The available capacity, in bytes, of the storage that can be used
+	// to provision volumes. If `volume_capabilities` or `parameters` is
+	// specified in the request, the Plugin SHALL take those into
+	// consideration when calculating the available capacity of the
+	// storage. This field is REQUIRED.
+	// The value of this field MUST NOT be negative.
+	AvailableCapacity int64 `protobuf:"varint,1,opt,name=available_capacity,json=availableCapacity,proto3" json:"available_capacity,omitempty"`
+	// The largest size that may be used in a
+	// CreateVolumeRequest.capacity_range.required_bytes field
+	// to create a volume with the same parameters as those in
+	// GetCapacityRequest.
+	//
+	// If `volume_capabilities` or `parameters` is
+	// specified in the request, the Plugin SHALL take those into
+	// consideration when calculating the minimum volume size of the
+	// storage.
+	//
+	// This field is OPTIONAL. MUST NOT be negative.
+	// The Plugin SHOULD provide a value for this field if it has
+	// a maximum size for individual volumes and leave it unset
+	// otherwise. COs MAY use it to make decision about
+	// where to create volumes.
+	MaximumVolumeSize *wrappers.Int64Value `protobuf:"bytes,2,opt,name=maximum_volume_size,json=maximumVolumeSize,proto3" json:"maximum_volume_size,omitempty"`
+	// The smallest size that may be used in a
+	// CreateVolumeRequest.capacity_range.limit_bytes field
+	// to create a volume with the same parameters as those in
+	// GetCapacityRequest.
+	//
+	// If `volume_capabilities` or `parameters` is
+	// specified in the request, the Plugin SHALL take those into
+	// consideration when calculating the maximum volume size of the
+	// storage.
+	//
+	// This field is OPTIONAL. MUST NOT be negative.
+	// The Plugin SHOULD provide a value for this field if it has
+	// a minimum size for individual volumes and leave it unset
+	// otherwise. COs MAY use it to make decision about
+	// where to create volumes.
+	MinimumVolumeSize    *wrappers.Int64Value `protobuf:"bytes,3,opt,name=minimum_volume_size,json=minimumVolumeSize,proto3" json:"minimum_volume_size,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *GetCapacityResponse) Reset()         { *m = GetCapacityResponse{} }
+func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) }
+func (*GetCapacityResponse) ProtoMessage()    {}
+func (*GetCapacityResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{28}
+}
+
+func (m *GetCapacityResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetCapacityResponse.Unmarshal(m, b)
+}
+func (m *GetCapacityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetCapacityResponse.Marshal(b, m, deterministic)
+}
+func (m *GetCapacityResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetCapacityResponse.Merge(m, src)
+}
+func (m *GetCapacityResponse) XXX_Size() int {
+	return xxx_messageInfo_GetCapacityResponse.Size(m)
+}
+func (m *GetCapacityResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetCapacityResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetCapacityResponse proto.InternalMessageInfo
+
+func (m *GetCapacityResponse) GetAvailableCapacity() int64 {
+	if m != nil {
+		return m.AvailableCapacity
+	}
+	return 0
+}
+
+func (m *GetCapacityResponse) GetMaximumVolumeSize() *wrappers.Int64Value {
+	if m != nil {
+		return m.MaximumVolumeSize
+	}
+	return nil
+}
+
+func (m *GetCapacityResponse) GetMinimumVolumeSize() *wrappers.Int64Value {
+	if m != nil {
+		return m.MinimumVolumeSize
+	}
+	return nil
+}
+
+type ControllerGetCapabilitiesRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ControllerGetCapabilitiesRequest) Reset()         { *m = ControllerGetCapabilitiesRequest{} }
+func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) }
+func (*ControllerGetCapabilitiesRequest) ProtoMessage()    {}
+func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{29}
+}
+
+func (m *ControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerGetCapabilitiesRequest.Unmarshal(m, b)
+}
+func (m *ControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerGetCapabilitiesRequest.Marshal(b, m, deterministic)
+}
+func (m *ControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerGetCapabilitiesRequest.Merge(m, src)
+}
+func (m *ControllerGetCapabilitiesRequest) XXX_Size() int {
+	return xxx_messageInfo_ControllerGetCapabilitiesRequest.Size(m)
+}
+func (m *ControllerGetCapabilitiesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerGetCapabilitiesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerGetCapabilitiesRequest proto.InternalMessageInfo
+
+type ControllerGetCapabilitiesResponse struct {
+	// All the capabilities that the controller service supports. This
+	// field is OPTIONAL.
+	Capabilities         []*ControllerServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
+	XXX_unrecognized     []byte                         `json:"-"`
+	XXX_sizecache        int32                          `json:"-"`
+}
+
+func (m *ControllerGetCapabilitiesResponse) Reset()         { *m = ControllerGetCapabilitiesResponse{} }
+func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
+func (*ControllerGetCapabilitiesResponse) ProtoMessage()    {}
+func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{30}
+}
+
+func (m *ControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerGetCapabilitiesResponse.Unmarshal(m, b)
+}
+func (m *ControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerGetCapabilitiesResponse.Marshal(b, m, deterministic)
+}
+func (m *ControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerGetCapabilitiesResponse.Merge(m, src)
+}
+func (m *ControllerGetCapabilitiesResponse) XXX_Size() int {
+	return xxx_messageInfo_ControllerGetCapabilitiesResponse.Size(m)
+}
+func (m *ControllerGetCapabilitiesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerGetCapabilitiesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerGetCapabilitiesResponse proto.InternalMessageInfo
+
+func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability {
+	if m != nil {
+		return m.Capabilities
+	}
+	return nil
+}
+
+// Specifies a capability of the controller service.
+type ControllerServiceCapability struct {
+	// Types that are valid to be assigned to Type:
+	//	*ControllerServiceCapability_Rpc
+	Type                 isControllerServiceCapability_Type `protobuf_oneof:"type"`
+	XXX_NoUnkeyedLiteral struct{}                           `json:"-"`
+	XXX_unrecognized     []byte                             `json:"-"`
+	XXX_sizecache        int32                              `json:"-"`
+}
+
+func (m *ControllerServiceCapability) Reset()         { *m = ControllerServiceCapability{} }
+func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) }
+func (*ControllerServiceCapability) ProtoMessage()    {}
+func (*ControllerServiceCapability) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{31}
+}
+
+func (m *ControllerServiceCapability) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerServiceCapability.Unmarshal(m, b)
+}
+func (m *ControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerServiceCapability.Marshal(b, m, deterministic)
+}
+func (m *ControllerServiceCapability) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerServiceCapability.Merge(m, src)
+}
+func (m *ControllerServiceCapability) XXX_Size() int {
+	return xxx_messageInfo_ControllerServiceCapability.Size(m)
+}
+func (m *ControllerServiceCapability) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerServiceCapability.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerServiceCapability proto.InternalMessageInfo
+
+type isControllerServiceCapability_Type interface {
+	isControllerServiceCapability_Type()
+}
+
+type ControllerServiceCapability_Rpc struct {
+	Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"`
+}
+
+func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {}
+
+func (m *ControllerServiceCapability) GetType() isControllerServiceCapability_Type {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC {
+	if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok {
+		return x.Rpc
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*ControllerServiceCapability) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*ControllerServiceCapability_Rpc)(nil),
+	}
+}
+
+type ControllerServiceCapability_RPC struct {
+	Type                 ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.ControllerServiceCapability_RPC_Type" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                             `json:"-"`
+	XXX_unrecognized     []byte                               `json:"-"`
+	XXX_sizecache        int32                                `json:"-"`
+}
+
+func (m *ControllerServiceCapability_RPC) Reset()         { *m = ControllerServiceCapability_RPC{} }
+func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) }
+func (*ControllerServiceCapability_RPC) ProtoMessage()    {}
+func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{31, 0}
+}
+
+func (m *ControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerServiceCapability_RPC.Unmarshal(m, b)
+}
+func (m *ControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerServiceCapability_RPC.Marshal(b, m, deterministic)
+}
+func (m *ControllerServiceCapability_RPC) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerServiceCapability_RPC.Merge(m, src)
+}
+func (m *ControllerServiceCapability_RPC) XXX_Size() int {
+	return xxx_messageInfo_ControllerServiceCapability_RPC.Size(m)
+}
+func (m *ControllerServiceCapability_RPC) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerServiceCapability_RPC.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerServiceCapability_RPC proto.InternalMessageInfo
+
+func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type {
+	if m != nil {
+		return m.Type
+	}
+	return ControllerServiceCapability_RPC_UNKNOWN
+}
+
+type CreateSnapshotRequest struct {
+	// The ID of the source volume to be snapshotted.
+	// This field is REQUIRED.
+	SourceVolumeId string `protobuf:"bytes,1,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"`
+	// The suggested name for the snapshot. This field is REQUIRED for
+	// idempotency.
+	// Any Unicode string that conforms to the length limit is allowed
+	// except those containing the following banned characters:
+	// U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
+	// (These are control characters other than commonly used whitespace.)
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// Secrets required by plugin to complete snapshot creation request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Plugin specific parameters passed in as opaque key-value pairs.
+	// This field is OPTIONAL. The Plugin is responsible for parsing and
+	// validating these parameters. COs will treat these as opaque.
+	// Use cases for opaque parameters:
+	// - Specify a policy to automatically clean up the snapshot.
+	// - Specify an expiration date for the snapshot.
+	// - Specify whether the snapshot is readonly or read/write.
+	// - Specify if the snapshot should be replicated to some place.
+	// - Specify primary or secondary for replication systems that
+	//   support snapshotting only on primary.
+	Parameters           map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *CreateSnapshotRequest) Reset()         { *m = CreateSnapshotRequest{} }
+func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateSnapshotRequest) ProtoMessage()    {}
+func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{32}
+}
+
+func (m *CreateSnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CreateSnapshotRequest.Unmarshal(m, b)
+}
+func (m *CreateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CreateSnapshotRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateSnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateSnapshotRequest.Merge(m, src)
+}
+func (m *CreateSnapshotRequest) XXX_Size() int {
+	return xxx_messageInfo_CreateSnapshotRequest.Size(m)
+}
+func (m *CreateSnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateSnapshotRequest proto.InternalMessageInfo
+
+func (m *CreateSnapshotRequest) GetSourceVolumeId() string {
+	if m != nil {
+		return m.SourceVolumeId
+	}
+	return ""
+}
+
+func (m *CreateSnapshotRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *CreateSnapshotRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *CreateSnapshotRequest) GetParameters() map[string]string {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+type CreateSnapshotResponse struct {
+	// Contains all attributes of the newly created snapshot that are
+	// relevant to the CO along with information required by the Plugin
+	// to uniquely identify the snapshot. This field is REQUIRED.
+	Snapshot             *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *CreateSnapshotResponse) Reset()         { *m = CreateSnapshotResponse{} }
+func (m *CreateSnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateSnapshotResponse) ProtoMessage()    {}
+func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{33}
+}
+
+func (m *CreateSnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CreateSnapshotResponse.Unmarshal(m, b)
+}
+func (m *CreateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CreateSnapshotResponse.Marshal(b, m, deterministic)
+}
+func (m *CreateSnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateSnapshotResponse.Merge(m, src)
+}
+func (m *CreateSnapshotResponse) XXX_Size() int {
+	return xxx_messageInfo_CreateSnapshotResponse.Size(m)
+}
+func (m *CreateSnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateSnapshotResponse proto.InternalMessageInfo
+
+func (m *CreateSnapshotResponse) GetSnapshot() *Snapshot {
+	if m != nil {
+		return m.Snapshot
+	}
+	return nil
+}
+
+// Information about a specific snapshot.
+type Snapshot struct {
+	// This is the complete size of the snapshot in bytes. The purpose of
+	// this field is to give CO guidance on how much space is needed to
+	// create a volume from this snapshot. The size of the volume MUST NOT
+	// be less than the size of the source snapshot. This field is
+	// OPTIONAL. If this field is not set, it indicates that this size is
+	// unknown. The value of this field MUST NOT be negative and a size of
+	// zero means it is unspecified.
+	SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"`
+	// The identifier for this snapshot, generated by the plugin.
+	// This field is REQUIRED.
+	// This field MUST contain enough information to uniquely identify
+	// this specific snapshot vs all other snapshots supported by this
+	// plugin.
+	// This field SHALL be used by the CO in subsequent calls to refer to
+	// this snapshot.
+	// The SP is NOT responsible for global uniqueness of snapshot_id
+	// across multiple SPs.
+	SnapshotId string `protobuf:"bytes,2,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
+	// Identity information for the source volume. Note that creating a
+	// snapshot from a snapshot is not supported here so the source has to
+	// be a volume. This field is REQUIRED.
+	SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"`
+	// Timestamp when the point-in-time snapshot is taken on the storage
+	// system. This field is REQUIRED.
+	CreationTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"`
+	// Indicates if a snapshot is ready to use as a
+	// `volume_content_source` in a `CreateVolumeRequest`. The default
+	// value is false. This field is REQUIRED.
+	ReadyToUse           bool     `protobuf:"varint,5,opt,name=ready_to_use,json=readyToUse,proto3" json:"ready_to_use,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Snapshot) Reset()         { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage()    {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{34}
+}
+
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Snapshot.Unmarshal(m, b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+}
+func (m *Snapshot) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Snapshot.Merge(m, src)
+}
+func (m *Snapshot) XXX_Size() int {
+	return xxx_messageInfo_Snapshot.Size(m)
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+	xxx_messageInfo_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
+
+func (m *Snapshot) GetSizeBytes() int64 {
+	if m != nil {
+		return m.SizeBytes
+	}
+	return 0
+}
+
+func (m *Snapshot) GetSnapshotId() string {
+	if m != nil {
+		return m.SnapshotId
+	}
+	return ""
+}
+
+func (m *Snapshot) GetSourceVolumeId() string {
+	if m != nil {
+		return m.SourceVolumeId
+	}
+	return ""
+}
+
+func (m *Snapshot) GetCreationTime() *timestamp.Timestamp {
+	if m != nil {
+		return m.CreationTime
+	}
+	return nil
+}
+
+func (m *Snapshot) GetReadyToUse() bool {
+	if m != nil {
+		return m.ReadyToUse
+	}
+	return false
+}
+
+type DeleteSnapshotRequest struct {
+	// The ID of the snapshot to be deleted.
+	// This field is REQUIRED.
+	SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
+	// Secrets required by plugin to complete snapshot deletion request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets              map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *DeleteSnapshotRequest) Reset()         { *m = DeleteSnapshotRequest{} }
+func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteSnapshotRequest) ProtoMessage()    {}
+func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{35}
+}
+
+func (m *DeleteSnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeleteSnapshotRequest.Unmarshal(m, b)
+}
+func (m *DeleteSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeleteSnapshotRequest.Marshal(b, m, deterministic)
+}
+func (m *DeleteSnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteSnapshotRequest.Merge(m, src)
+}
+func (m *DeleteSnapshotRequest) XXX_Size() int {
+	return xxx_messageInfo_DeleteSnapshotRequest.Size(m)
+}
+func (m *DeleteSnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteSnapshotRequest proto.InternalMessageInfo
+
+func (m *DeleteSnapshotRequest) GetSnapshotId() string {
+	if m != nil {
+		return m.SnapshotId
+	}
+	return ""
+}
+
+func (m *DeleteSnapshotRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type DeleteSnapshotResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeleteSnapshotResponse) Reset()         { *m = DeleteSnapshotResponse{} }
+func (m *DeleteSnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteSnapshotResponse) ProtoMessage()    {}
+func (*DeleteSnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{36}
+}
+
+func (m *DeleteSnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeleteSnapshotResponse.Unmarshal(m, b)
+}
+func (m *DeleteSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeleteSnapshotResponse.Marshal(b, m, deterministic)
+}
+func (m *DeleteSnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteSnapshotResponse.Merge(m, src)
+}
+func (m *DeleteSnapshotResponse) XXX_Size() int {
+	return xxx_messageInfo_DeleteSnapshotResponse.Size(m)
+}
+func (m *DeleteSnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteSnapshotResponse proto.InternalMessageInfo
+
+// List all snapshots on the storage system regardless of how they were
+// created.
+type ListSnapshotsRequest struct {
+	// If specified (non-zero value), the Plugin MUST NOT return more
+	// entries than this number in the response. If the actual number of
+	// entries is more than this number, the Plugin MUST set `next_token`
+	// in the response which can be used to get the next page of entries
+	// in the subsequent `ListSnapshots` call. This field is OPTIONAL. If
+	// not specified (zero value), it means there is no restriction on the
+	// number of entries that can be returned.
+	// The value of this field MUST NOT be negative.
+	MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"`
+	// A token to specify where to start paginating. Set this field to
+	// `next_token` returned by a previous `ListSnapshots` call to get the
+	// next page of entries. This field is OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken,proto3" json:"starting_token,omitempty"`
+	// Identity information for the source volume. This field is OPTIONAL.
+	// It can be used to list snapshots by volume.
+	SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"`
+	// Identity information for a specific snapshot. This field is
+	// OPTIONAL. It can be used to list only a specific snapshot.
+	// ListSnapshots will return with current snapshot information
+	// and will not block if the snapshot is being processed after
+	// it is cut.
+	SnapshotId string `protobuf:"bytes,4,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
+	// Secrets required by plugin to complete ListSnapshot request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets              map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ListSnapshotsRequest) Reset()         { *m = ListSnapshotsRequest{} }
+func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListSnapshotsRequest) ProtoMessage()    {}
+func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{37}
+}
+
+func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListSnapshotsRequest.Unmarshal(m, b)
+}
+func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListSnapshotsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListSnapshotsRequest.Merge(m, src)
+}
+func (m *ListSnapshotsRequest) XXX_Size() int {
+	return xxx_messageInfo_ListSnapshotsRequest.Size(m)
+}
+func (m *ListSnapshotsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo
+
+func (m *ListSnapshotsRequest) GetMaxEntries() int32 {
+	if m != nil {
+		return m.MaxEntries
+	}
+	return 0
+}
+
+func (m *ListSnapshotsRequest) GetStartingToken() string {
+	if m != nil {
+		return m.StartingToken
+	}
+	return ""
+}
+
+func (m *ListSnapshotsRequest) GetSourceVolumeId() string {
+	if m != nil {
+		return m.SourceVolumeId
+	}
+	return ""
+}
+
+func (m *ListSnapshotsRequest) GetSnapshotId() string {
+	if m != nil {
+		return m.SnapshotId
+	}
+	return ""
+}
+
+func (m *ListSnapshotsRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type ListSnapshotsResponse struct {
+	Entries []*ListSnapshotsResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+	// This token allows you to get the next page of entries for
+	// `ListSnapshots` request. If the number of entries is larger than
+	// `max_entries`, use the `next_token` as a value for the
+	// `starting_token` field in the next `ListSnapshots` request. This
+	// field is OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	NextToken            string   `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ListSnapshotsResponse) Reset()         { *m = ListSnapshotsResponse{} }
+func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListSnapshotsResponse) ProtoMessage()    {}
+func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{38}
+}
+
+func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListSnapshotsResponse.Unmarshal(m, b)
+}
+func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic)
+}
+func (m *ListSnapshotsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListSnapshotsResponse.Merge(m, src)
+}
+func (m *ListSnapshotsResponse) XXX_Size() int {
+	return xxx_messageInfo_ListSnapshotsResponse.Size(m)
+}
+func (m *ListSnapshotsResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo
+
+func (m *ListSnapshotsResponse) GetEntries() []*ListSnapshotsResponse_Entry {
+	if m != nil {
+		return m.Entries
+	}
+	return nil
+}
+
+func (m *ListSnapshotsResponse) GetNextToken() string {
+	if m != nil {
+		return m.NextToken
+	}
+	return ""
+}
+
+type ListSnapshotsResponse_Entry struct {
+	Snapshot             *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *ListSnapshotsResponse_Entry) Reset()         { *m = ListSnapshotsResponse_Entry{} }
+func (m *ListSnapshotsResponse_Entry) String() string { return proto.CompactTextString(m) }
+func (*ListSnapshotsResponse_Entry) ProtoMessage()    {}
+func (*ListSnapshotsResponse_Entry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{38, 0}
+}
+
+func (m *ListSnapshotsResponse_Entry) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListSnapshotsResponse_Entry.Unmarshal(m, b)
+}
+func (m *ListSnapshotsResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListSnapshotsResponse_Entry.Marshal(b, m, deterministic)
+}
+func (m *ListSnapshotsResponse_Entry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListSnapshotsResponse_Entry.Merge(m, src)
+}
+func (m *ListSnapshotsResponse_Entry) XXX_Size() int {
+	return xxx_messageInfo_ListSnapshotsResponse_Entry.Size(m)
+}
+func (m *ListSnapshotsResponse_Entry) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListSnapshotsResponse_Entry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListSnapshotsResponse_Entry proto.InternalMessageInfo
+
+func (m *ListSnapshotsResponse_Entry) GetSnapshot() *Snapshot {
+	if m != nil {
+		return m.Snapshot
+	}
+	return nil
+}
+
+type ControllerExpandVolumeRequest struct {
+	// The ID of the volume to expand. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// This allows CO to specify the capacity requirements of the volume
+	// after expansion. This field is REQUIRED.
+	CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"`
+	// Secrets required by the plugin for expanding the volume.
+	// This field is OPTIONAL.
+	Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Volume capability describing how the CO intends to use this volume.
+	// This allows SP to determine if volume is being used as a block
+	// device or mounted file system. For example - if volume is
+	// being used as a block device - the SP MAY set
+	// node_expansion_required to false in ControllerExpandVolumeResponse
+	// to skip invocation of NodeExpandVolume on the node by the CO.
+	// This is an OPTIONAL field.
+	VolumeCapability     *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *ControllerExpandVolumeRequest) Reset()         { *m = ControllerExpandVolumeRequest{} }
+func (m *ControllerExpandVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*ControllerExpandVolumeRequest) ProtoMessage()    {}
+func (*ControllerExpandVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{39}
+}
+
+func (m *ControllerExpandVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerExpandVolumeRequest.Unmarshal(m, b)
+}
+func (m *ControllerExpandVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerExpandVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *ControllerExpandVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerExpandVolumeRequest.Merge(m, src)
+}
+func (m *ControllerExpandVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_ControllerExpandVolumeRequest.Size(m)
+}
+func (m *ControllerExpandVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerExpandVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerExpandVolumeRequest proto.InternalMessageInfo
+
+func (m *ControllerExpandVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *ControllerExpandVolumeRequest) GetCapacityRange() *CapacityRange {
+	if m != nil {
+		return m.CapacityRange
+	}
+	return nil
+}
+
+func (m *ControllerExpandVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *ControllerExpandVolumeRequest) GetVolumeCapability() *VolumeCapability {
+	if m != nil {
+		return m.VolumeCapability
+	}
+	return nil
+}
+
+type ControllerExpandVolumeResponse struct {
+	// Capacity of volume after expansion. This field is REQUIRED.
+	CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"`
+	// Whether node expansion is required for the volume. When true
+	// the CO MUST make NodeExpandVolume RPC call on the node. This field
+	// is REQUIRED.
+	NodeExpansionRequired bool     `protobuf:"varint,2,opt,name=node_expansion_required,json=nodeExpansionRequired,proto3" json:"node_expansion_required,omitempty"`
+	XXX_NoUnkeyedLiteral  struct{} `json:"-"`
+	XXX_unrecognized      []byte   `json:"-"`
+	XXX_sizecache         int32    `json:"-"`
+}
+
+func (m *ControllerExpandVolumeResponse) Reset()         { *m = ControllerExpandVolumeResponse{} }
+func (m *ControllerExpandVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*ControllerExpandVolumeResponse) ProtoMessage()    {}
+func (*ControllerExpandVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{40}
+}
+
+func (m *ControllerExpandVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerExpandVolumeResponse.Unmarshal(m, b)
+}
+func (m *ControllerExpandVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerExpandVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *ControllerExpandVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerExpandVolumeResponse.Merge(m, src)
+}
+func (m *ControllerExpandVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_ControllerExpandVolumeResponse.Size(m)
+}
+func (m *ControllerExpandVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerExpandVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerExpandVolumeResponse proto.InternalMessageInfo
+
+func (m *ControllerExpandVolumeResponse) GetCapacityBytes() int64 {
+	if m != nil {
+		return m.CapacityBytes
+	}
+	return 0
+}
+
+func (m *ControllerExpandVolumeResponse) GetNodeExpansionRequired() bool {
+	if m != nil {
+		return m.NodeExpansionRequired
+	}
+	return false
+}
+
+type NodeStageVolumeRequest struct {
+	// The ID of the volume to publish. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The CO SHALL set this field to the value returned by
+	// `ControllerPublishVolume` if the corresponding Controller Plugin
+	// has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
+	// left unset if the corresponding Controller Plugin does not have
+	// this capability. This is an OPTIONAL field.
+	PublishContext map[string]string `protobuf:"bytes,2,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// The path to which the volume MAY be staged. It MUST be an
+	// absolute path in the root filesystem of the process serving this
+	// request, and MUST be a directory. The CO SHALL ensure that there
+	// is only one `staging_target_path` per volume. The CO SHALL ensure
+	// that the path is directory and that the process serving the
+	// request has `read` and `write` permission to that directory. The
+	// CO SHALL be responsible for creating the directory if it does not
+	// exist.
+	// This is a REQUIRED field.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"`
+	// Volume capability describing how the CO intends to use this volume.
+	// SP MUST ensure the CO can use the staged volume as described.
+	// Otherwise SP MUST return the appropriate gRPC error code.
+	// This is a REQUIRED field.
+	VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"`
+	// Secrets required by plugin to complete node stage volume request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Volume context as returned by SP in
+	// CreateVolumeResponse.Volume.volume_context.
+	// This field is OPTIONAL and MUST match the volume_context of the
+	// volume identified by `volume_id`.
+	VolumeContext        map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *NodeStageVolumeRequest) Reset()         { *m = NodeStageVolumeRequest{} }
+func (m *NodeStageVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeStageVolumeRequest) ProtoMessage()    {}
+func (*NodeStageVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{41}
+}
+
+func (m *NodeStageVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeStageVolumeRequest.Unmarshal(m, b)
+}
+func (m *NodeStageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeStageVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeStageVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeStageVolumeRequest.Merge(m, src)
+}
+func (m *NodeStageVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeStageVolumeRequest.Size(m)
+}
+func (m *NodeStageVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeStageVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeStageVolumeRequest proto.InternalMessageInfo
+
+func (m *NodeStageVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *NodeStageVolumeRequest) GetPublishContext() map[string]string {
+	if m != nil {
+		return m.PublishContext
+	}
+	return nil
+}
+
+func (m *NodeStageVolumeRequest) GetStagingTargetPath() string {
+	if m != nil {
+		return m.StagingTargetPath
+	}
+	return ""
+}
+
+func (m *NodeStageVolumeRequest) GetVolumeCapability() *VolumeCapability {
+	if m != nil {
+		return m.VolumeCapability
+	}
+	return nil
+}
+
+func (m *NodeStageVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *NodeStageVolumeRequest) GetVolumeContext() map[string]string {
+	if m != nil {
+		return m.VolumeContext
+	}
+	return nil
+}
+
+type NodeStageVolumeResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeStageVolumeResponse) Reset()         { *m = NodeStageVolumeResponse{} }
+func (m *NodeStageVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeStageVolumeResponse) ProtoMessage()    {}
+func (*NodeStageVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{42}
+}
+
+func (m *NodeStageVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeStageVolumeResponse.Unmarshal(m, b)
+}
+func (m *NodeStageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeStageVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeStageVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeStageVolumeResponse.Merge(m, src)
+}
+func (m *NodeStageVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeStageVolumeResponse.Size(m)
+}
+func (m *NodeStageVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeStageVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeStageVolumeResponse proto.InternalMessageInfo
+
+type NodeUnstageVolumeRequest struct {
+	// The ID of the volume. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The path at which the volume was staged. It MUST be an absolute
+	// path in the root filesystem of the process serving this request.
+	// This is a REQUIRED field.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	StagingTargetPath    string   `protobuf:"bytes,2,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeUnstageVolumeRequest) Reset()         { *m = NodeUnstageVolumeRequest{} }
+func (m *NodeUnstageVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeUnstageVolumeRequest) ProtoMessage()    {}
+func (*NodeUnstageVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{43}
+}
+
+func (m *NodeUnstageVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeUnstageVolumeRequest.Unmarshal(m, b)
+}
+func (m *NodeUnstageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeUnstageVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeUnstageVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeUnstageVolumeRequest.Merge(m, src)
+}
+func (m *NodeUnstageVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeUnstageVolumeRequest.Size(m)
+}
+func (m *NodeUnstageVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeUnstageVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeUnstageVolumeRequest proto.InternalMessageInfo
+
+func (m *NodeUnstageVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *NodeUnstageVolumeRequest) GetStagingTargetPath() string {
+	if m != nil {
+		return m.StagingTargetPath
+	}
+	return ""
+}
+
+type NodeUnstageVolumeResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeUnstageVolumeResponse) Reset()         { *m = NodeUnstageVolumeResponse{} }
+func (m *NodeUnstageVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeUnstageVolumeResponse) ProtoMessage()    {}
+func (*NodeUnstageVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{44}
+}
+
+func (m *NodeUnstageVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeUnstageVolumeResponse.Unmarshal(m, b)
+}
+func (m *NodeUnstageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeUnstageVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeUnstageVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeUnstageVolumeResponse.Merge(m, src)
+}
+func (m *NodeUnstageVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeUnstageVolumeResponse.Size(m)
+}
+func (m *NodeUnstageVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeUnstageVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeUnstageVolumeResponse proto.InternalMessageInfo
+
+type NodePublishVolumeRequest struct {
+	// The ID of the volume to publish. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The CO SHALL set this field to the value returned by
+	// `ControllerPublishVolume` if the corresponding Controller Plugin
+	// has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
+	// left unset if the corresponding Controller Plugin does not have
+	// this capability. This is an OPTIONAL field.
+	PublishContext map[string]string `protobuf:"bytes,2,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// The path to which the volume was staged by `NodeStageVolume`.
+	// It MUST be an absolute path in the root filesystem of the process
+	// serving this request.
+	// It MUST be set if the Node Plugin implements the
+	// `STAGE_UNSTAGE_VOLUME` node capability.
+	// This is an OPTIONAL field.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"`
+	// The path to which the volume will be published. It MUST be an
+	// absolute path in the root filesystem of the process serving this
+	// request. The CO SHALL ensure uniqueness of target_path per volume.
+	// The CO SHALL ensure that the parent directory of this path exists
+	// and that the process serving the request has `read` and `write`
+	// permissions to that parent directory.
+	// For volumes with an access type of block, the SP SHALL place the
+	// block device at target_path.
+	// For volumes with an access type of mount, the SP SHALL place the
+	// mounted directory at target_path.
+	// Creation of target_path is the responsibility of the SP.
+	// This is a REQUIRED field.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath,proto3" json:"target_path,omitempty"`
+	// Volume capability describing how the CO intends to use this volume.
+	// SP MUST ensure the CO can use the published volume as described.
+	// Otherwise SP MUST return the appropriate gRPC error code.
+	// This is a REQUIRED field.
+	VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"`
+	// Indicates SP MUST publish the volume in readonly mode.
+	// This field is REQUIRED.
+	Readonly bool `protobuf:"varint,6,opt,name=readonly,proto3" json:"readonly,omitempty"`
+	// Secrets required by plugin to complete node publish volume request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets map[string]string `protobuf:"bytes,7,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Volume context as returned by SP in
+	// CreateVolumeResponse.Volume.volume_context.
+	// This field is OPTIONAL and MUST match the volume_context of the
+	// volume identified by `volume_id`.
+	VolumeContext        map[string]string `protobuf:"bytes,8,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *NodePublishVolumeRequest) Reset()         { *m = NodePublishVolumeRequest{} }
+func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*NodePublishVolumeRequest) ProtoMessage()    {}
+func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{45}
+}
+
+func (m *NodePublishVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodePublishVolumeRequest.Unmarshal(m, b)
+}
+func (m *NodePublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodePublishVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *NodePublishVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodePublishVolumeRequest.Merge(m, src)
+}
+func (m *NodePublishVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_NodePublishVolumeRequest.Size(m)
+}
+func (m *NodePublishVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodePublishVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodePublishVolumeRequest proto.InternalMessageInfo
+
+func (m *NodePublishVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *NodePublishVolumeRequest) GetPublishContext() map[string]string {
+	if m != nil {
+		return m.PublishContext
+	}
+	return nil
+}
+
+func (m *NodePublishVolumeRequest) GetStagingTargetPath() string {
+	if m != nil {
+		return m.StagingTargetPath
+	}
+	return ""
+}
+
+func (m *NodePublishVolumeRequest) GetTargetPath() string {
+	if m != nil {
+		return m.TargetPath
+	}
+	return ""
+}
+
+func (m *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability {
+	if m != nil {
+		return m.VolumeCapability
+	}
+	return nil
+}
+
+func (m *NodePublishVolumeRequest) GetReadonly() bool {
+	if m != nil {
+		return m.Readonly
+	}
+	return false
+}
+
+func (m *NodePublishVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *NodePublishVolumeRequest) GetVolumeContext() map[string]string {
+	if m != nil {
+		return m.VolumeContext
+	}
+	return nil
+}
+
+type NodePublishVolumeResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodePublishVolumeResponse) Reset()         { *m = NodePublishVolumeResponse{} }
+func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*NodePublishVolumeResponse) ProtoMessage()    {}
+func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{46}
+}
+
+func (m *NodePublishVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodePublishVolumeResponse.Unmarshal(m, b)
+}
+func (m *NodePublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodePublishVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *NodePublishVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodePublishVolumeResponse.Merge(m, src)
+}
+func (m *NodePublishVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_NodePublishVolumeResponse.Size(m)
+}
+func (m *NodePublishVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodePublishVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodePublishVolumeResponse proto.InternalMessageInfo
+
+type NodeUnpublishVolumeRequest struct {
+	// The ID of the volume. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The path at which the volume was published. It MUST be an absolute
+	// path in the root filesystem of the process serving this request.
+	// The SP MUST delete the file or directory it created at this path.
+	// This is a REQUIRED field.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	TargetPath           string   `protobuf:"bytes,2,opt,name=target_path,json=targetPath,proto3" json:"target_path,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeUnpublishVolumeRequest) Reset()         { *m = NodeUnpublishVolumeRequest{} }
+func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeUnpublishVolumeRequest) ProtoMessage()    {}
+func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{47}
+}
+
+func (m *NodeUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeUnpublishVolumeRequest.Unmarshal(m, b)
+}
+func (m *NodeUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeUnpublishVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeUnpublishVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeUnpublishVolumeRequest.Merge(m, src)
+}
+func (m *NodeUnpublishVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeUnpublishVolumeRequest.Size(m)
+}
+func (m *NodeUnpublishVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeUnpublishVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeUnpublishVolumeRequest proto.InternalMessageInfo
+
+func (m *NodeUnpublishVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *NodeUnpublishVolumeRequest) GetTargetPath() string {
+	if m != nil {
+		return m.TargetPath
+	}
+	return ""
+}
+
+type NodeUnpublishVolumeResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeUnpublishVolumeResponse) Reset()         { *m = NodeUnpublishVolumeResponse{} }
+func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeUnpublishVolumeResponse) ProtoMessage()    {}
+func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{48}
+}
+
+func (m *NodeUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeUnpublishVolumeResponse.Unmarshal(m, b)
+}
+func (m *NodeUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeUnpublishVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeUnpublishVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeUnpublishVolumeResponse.Merge(m, src)
+}
+func (m *NodeUnpublishVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeUnpublishVolumeResponse.Size(m)
+}
+func (m *NodeUnpublishVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeUnpublishVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeUnpublishVolumeResponse proto.InternalMessageInfo
+
+type NodeGetVolumeStatsRequest struct {
+	// The ID of the volume. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// It can be any valid path where volume was previously
+	// staged or published.
+	// It MUST be an absolute path in the root filesystem of
+	// the process serving this request.
+	// This is a REQUIRED field.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	VolumePath string `protobuf:"bytes,2,opt,name=volume_path,json=volumePath,proto3" json:"volume_path,omitempty"`
+	// The path where the volume is staged, if the plugin has the
+	// STAGE_UNSTAGE_VOLUME capability, otherwise empty.
+	// If not empty, it MUST be an absolute path in the root
+	// filesystem of the process serving this request.
+	// This field is OPTIONAL.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	StagingTargetPath    string   `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeGetVolumeStatsRequest) Reset()         { *m = NodeGetVolumeStatsRequest{} }
+func (m *NodeGetVolumeStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeGetVolumeStatsRequest) ProtoMessage()    {}
+func (*NodeGetVolumeStatsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{49}
+}
+
+func (m *NodeGetVolumeStatsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeGetVolumeStatsRequest.Unmarshal(m, b)
+}
+func (m *NodeGetVolumeStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeGetVolumeStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeGetVolumeStatsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeGetVolumeStatsRequest.Merge(m, src)
+}
+func (m *NodeGetVolumeStatsRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeGetVolumeStatsRequest.Size(m)
+}
+func (m *NodeGetVolumeStatsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeGetVolumeStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeGetVolumeStatsRequest proto.InternalMessageInfo
+
+func (m *NodeGetVolumeStatsRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *NodeGetVolumeStatsRequest) GetVolumePath() string {
+	if m != nil {
+		return m.VolumePath
+	}
+	return ""
+}
+
+func (m *NodeGetVolumeStatsRequest) GetStagingTargetPath() string {
+	if m != nil {
+		return m.StagingTargetPath
+	}
+	return ""
+}
+
+type NodeGetVolumeStatsResponse struct {
+	// This field is OPTIONAL.
+	Usage []*VolumeUsage `protobuf:"bytes,1,rep,name=usage,proto3" json:"usage,omitempty"`
+	// Information about the current condition of the volume.
+	// This field is OPTIONAL.
+	// This field MUST be specified if the VOLUME_CONDITION node
+	// capability is supported.
+	VolumeCondition      *VolumeCondition `protobuf:"bytes,2,opt,name=volume_condition,json=volumeCondition,proto3" json:"volume_condition,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *NodeGetVolumeStatsResponse) Reset()         { *m = NodeGetVolumeStatsResponse{} }
+func (m *NodeGetVolumeStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeGetVolumeStatsResponse) ProtoMessage()    {}
+func (*NodeGetVolumeStatsResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{50}
+}
+
+func (m *NodeGetVolumeStatsResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeGetVolumeStatsResponse.Unmarshal(m, b)
+}
+func (m *NodeGetVolumeStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeGetVolumeStatsResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeGetVolumeStatsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeGetVolumeStatsResponse.Merge(m, src)
+}
+func (m *NodeGetVolumeStatsResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeGetVolumeStatsResponse.Size(m)
+}
+func (m *NodeGetVolumeStatsResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeGetVolumeStatsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeGetVolumeStatsResponse proto.InternalMessageInfo
+
+func (m *NodeGetVolumeStatsResponse) GetUsage() []*VolumeUsage {
+	if m != nil {
+		return m.Usage
+	}
+	return nil
+}
+
+func (m *NodeGetVolumeStatsResponse) GetVolumeCondition() *VolumeCondition {
+	if m != nil {
+		return m.VolumeCondition
+	}
+	return nil
+}
+
+type VolumeUsage struct {
+	// The available capacity in specified Unit. This field is OPTIONAL.
+	// The value of this field MUST NOT be negative.
+	Available int64 `protobuf:"varint,1,opt,name=available,proto3" json:"available,omitempty"`
+	// The total capacity in specified Unit. This field is REQUIRED.
+	// The value of this field MUST NOT be negative.
+	Total int64 `protobuf:"varint,2,opt,name=total,proto3" json:"total,omitempty"`
+	// The used capacity in specified Unit. This field is OPTIONAL.
+	// The value of this field MUST NOT be negative.
+	Used int64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"`
+	// Units by which values are measured. This field is REQUIRED.
+	Unit                 VolumeUsage_Unit `protobuf:"varint,4,opt,name=unit,proto3,enum=csi.v1.VolumeUsage_Unit" json:"unit,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *VolumeUsage) Reset()         { *m = VolumeUsage{} }
+func (m *VolumeUsage) String() string { return proto.CompactTextString(m) }
+func (*VolumeUsage) ProtoMessage()    {}
+func (*VolumeUsage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{51}
+}
+
+func (m *VolumeUsage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeUsage.Unmarshal(m, b)
+}
+func (m *VolumeUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeUsage.Marshal(b, m, deterministic)
+}
+func (m *VolumeUsage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeUsage.Merge(m, src)
+}
+func (m *VolumeUsage) XXX_Size() int {
+	return xxx_messageInfo_VolumeUsage.Size(m)
+}
+func (m *VolumeUsage) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeUsage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeUsage proto.InternalMessageInfo
+
+func (m *VolumeUsage) GetAvailable() int64 {
+	if m != nil {
+		return m.Available
+	}
+	return 0
+}
+
+func (m *VolumeUsage) GetTotal() int64 {
+	if m != nil {
+		return m.Total
+	}
+	return 0
+}
+
+func (m *VolumeUsage) GetUsed() int64 {
+	if m != nil {
+		return m.Used
+	}
+	return 0
+}
+
+func (m *VolumeUsage) GetUnit() VolumeUsage_Unit {
+	if m != nil {
+		return m.Unit
+	}
+	return VolumeUsage_UNKNOWN
+}
+
+// VolumeCondition represents the current condition of a volume.
+type VolumeCondition struct {
+	// Normal volumes are available for use and operating optimally.
+	// An abnormal volume does not meet these criteria.
+	// This field is REQUIRED.
+	Abnormal bool `protobuf:"varint,1,opt,name=abnormal,proto3" json:"abnormal,omitempty"`
+	// The message describing the condition of the volume.
+	// This field is REQUIRED.
+	Message              string   `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *VolumeCondition) Reset()         { *m = VolumeCondition{} }
+func (m *VolumeCondition) String() string { return proto.CompactTextString(m) }
+func (*VolumeCondition) ProtoMessage()    {}
+func (*VolumeCondition) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{52}
+}
+
+func (m *VolumeCondition) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_VolumeCondition.Unmarshal(m, b)
+}
+func (m *VolumeCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_VolumeCondition.Marshal(b, m, deterministic)
+}
+func (m *VolumeCondition) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeCondition.Merge(m, src)
+}
+func (m *VolumeCondition) XXX_Size() int {
+	return xxx_messageInfo_VolumeCondition.Size(m)
+}
+func (m *VolumeCondition) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCondition proto.InternalMessageInfo
+
+func (m *VolumeCondition) GetAbnormal() bool {
+	if m != nil {
+		return m.Abnormal
+	}
+	return false
+}
+
+func (m *VolumeCondition) GetMessage() string {
+	if m != nil {
+		return m.Message
+	}
+	return ""
+}
+
+type NodeGetCapabilitiesRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeGetCapabilitiesRequest) Reset()         { *m = NodeGetCapabilitiesRequest{} }
+func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeGetCapabilitiesRequest) ProtoMessage()    {}
+func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{53}
+}
+
+func (m *NodeGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeGetCapabilitiesRequest.Unmarshal(m, b)
+}
+func (m *NodeGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeGetCapabilitiesRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeGetCapabilitiesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeGetCapabilitiesRequest.Merge(m, src)
+}
+func (m *NodeGetCapabilitiesRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeGetCapabilitiesRequest.Size(m)
+}
+func (m *NodeGetCapabilitiesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeGetCapabilitiesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeGetCapabilitiesRequest proto.InternalMessageInfo
+
+type NodeGetCapabilitiesResponse struct {
+	// All the capabilities that the node service supports. This field
+	// is OPTIONAL.
+	Capabilities         []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *NodeGetCapabilitiesResponse) Reset()         { *m = NodeGetCapabilitiesResponse{} }
+func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeGetCapabilitiesResponse) ProtoMessage()    {}
+func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{54}
+}
+
+func (m *NodeGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeGetCapabilitiesResponse.Unmarshal(m, b)
+}
+func (m *NodeGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeGetCapabilitiesResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeGetCapabilitiesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeGetCapabilitiesResponse.Merge(m, src)
+}
+func (m *NodeGetCapabilitiesResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeGetCapabilitiesResponse.Size(m)
+}
+func (m *NodeGetCapabilitiesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeGetCapabilitiesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeGetCapabilitiesResponse proto.InternalMessageInfo
+
+func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability {
+	if m != nil {
+		return m.Capabilities
+	}
+	return nil
+}
+
+// Specifies a capability of the node service.
+type NodeServiceCapability struct {
+	// Types that are valid to be assigned to Type:
+	//	*NodeServiceCapability_Rpc
+	Type                 isNodeServiceCapability_Type `protobuf_oneof:"type"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *NodeServiceCapability) Reset()         { *m = NodeServiceCapability{} }
+func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) }
+func (*NodeServiceCapability) ProtoMessage()    {}
+func (*NodeServiceCapability) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{55}
+}
+
+func (m *NodeServiceCapability) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeServiceCapability.Unmarshal(m, b)
+}
+func (m *NodeServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeServiceCapability.Marshal(b, m, deterministic)
+}
+func (m *NodeServiceCapability) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeServiceCapability.Merge(m, src)
+}
+func (m *NodeServiceCapability) XXX_Size() int {
+	return xxx_messageInfo_NodeServiceCapability.Size(m)
+}
+func (m *NodeServiceCapability) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeServiceCapability.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeServiceCapability proto.InternalMessageInfo
+
+type isNodeServiceCapability_Type interface {
+	isNodeServiceCapability_Type()
+}
+
+type NodeServiceCapability_Rpc struct {
+	Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"`
+}
+
+func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {}
+
+func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC {
+	if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok {
+		return x.Rpc
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*NodeServiceCapability) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*NodeServiceCapability_Rpc)(nil),
+	}
+}
+
+type NodeServiceCapability_RPC struct {
+	Type                 NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.NodeServiceCapability_RPC_Type" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
+	XXX_unrecognized     []byte                         `json:"-"`
+	XXX_sizecache        int32                          `json:"-"`
+}
+
+func (m *NodeServiceCapability_RPC) Reset()         { *m = NodeServiceCapability_RPC{} }
+func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) }
+func (*NodeServiceCapability_RPC) ProtoMessage()    {}
+func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{55, 0}
+}
+
+func (m *NodeServiceCapability_RPC) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeServiceCapability_RPC.Unmarshal(m, b)
+}
+func (m *NodeServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeServiceCapability_RPC.Marshal(b, m, deterministic)
+}
+func (m *NodeServiceCapability_RPC) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeServiceCapability_RPC.Merge(m, src)
+}
+func (m *NodeServiceCapability_RPC) XXX_Size() int {
+	return xxx_messageInfo_NodeServiceCapability_RPC.Size(m)
+}
+func (m *NodeServiceCapability_RPC) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeServiceCapability_RPC.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeServiceCapability_RPC proto.InternalMessageInfo
+
+func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type {
+	if m != nil {
+		return m.Type
+	}
+	return NodeServiceCapability_RPC_UNKNOWN
+}
+
+type NodeGetInfoRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeGetInfoRequest) Reset()         { *m = NodeGetInfoRequest{} }
+func (m *NodeGetInfoRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeGetInfoRequest) ProtoMessage()    {}
+func (*NodeGetInfoRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{56}
+}
+
+func (m *NodeGetInfoRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeGetInfoRequest.Unmarshal(m, b)
+}
+func (m *NodeGetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeGetInfoRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeGetInfoRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeGetInfoRequest.Merge(m, src)
+}
+func (m *NodeGetInfoRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeGetInfoRequest.Size(m)
+}
+func (m *NodeGetInfoRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeGetInfoRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeGetInfoRequest proto.InternalMessageInfo
+
+type NodeGetInfoResponse struct {
+	// The identifier of the node as understood by the SP.
+	// This field is REQUIRED.
+	// This field MUST contain enough information to uniquely identify
+	// this specific node vs all other nodes supported by this plugin.
+	// This field SHALL be used by the CO in subsequent calls, including
+	// `ControllerPublishVolume`, to refer to this node.
+	// The SP is NOT responsible for global uniqueness of node_id across
+	// multiple SPs.
+	// This field overrides the general CSI size limit.
+	// The size of this field SHALL NOT exceed 256 bytes. The general
+	// CSI size limit, 128 byte, is RECOMMENDED for best backwards
+	// compatibility.
+	NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+	// Maximum number of volumes that controller can publish to the node.
+	// If value is not set or zero CO SHALL decide how many volumes of
+	// this type can be published by the controller to the node. The
+	// plugin MUST NOT set negative values here.
+	// This field is OPTIONAL.
+	MaxVolumesPerNode int64 `protobuf:"varint,2,opt,name=max_volumes_per_node,json=maxVolumesPerNode,proto3" json:"max_volumes_per_node,omitempty"`
+	// Specifies where (regions, zones, racks, etc.) the node is
+	// accessible from.
+	// A plugin that returns this field MUST also set the
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
+	// COs MAY use this information along with the topology information
+	// returned in CreateVolumeResponse to ensure that a given volume is
+	// accessible from a given node when scheduling workloads.
+	// This field is OPTIONAL. If it is not specified, the CO MAY assume
+	// the node is not subject to any topological constraint, and MAY
+	// schedule workloads that reference any volume V, such that there are
+	// no topological constraints declared for V.
+	//
+	// Example 1:
+	//   accessible_topology =
+	//     {"region": "R1", "zone": "Z2"}
+	// Indicates the node exists within the "region" "R1" and the "zone"
+	// "Z2".
+	AccessibleTopology   *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *NodeGetInfoResponse) Reset()         { *m = NodeGetInfoResponse{} }
+func (m *NodeGetInfoResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeGetInfoResponse) ProtoMessage()    {}
+func (*NodeGetInfoResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{57}
+}
+
+func (m *NodeGetInfoResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeGetInfoResponse.Unmarshal(m, b)
+}
+func (m *NodeGetInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeGetInfoResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeGetInfoResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeGetInfoResponse.Merge(m, src)
+}
+func (m *NodeGetInfoResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeGetInfoResponse.Size(m)
+}
+func (m *NodeGetInfoResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeGetInfoResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeGetInfoResponse proto.InternalMessageInfo
+
+func (m *NodeGetInfoResponse) GetNodeId() string {
+	if m != nil {
+		return m.NodeId
+	}
+	return ""
+}
+
+func (m *NodeGetInfoResponse) GetMaxVolumesPerNode() int64 {
+	if m != nil {
+		return m.MaxVolumesPerNode
+	}
+	return 0
+}
+
+func (m *NodeGetInfoResponse) GetAccessibleTopology() *Topology {
+	if m != nil {
+		return m.AccessibleTopology
+	}
+	return nil
+}
+
+type NodeExpandVolumeRequest struct {
+	// The ID of the volume. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The path on which volume is available. This field is REQUIRED.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	VolumePath string `protobuf:"bytes,2,opt,name=volume_path,json=volumePath,proto3" json:"volume_path,omitempty"`
+	// This allows CO to specify the capacity requirements of the volume
+	// after expansion. If capacity_range is omitted then a plugin MAY
+	// inspect the file system of the volume to determine the maximum
+	// capacity to which the volume can be expanded. In such cases a
+	// plugin MAY expand the volume to its maximum capacity.
+	// This field is OPTIONAL.
+	CapacityRange *CapacityRange `protobuf:"bytes,3,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"`
+	// The path where the volume is staged, if the plugin has the
+	// STAGE_UNSTAGE_VOLUME capability, otherwise empty.
+	// If not empty, it MUST be an absolute path in the root
+	// filesystem of the process serving this request.
+	// This field is OPTIONAL.
+	// This field overrides the general CSI size limit.
+	// SP SHOULD support the maximum path length allowed by the operating
+	// system/filesystem, but, at a minimum, SP MUST accept a max path
+	// length of at least 128 bytes.
+	StagingTargetPath string `protobuf:"bytes,4,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"`
+	// Volume capability describing how the CO intends to use this volume.
+	// This allows SP to determine if volume is being used as a block
+	// device or mounted file system. For example - if volume is being
+	// used as a block device the SP MAY choose to skip expanding the
+	// filesystem in NodeExpandVolume implementation but still perform
+	// rest of the housekeeping needed for expanding the volume. If
+	// volume_capability is omitted the SP MAY determine
+	// access_type from given volume_path for the volume and perform
+	// node expansion. This is an OPTIONAL field.
+	VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"`
+	// Secrets required by plugin to complete node expand volume request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets              map[string]string `protobuf:"bytes,6,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *NodeExpandVolumeRequest) Reset()         { *m = NodeExpandVolumeRequest{} }
+func (m *NodeExpandVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*NodeExpandVolumeRequest) ProtoMessage()    {}
+func (*NodeExpandVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{58}
+}
+
+func (m *NodeExpandVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeExpandVolumeRequest.Unmarshal(m, b)
+}
+func (m *NodeExpandVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeExpandVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *NodeExpandVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeExpandVolumeRequest.Merge(m, src)
+}
+func (m *NodeExpandVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_NodeExpandVolumeRequest.Size(m)
+}
+func (m *NodeExpandVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeExpandVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeExpandVolumeRequest proto.InternalMessageInfo
+
+func (m *NodeExpandVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *NodeExpandVolumeRequest) GetVolumePath() string {
+	if m != nil {
+		return m.VolumePath
+	}
+	return ""
+}
+
+func (m *NodeExpandVolumeRequest) GetCapacityRange() *CapacityRange {
+	if m != nil {
+		return m.CapacityRange
+	}
+	return nil
+}
+
+func (m *NodeExpandVolumeRequest) GetStagingTargetPath() string {
+	if m != nil {
+		return m.StagingTargetPath
+	}
+	return ""
+}
+
+func (m *NodeExpandVolumeRequest) GetVolumeCapability() *VolumeCapability {
+	if m != nil {
+		return m.VolumeCapability
+	}
+	return nil
+}
+
+func (m *NodeExpandVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type NodeExpandVolumeResponse struct {
+	// The capacity of the volume in bytes. This field is OPTIONAL.
+	CapacityBytes        int64    `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *NodeExpandVolumeResponse) Reset()         { *m = NodeExpandVolumeResponse{} }
+func (m *NodeExpandVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*NodeExpandVolumeResponse) ProtoMessage()    {}
+func (*NodeExpandVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{59}
+}
+
+func (m *NodeExpandVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NodeExpandVolumeResponse.Unmarshal(m, b)
+}
+func (m *NodeExpandVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NodeExpandVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *NodeExpandVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeExpandVolumeResponse.Merge(m, src)
+}
+func (m *NodeExpandVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_NodeExpandVolumeResponse.Size(m)
+}
+func (m *NodeExpandVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeExpandVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeExpandVolumeResponse proto.InternalMessageInfo
+
+func (m *NodeExpandVolumeResponse) GetCapacityBytes() int64 {
+	if m != nil {
+		return m.CapacityBytes
+	}
+	return 0
+}
+
+var E_AlphaEnum = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.EnumOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1060,
+	Name:          "csi.v1.alpha_enum",
+	Tag:           "varint,1060,opt,name=alpha_enum",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+var E_AlphaEnumValue = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.EnumValueOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1060,
+	Name:          "csi.v1.alpha_enum_value",
+	Tag:           "varint,1060,opt,name=alpha_enum_value",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+var E_CsiSecret = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1059,
+	Name:          "csi.v1.csi_secret",
+	Tag:           "varint,1059,opt,name=csi_secret",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+var E_AlphaField = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1060,
+	Name:          "csi.v1.alpha_field",
+	Tag:           "varint,1060,opt,name=alpha_field",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+var E_AlphaMessage = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1060,
+	Name:          "csi.v1.alpha_message",
+	Tag:           "varint,1060,opt,name=alpha_message",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+var E_AlphaMethod = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MethodOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1060,
+	Name:          "csi.v1.alpha_method",
+	Tag:           "varint,1060,opt,name=alpha_method",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+var E_AlphaService = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.ServiceOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         1060,
+	Name:          "csi.v1.alpha_service",
+	Tag:           "varint,1060,opt,name=alpha_service",
+	Filename:      "github.com/container-storage-interface/spec/csi.proto",
+}
+
+func init() {
+	proto.RegisterEnum("csi.v1.PluginCapability_Service_Type", PluginCapability_Service_Type_name, PluginCapability_Service_Type_value)
+	proto.RegisterEnum("csi.v1.PluginCapability_VolumeExpansion_Type", PluginCapability_VolumeExpansion_Type_name, PluginCapability_VolumeExpansion_Type_value)
+	proto.RegisterEnum("csi.v1.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value)
+	proto.RegisterEnum("csi.v1.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value)
+	proto.RegisterEnum("csi.v1.VolumeUsage_Unit", VolumeUsage_Unit_name, VolumeUsage_Unit_value)
+	proto.RegisterEnum("csi.v1.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value)
+	proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.v1.GetPluginInfoRequest")
+	proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v1.GetPluginInfoResponse")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetPluginInfoResponse.ManifestEntry")
+	proto.RegisterType((*GetPluginCapabilitiesRequest)(nil), "csi.v1.GetPluginCapabilitiesRequest")
+	proto.RegisterType((*GetPluginCapabilitiesResponse)(nil), "csi.v1.GetPluginCapabilitiesResponse")
+	proto.RegisterType((*PluginCapability)(nil), "csi.v1.PluginCapability")
+	proto.RegisterType((*PluginCapability_Service)(nil), "csi.v1.PluginCapability.Service")
+	proto.RegisterType((*PluginCapability_VolumeExpansion)(nil), "csi.v1.PluginCapability.VolumeExpansion")
+	proto.RegisterType((*ProbeRequest)(nil), "csi.v1.ProbeRequest")
+	proto.RegisterType((*ProbeResponse)(nil), "csi.v1.ProbeResponse")
+	proto.RegisterType((*CreateVolumeRequest)(nil), "csi.v1.CreateVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeRequest.ParametersEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeRequest.SecretsEntry")
+	proto.RegisterType((*VolumeContentSource)(nil), "csi.v1.VolumeContentSource")
+	proto.RegisterType((*VolumeContentSource_SnapshotSource)(nil), "csi.v1.VolumeContentSource.SnapshotSource")
+	proto.RegisterType((*VolumeContentSource_VolumeSource)(nil), "csi.v1.VolumeContentSource.VolumeSource")
+	proto.RegisterType((*CreateVolumeResponse)(nil), "csi.v1.CreateVolumeResponse")
+	proto.RegisterType((*VolumeCapability)(nil), "csi.v1.VolumeCapability")
+	proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.v1.VolumeCapability.BlockVolume")
+	proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.v1.VolumeCapability.MountVolume")
+	proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.v1.VolumeCapability.AccessMode")
+	proto.RegisterType((*CapacityRange)(nil), "csi.v1.CapacityRange")
+	proto.RegisterType((*Volume)(nil), "csi.v1.Volume")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.Volume.VolumeContextEntry")
+	proto.RegisterType((*TopologyRequirement)(nil), "csi.v1.TopologyRequirement")
+	proto.RegisterType((*Topology)(nil), "csi.v1.Topology")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.Topology.SegmentsEntry")
+	proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.v1.DeleteVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteVolumeRequest.SecretsEntry")
+	proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.v1.DeleteVolumeResponse")
+	proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.v1.ControllerPublishVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeRequest.SecretsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry")
+	proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.v1.ControllerPublishVolumeResponse")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeResponse.PublishContextEntry")
+	proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.v1.ControllerUnpublishVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerUnpublishVolumeRequest.SecretsEntry")
+	proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.v1.ControllerUnpublishVolumeResponse")
+	proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry")
+	proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse")
+	proto.RegisterType((*ValidateVolumeCapabilitiesResponse_Confirmed)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry")
+	proto.RegisterType((*ListVolumesRequest)(nil), "csi.v1.ListVolumesRequest")
+	proto.RegisterType((*ListVolumesResponse)(nil), "csi.v1.ListVolumesResponse")
+	proto.RegisterType((*ListVolumesResponse_VolumeStatus)(nil), "csi.v1.ListVolumesResponse.VolumeStatus")
+	proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.v1.ListVolumesResponse.Entry")
+	proto.RegisterType((*ControllerGetVolumeRequest)(nil), "csi.v1.ControllerGetVolumeRequest")
+	proto.RegisterType((*ControllerGetVolumeResponse)(nil), "csi.v1.ControllerGetVolumeResponse")
+	proto.RegisterType((*ControllerGetVolumeResponse_VolumeStatus)(nil), "csi.v1.ControllerGetVolumeResponse.VolumeStatus")
+	proto.RegisterType((*GetCapacityRequest)(nil), "csi.v1.GetCapacityRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetCapacityRequest.ParametersEntry")
+	proto.RegisterType((*GetCapacityResponse)(nil), "csi.v1.GetCapacityResponse")
+	proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.v1.ControllerGetCapabilitiesRequest")
+	proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), "csi.v1.ControllerGetCapabilitiesResponse")
+	proto.RegisterType((*ControllerServiceCapability)(nil), "csi.v1.ControllerServiceCapability")
+	proto.RegisterType((*ControllerServiceCapability_RPC)(nil), "csi.v1.ControllerServiceCapability.RPC")
+	proto.RegisterType((*CreateSnapshotRequest)(nil), "csi.v1.CreateSnapshotRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateSnapshotRequest.ParametersEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateSnapshotRequest.SecretsEntry")
+	proto.RegisterType((*CreateSnapshotResponse)(nil), "csi.v1.CreateSnapshotResponse")
+	proto.RegisterType((*Snapshot)(nil), "csi.v1.Snapshot")
+	proto.RegisterType((*DeleteSnapshotRequest)(nil), "csi.v1.DeleteSnapshotRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteSnapshotRequest.SecretsEntry")
+	proto.RegisterType((*DeleteSnapshotResponse)(nil), "csi.v1.DeleteSnapshotResponse")
+	proto.RegisterType((*ListSnapshotsRequest)(nil), "csi.v1.ListSnapshotsRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ListSnapshotsRequest.SecretsEntry")
+	proto.RegisterType((*ListSnapshotsResponse)(nil), "csi.v1.ListSnapshotsResponse")
+	proto.RegisterType((*ListSnapshotsResponse_Entry)(nil), "csi.v1.ListSnapshotsResponse.Entry")
+	proto.RegisterType((*ControllerExpandVolumeRequest)(nil), "csi.v1.ControllerExpandVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerExpandVolumeRequest.SecretsEntry")
+	proto.RegisterType((*ControllerExpandVolumeResponse)(nil), "csi.v1.ControllerExpandVolumeResponse")
+	proto.RegisterType((*NodeStageVolumeRequest)(nil), "csi.v1.NodeStageVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.PublishContextEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.SecretsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.VolumeContextEntry")
+	proto.RegisterType((*NodeStageVolumeResponse)(nil), "csi.v1.NodeStageVolumeResponse")
+	proto.RegisterType((*NodeUnstageVolumeRequest)(nil), "csi.v1.NodeUnstageVolumeRequest")
+	proto.RegisterType((*NodeUnstageVolumeResponse)(nil), "csi.v1.NodeUnstageVolumeResponse")
+	proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.v1.NodePublishVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.PublishContextEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.SecretsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.VolumeContextEntry")
+	proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.v1.NodePublishVolumeResponse")
+	proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.v1.NodeUnpublishVolumeRequest")
+	proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.v1.NodeUnpublishVolumeResponse")
+	proto.RegisterType((*NodeGetVolumeStatsRequest)(nil), "csi.v1.NodeGetVolumeStatsRequest")
+	proto.RegisterType((*NodeGetVolumeStatsResponse)(nil), "csi.v1.NodeGetVolumeStatsResponse")
+	proto.RegisterType((*VolumeUsage)(nil), "csi.v1.VolumeUsage")
+	proto.RegisterType((*VolumeCondition)(nil), "csi.v1.VolumeCondition")
+	proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.v1.NodeGetCapabilitiesRequest")
+	proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.v1.NodeGetCapabilitiesResponse")
+	proto.RegisterType((*NodeServiceCapability)(nil), "csi.v1.NodeServiceCapability")
+	proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.v1.NodeServiceCapability.RPC")
+	proto.RegisterType((*NodeGetInfoRequest)(nil), "csi.v1.NodeGetInfoRequest")
+	proto.RegisterType((*NodeGetInfoResponse)(nil), "csi.v1.NodeGetInfoResponse")
+	proto.RegisterType((*NodeExpandVolumeRequest)(nil), "csi.v1.NodeExpandVolumeRequest")
+	proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeExpandVolumeRequest.SecretsEntry")
+	proto.RegisterType((*NodeExpandVolumeResponse)(nil), "csi.v1.NodeExpandVolumeResponse")
+	proto.RegisterExtension(E_AlphaEnum)
+	proto.RegisterExtension(E_AlphaEnumValue)
+	proto.RegisterExtension(E_CsiSecret)
+	proto.RegisterExtension(E_AlphaField)
+	proto.RegisterExtension(E_AlphaMessage)
+	proto.RegisterExtension(E_AlphaMethod)
+	proto.RegisterExtension(E_AlphaService)
+}
+
+func init() {
+	proto.RegisterFile("github.com/container-storage-interface/spec/csi.proto", fileDescriptor_9cdb00adce470e01)
+}
+
+var fileDescriptor_9cdb00adce470e01 = []byte{
+	// 3797 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x3b, 0x4b, 0x6c, 0x1b, 0x49,
+	0x76, 0x6a, 0xfe, 0x24, 0x3d, 0x4a, 0x32, 0x5d, 0xfa, 0x98, 0x6e, 0x49, 0x96, 0xdc, 0x1e, 0x7b,
+	0x65, 0x8f, 0x4d, 0xaf, 0xb5, 0x63, 0x23, 0x23, 0x7b, 0x76, 0x87, 0xa4, 0x68, 0x89, 0x6b, 0x8a,
+	0xd4, 0x34, 0x29, 0x7b, 0xed, 0x64, 0xd0, 0xd3, 0x22, 0x4b, 0x74, 0x63, 0xc8, 0x6e, 0x4e, 0x77,
+	0x53, 0x91, 0xe6, 0x92, 0x20, 0x41, 0x0e, 0x41, 0x2e, 0xb9, 0xed, 0xe4, 0xb6, 0x48, 0xf6, 0x98,
+	0xc5, 0x22, 0x08, 0x82, 0x1c, 0x03, 0xe4, 0x18, 0x20, 0x9b, 0xdc, 0x12, 0xe4, 0xb2, 0xb7, 0x20,
+	0x58, 0x24, 0xc0, 0x5c, 0x72, 0xc9, 0x21, 0x08, 0xba, 0xaa, 0xba, 0xd9, 0x5f, 0x7e, 0x2c, 0x19,
+	0x73, 0xc8, 0x49, 0xec, 0x57, 0xef, 0xbd, 0x7a, 0x55, 0xf5, 0xde, 0xab, 0xf7, 0x29, 0xc1, 0xe3,
+	0xb6, 0x62, 0xbe, 0xed, 0x1f, 0xe7, 0x9a, 0x5a, 0xf7, 0x61, 0x53, 0x53, 0x4d, 0x59, 0x51, 0xb1,
+	0xfe, 0xc0, 0x30, 0x35, 0x5d, 0x6e, 0xe3, 0x07, 0x8a, 0x6a, 0x62, 0xfd, 0x44, 0x6e, 0xe2, 0x87,
+	0x46, 0x0f, 0x37, 0x1f, 0x36, 0x0d, 0x25, 0xd7, 0xd3, 0x35, 0x53, 0x43, 0x29, 0xeb, 0xe7, 0xe9,
+	0x23, 0x7e, 0xb3, 0xad, 0x69, 0xed, 0x0e, 0x7e, 0x48, 0xa0, 0xc7, 0xfd, 0x93, 0x87, 0x2d, 0x6c,
+	0x34, 0x75, 0xa5, 0x67, 0x6a, 0x3a, 0xc5, 0xe4, 0x37, 0xfc, 0x18, 0xa6, 0xd2, 0xc5, 0x86, 0x29,
+	0x77, 0x7b, 0x0c, 0xe1, 0x86, 0x1f, 0xe1, 0x77, 0x75, 0xb9, 0xd7, 0xc3, 0xba, 0x41, 0xc7, 0x85,
+	0x15, 0x58, 0xda, 0xc3, 0xe6, 0x61, 0xa7, 0xdf, 0x56, 0xd4, 0xb2, 0x7a, 0xa2, 0x89, 0xf8, 0xab,
+	0x3e, 0x36, 0x4c, 0xe1, 0x5f, 0x39, 0x58, 0xf6, 0x0d, 0x18, 0x3d, 0x4d, 0x35, 0x30, 0x42, 0x90,
+	0x50, 0xe5, 0x2e, 0xce, 0x72, 0x9b, 0xdc, 0xd6, 0xac, 0x48, 0x7e, 0xa3, 0xdb, 0xb0, 0x70, 0x8a,
+	0xd5, 0x96, 0xa6, 0x4b, 0xa7, 0x58, 0x37, 0x14, 0x4d, 0xcd, 0xc6, 0xc8, 0xe8, 0x3c, 0x85, 0xbe,
+	0xa4, 0x40, 0xb4, 0x07, 0x33, 0x5d, 0x59, 0x55, 0x4e, 0xb0, 0x61, 0x66, 0xe3, 0x9b, 0xf1, 0xad,
+	0xf4, 0xf6, 0x87, 0x39, 0xba, 0xd4, 0x5c, 0xe8, 0x5c, 0xb9, 0x03, 0x86, 0x5d, 0x52, 0x4d, 0xfd,
+	0x5c, 0x74, 0x88, 0xf9, 0xa7, 0x30, 0xef, 0x19, 0x42, 0x19, 0x88, 0x7f, 0x89, 0xcf, 0x99, 0x4c,
+	0xd6, 0x4f, 0xb4, 0x04, 0xc9, 0x53, 0xb9, 0xd3, 0xc7, 0x4c, 0x12, 0xfa, 0xb1, 0x13, 0xfb, 0x2d,
+	0x4e, 0xb8, 0x01, 0x6b, 0xce, 0x6c, 0x45, 0xb9, 0x27, 0x1f, 0x2b, 0x1d, 0xc5, 0x54, 0xb0, 0x61,
+	0x2f, 0xfd, 0x73, 0x58, 0x8f, 0x18, 0x67, 0x3b, 0xf0, 0x0c, 0xe6, 0x9a, 0x2e, 0x78, 0x96, 0x23,
+	0x4b, 0xc9, 0xda, 0x4b, 0xf1, 0x51, 0x9e, 0x8b, 0x1e, 0x6c, 0xe1, 0x57, 0x71, 0xc8, 0xf8, 0x51,
+	0xd0, 0x33, 0x98, 0x36, 0xb0, 0x7e, 0xaa, 0x34, 0xe9, 0xbe, 0xa6, 0xb7, 0x37, 0xa3, 0xb8, 0xe5,
+	0xea, 0x14, 0x6f, 0x7f, 0x4a, 0xb4, 0x49, 0xd0, 0x11, 0x64, 0x4e, 0xb5, 0x4e, 0xbf, 0x8b, 0x25,
+	0x7c, 0xd6, 0x93, 0x55, 0xe7, 0x00, 0xd2, 0xdb, 0x5b, 0x91, 0x6c, 0x5e, 0x12, 0x82, 0x92, 0x8d,
+	0xbf, 0x3f, 0x25, 0x5e, 0x39, 0xf5, 0x82, 0xf8, 0x9f, 0x72, 0x30, 0xcd, 0x66, 0x43, 0x1f, 0x43,
+	0xc2, 0x3c, 0xef, 0x51, 0xe9, 0x16, 0xb6, 0x6f, 0x8f, 0x92, 0x2e, 0xd7, 0x38, 0xef, 0x61, 0x91,
+	0x90, 0x08, 0x9f, 0x41, 0xc2, 0xfa, 0x42, 0x69, 0x98, 0x3e, 0xaa, 0xbe, 0xa8, 0xd6, 0x5e, 0x55,
+	0x33, 0x53, 0x68, 0x05, 0x50, 0xb1, 0x56, 0x6d, 0x88, 0xb5, 0x4a, 0xa5, 0x24, 0x4a, 0xf5, 0x92,
+	0xf8, 0xb2, 0x5c, 0x2c, 0x65, 0x38, 0xf4, 0x01, 0x6c, 0xbe, 0xac, 0x55, 0x8e, 0x0e, 0x4a, 0x52,
+	0xbe, 0x58, 0x2c, 0xd5, 0xeb, 0xe5, 0x42, 0xb9, 0x52, 0x6e, 0xbc, 0x96, 0x8a, 0xb5, 0x6a, 0xbd,
+	0x21, 0xe6, 0xcb, 0xd5, 0x46, 0x3d, 0x13, 0xe3, 0xff, 0x80, 0x83, 0x2b, 0xbe, 0x05, 0xa0, 0xbc,
+	0x47, 0xc2, 0x07, 0xe3, 0x2e, 0xdc, 0x2d, 0xe9, 0xfd, 0x30, 0x49, 0x01, 0x52, 0xb5, 0x6a, 0xa5,
+	0x5c, 0xb5, 0xa4, 0x4b, 0xc3, 0x74, 0xed, 0xf9, 0x73, 0xf2, 0x11, 0x2b, 0xa4, 0xe8, 0x84, 0xc2,
+	0x02, 0xcc, 0x1d, 0xea, 0xda, 0x31, 0xb6, 0xf5, 0x27, 0x0f, 0xf3, 0xec, 0x9b, 0xe9, 0xcb, 0xf7,
+	0x21, 0xa9, 0x63, 0xb9, 0x75, 0xce, 0x8e, 0x96, 0xcf, 0x51, 0x9b, 0xcc, 0xd9, 0x36, 0x99, 0x2b,
+	0x68, 0x5a, 0xe7, 0xa5, 0xa5, 0x9f, 0x22, 0x45, 0x14, 0xbe, 0x4d, 0xc0, 0x62, 0x51, 0xc7, 0xb2,
+	0x89, 0xa9, 0xb4, 0x8c, 0x75, 0xa8, 0xed, 0x3d, 0x83, 0x05, 0x4b, 0xbf, 0x9a, 0x8a, 0x79, 0x2e,
+	0xe9, 0xb2, 0xda, 0xc6, 0xec, 0xe8, 0x97, 0xed, 0x1d, 0x28, 0xb2, 0x51, 0xd1, 0x1a, 0x14, 0xe7,
+	0x9b, 0xee, 0x4f, 0x54, 0x86, 0x45, 0xa6, 0x3a, 0x1e, 0x95, 0x8e, 0x7b, 0x55, 0x9a, 0x4a, 0xe1,
+	0x52, 0x69, 0x74, 0xea, 0x85, 0x28, 0xd8, 0x40, 0x2f, 0x00, 0x7a, 0xb2, 0x2e, 0x77, 0xb1, 0x89,
+	0x75, 0x23, 0x9b, 0xf0, 0xda, 0x77, 0xc8, 0x6a, 0x72, 0x87, 0x0e, 0x36, 0xb5, 0x6f, 0x17, 0x39,
+	0xda, 0xb3, 0x0c, 0xa2, 0xa9, 0x63, 0xd3, 0xc8, 0x26, 0x09, 0xa7, 0xad, 0x61, 0x9c, 0xea, 0x14,
+	0x95, 0xb0, 0x29, 0xc4, 0xbf, 0x29, 0x70, 0xa2, 0x4d, 0x8d, 0x6a, 0xb0, 0x6c, 0x2f, 0x50, 0x53,
+	0x4d, 0xac, 0x9a, 0x92, 0xa1, 0xf5, 0xf5, 0x26, 0xce, 0xa6, 0xc8, 0x2e, 0xad, 0xfa, 0x96, 0x48,
+	0x71, 0xea, 0x04, 0x45, 0x64, 0x5b, 0xe3, 0x01, 0xa2, 0x37, 0xc0, 0xcb, 0xcd, 0x26, 0x36, 0x0c,
+	0x85, 0xee, 0x85, 0xa4, 0xe3, 0xaf, 0xfa, 0x8a, 0x8e, 0xbb, 0x58, 0x35, 0x8d, 0xec, 0xb4, 0x97,
+	0x6b, 0x43, 0xeb, 0x69, 0x1d, 0xad, 0x7d, 0x2e, 0x0e, 0x70, 0xc4, 0xeb, 0x1e, 0x72, 0xd7, 0x88,
+	0xc1, 0x7f, 0x02, 0x57, 0x7c, 0x9b, 0x32, 0x89, 0x67, 0xe3, 0x77, 0x60, 0xce, 0xbd, 0x13, 0x13,
+	0x79, 0xc5, 0x3f, 0x89, 0xc1, 0x62, 0xc8, 0x1e, 0xa0, 0x7d, 0x98, 0x31, 0x54, 0xb9, 0x67, 0xbc,
+	0xd5, 0x4c, 0xa6, 0xbf, 0xf7, 0x86, 0x6c, 0x59, 0xae, 0xce, 0x70, 0xe9, 0xe7, 0xfe, 0x94, 0xe8,
+	0x50, 0xa3, 0x02, 0xa4, 0xe8, 0x7e, 0xfa, 0x7d, 0x53, 0x18, 0x1f, 0x0a, 0x73, 0xb8, 0x30, 0x4a,
+	0xfe, 0x11, 0x2c, 0x78, 0x67, 0x40, 0x1b, 0x90, 0xb6, 0x67, 0x90, 0x94, 0x16, 0x5b, 0x2b, 0xd8,
+	0xa0, 0x72, 0x8b, 0xff, 0x10, 0xe6, 0xdc, 0xcc, 0xd0, 0x2a, 0xcc, 0x32, 0x85, 0x70, 0xd0, 0x67,
+	0x28, 0xa0, 0xdc, 0x72, 0x6c, 0xfa, 0x87, 0xb0, 0xe4, 0xd5, 0x33, 0x66, 0xca, 0x77, 0x9c, 0x35,
+	0xd0, 0xbd, 0x58, 0xf0, 0xae, 0xc1, 0x96, 0x53, 0xf8, 0x79, 0x12, 0x32, 0x7e, 0xa3, 0x41, 0xcf,
+	0x20, 0x79, 0xdc, 0xd1, 0x9a, 0x5f, 0x32, 0xda, 0x0f, 0xa2, 0xac, 0x2b, 0x57, 0xb0, 0xb0, 0x28,
+	0x74, 0x7f, 0x4a, 0xa4, 0x44, 0x16, 0x75, 0x57, 0xeb, 0xab, 0x26, 0xdb, 0xbd, 0x68, 0xea, 0x03,
+	0x0b, 0x6b, 0x40, 0x4d, 0x88, 0xd0, 0x2e, 0xa4, 0xa9, 0xda, 0x49, 0x5d, 0xad, 0x85, 0xb3, 0x71,
+	0xc2, 0xe3, 0x56, 0x24, 0x8f, 0x3c, 0xc1, 0x3d, 0xd0, 0x5a, 0x58, 0x04, 0xd9, 0xf9, 0xcd, 0xcf,
+	0x43, 0xda, 0x25, 0x1b, 0xff, 0x35, 0xa4, 0x5d, 0x93, 0xa1, 0x6b, 0x30, 0x7d, 0x62, 0x48, 0x8e,
+	0x13, 0x9e, 0x15, 0x53, 0x27, 0x06, 0xf1, 0xa7, 0x1b, 0x90, 0x26, 0x52, 0x48, 0x27, 0x1d, 0xb9,
+	0x6d, 0x64, 0x63, 0x9b, 0x71, 0xeb, 0x8c, 0x08, 0xe8, 0xb9, 0x05, 0x41, 0x8f, 0x80, 0x39, 0x14,
+	0x89, 0xe2, 0xb5, 0x75, 0xad, 0xdf, 0x23, 0x42, 0xce, 0x16, 0xe2, 0x3f, 0x2b, 0x70, 0x22, 0xbb,
+	0xdf, 0xc8, 0x6c, 0x7b, 0xd6, 0x20, 0xff, 0xd7, 0x31, 0x80, 0x81, 0x94, 0xe8, 0x19, 0x24, 0xc8,
+	0xc2, 0xa8, 0xf7, 0xdf, 0x1a, 0x63, 0x61, 0x39, 0xb2, 0x3a, 0x42, 0x25, 0xfc, 0x3b, 0x07, 0x09,
+	0xc2, 0xc6, 0x7f, 0x47, 0xd5, 0xcb, 0xd5, 0xbd, 0x4a, 0x49, 0xaa, 0xd6, 0x76, 0x4b, 0xd2, 0x2b,
+	0xb1, 0xdc, 0x28, 0x89, 0x19, 0x0e, 0xad, 0xc2, 0x35, 0x37, 0x5c, 0x2c, 0xe5, 0x77, 0x4b, 0xa2,
+	0x54, 0xab, 0x56, 0x5e, 0x67, 0x62, 0x88, 0x87, 0x95, 0x83, 0xa3, 0x4a, 0xa3, 0x1c, 0x1c, 0x8b,
+	0xa3, 0x35, 0xc8, 0xba, 0xc6, 0x18, 0x0f, 0xc6, 0x36, 0x61, 0xb1, 0x75, 0x8d, 0xd2, 0x9f, 0x6c,
+	0x30, 0x89, 0x04, 0xb8, 0xee, 0x9e, 0xd3, 0x4b, 0x9b, 0xe2, 0xad, 0x4d, 0x42, 0x37, 0x21, 0xeb,
+	0xc6, 0xf1, 0x70, 0x98, 0x26, 0x28, 0x85, 0x79, 0x47, 0x0d, 0x88, 0x9a, 0xbf, 0x82, 0x79, 0xcf,
+	0xed, 0x60, 0x05, 0x72, 0xcc, 0x9d, 0xb5, 0xa4, 0xe3, 0x73, 0x93, 0x04, 0x37, 0xdc, 0x56, 0x5c,
+	0x9c, 0xb7, 0xa1, 0x05, 0x0b, 0x68, 0x1d, 0x68, 0x47, 0xe9, 0x2a, 0x26, 0xc3, 0x89, 0x11, 0x1c,
+	0x20, 0x20, 0x82, 0x20, 0xfc, 0x3a, 0x06, 0x29, 0xa6, 0x15, 0xb7, 0x5d, 0xf7, 0x93, 0x87, 0xa5,
+	0x0d, 0xa5, 0x2c, 0x3d, 0x66, 0x19, 0xf3, 0x9a, 0x25, 0xda, 0x87, 0x05, 0xb7, 0x13, 0x3f, 0xb3,
+	0xc3, 0xc7, 0x9b, 0xde, 0x73, 0x76, 0x7b, 0x92, 0x33, 0x16, 0x34, 0xce, 0x9f, 0xba, 0x61, 0xa8,
+	0x00, 0x0b, 0xbe, 0x7b, 0x20, 0x31, 0xfa, 0x1e, 0x98, 0x6f, 0x7a, 0x5c, 0x62, 0x1e, 0x16, 0x6d,
+	0x17, 0xde, 0xc1, 0x92, 0xc9, 0x5c, 0x3c, 0xbb, 0xa7, 0x32, 0x01, 0xd7, 0x8f, 0x06, 0xc8, 0x36,
+	0x8c, 0xff, 0x14, 0x50, 0x50, 0xd6, 0x89, 0xfc, 0x75, 0x1f, 0x16, 0x43, 0x2e, 0x17, 0x94, 0x83,
+	0x59, 0x72, 0x54, 0x86, 0x62, 0x62, 0x16, 0x98, 0x06, 0x25, 0x1a, 0xa0, 0x58, 0xf8, 0x3d, 0x1d,
+	0x9f, 0x60, 0x5d, 0xc7, 0x2d, 0x62, 0x98, 0xa1, 0xf8, 0x0e, 0x8a, 0xf0, 0x87, 0x1c, 0xcc, 0xd8,
+	0x70, 0xb4, 0x03, 0x33, 0x06, 0x6e, 0xd3, 0x8b, 0x8f, 0xce, 0x75, 0xc3, 0x4f, 0x9b, 0xab, 0x33,
+	0x04, 0x16, 0xc2, 0xdb, 0xf8, 0x56, 0x08, 0xef, 0x19, 0x9a, 0x68, 0xf1, 0x7f, 0xcb, 0xc1, 0xe2,
+	0x2e, 0xee, 0x60, 0x7f, 0x7c, 0x34, 0xcc, 0xb7, 0xbb, 0x43, 0x8a, 0x98, 0x37, 0xa4, 0x08, 0x61,
+	0x35, 0x24, 0xa4, 0xb8, 0xd0, 0x35, 0xbb, 0x02, 0x4b, 0xde, 0xd9, 0xe8, 0xc5, 0x22, 0xfc, 0x57,
+	0x1c, 0x6e, 0x58, 0xba, 0xa0, 0x6b, 0x9d, 0x0e, 0xd6, 0x0f, 0xfb, 0xc7, 0x1d, 0xc5, 0x78, 0x3b,
+	0xc1, 0xe2, 0xae, 0xc1, 0xb4, 0xaa, 0xb5, 0x5c, 0xc6, 0x93, 0xb2, 0x3e, 0xcb, 0x2d, 0x54, 0x82,
+	0xab, 0xfe, 0x00, 0xef, 0x9c, 0xb9, 0xff, 0xe8, 0xf0, 0x2e, 0x73, 0xea, 0xbf, 0xbb, 0x78, 0x98,
+	0xb1, 0x42, 0x53, 0x4d, 0xed, 0x9c, 0x13, 0x8b, 0x99, 0x11, 0x9d, 0x6f, 0x24, 0xfa, 0x63, 0xb5,
+	0x1f, 0x38, 0xb1, 0xda, 0xd0, 0x15, 0x0d, 0x0b, 0xdb, 0xbe, 0x08, 0x58, 0x7c, 0x8a, 0xb0, 0xfe,
+	0x78, 0x4c, 0xd6, 0x23, 0x3d, 0xc1, 0x45, 0x4e, 0xf1, 0x12, 0xcc, 0xf7, 0x1f, 0x38, 0xd8, 0x88,
+	0x5c, 0x02, 0x0b, 0x36, 0x5a, 0x70, 0xa5, 0x47, 0x07, 0x9c, 0x4d, 0xa0, 0x56, 0xf6, 0x74, 0xe4,
+	0x26, 0xb0, 0xfc, 0x99, 0x41, 0x3d, 0xdb, 0xb0, 0xd0, 0xf3, 0x00, 0xf9, 0x3c, 0x2c, 0x86, 0xa0,
+	0x4d, 0xb4, 0x98, 0xdf, 0x70, 0xb0, 0x39, 0x10, 0xe5, 0x48, 0xed, 0x5d, 0x9e, 0xfa, 0x36, 0x06,
+	0xba, 0x45, 0x5d, 0xfe, 0xe3, 0xe0, 0xda, 0xc3, 0x27, 0x7c, 0x5f, 0x16, 0x7c, 0x0b, 0x6e, 0x0e,
+	0x99, 0x9a, 0x99, 0xf3, 0xaf, 0x13, 0x70, 0xf3, 0xa5, 0xdc, 0x51, 0x5a, 0x4e, 0x08, 0x19, 0x52,
+	0x69, 0x18, 0xbe, 0x25, 0xcd, 0x80, 0x05, 0x50, 0xaf, 0xf5, 0xcc, 0xb1, 0xda, 0x51, 0xfc, 0xc7,
+	0xb8, 0x0e, 0x2f, 0x31, 0xfd, 0x7b, 0x1d, 0x92, 0xfe, 0x7d, 0x3c, 0xbe, 0xac, 0xc3, 0x92, 0xc1,
+	0x23, 0xbf, 0x83, 0x79, 0x32, 0x3e, 0xdf, 0x21, 0x5a, 0x70, 0x61, 0x2b, 0xfe, 0x2e, 0xf3, 0xb5,
+	0xbf, 0x4f, 0x80, 0x30, 0x6c, 0xf5, 0xcc, 0x87, 0x88, 0x30, 0xdb, 0xd4, 0xd4, 0x13, 0x45, 0xef,
+	0xe2, 0x16, 0xcb, 0x3b, 0x3e, 0x1a, 0x67, 0xf3, 0x98, 0x03, 0x29, 0xda, 0xb4, 0xe2, 0x80, 0x0d,
+	0xca, 0xc2, 0x74, 0x17, 0x1b, 0x86, 0xdc, 0xb6, 0xc5, 0xb2, 0x3f, 0xf9, 0x5f, 0xc4, 0x61, 0xd6,
+	0x21, 0x41, 0x6a, 0x40, 0x83, 0xa9, 0xfb, 0xda, 0x7b, 0x17, 0x01, 0xde, 0x5d, 0x99, 0x63, 0xef,
+	0xa0, 0xcc, 0x2d, 0x8f, 0x32, 0x53, 0x73, 0xd8, 0x7d, 0x27, 0xb1, 0x87, 0xe8, 0xf5, 0x77, 0xae,
+	0x80, 0xc2, 0xef, 0x00, 0xaa, 0x28, 0x06, 0xcb, 0xdf, 0x1c, 0xb7, 0x64, 0xa5, 0x6b, 0xf2, 0x99,
+	0x84, 0x55, 0x53, 0x57, 0x58, 0xb8, 0x9e, 0x14, 0xa1, 0x2b, 0x9f, 0x95, 0x28, 0xc4, 0x0a, 0xe9,
+	0x0d, 0x53, 0xd6, 0x4d, 0x45, 0x6d, 0x4b, 0xa6, 0xf6, 0x25, 0x76, 0xca, 0xbd, 0x36, 0xb4, 0x61,
+	0x01, 0x85, 0xff, 0x8c, 0xc1, 0xa2, 0x87, 0x3d, 0xd3, 0xc9, 0xa7, 0x30, 0x3d, 0xe0, 0xed, 0x09,
+	0xe3, 0x43, 0xb0, 0x73, 0x74, 0xdb, 0x6c, 0x0a, 0xb4, 0x0e, 0xa0, 0xe2, 0x33, 0xd3, 0x33, 0xef,
+	0xac, 0x05, 0x21, 0x73, 0xf2, 0x7f, 0xc4, 0x39, 0xe9, 0xbe, 0x29, 0x9b, 0x7d, 0x03, 0xdd, 0x07,
+	0xc4, 0x5c, 0x34, 0x6e, 0x49, 0xec, 0x8e, 0xa1, 0xf3, 0xce, 0x8a, 0x19, 0x67, 0xa4, 0x4a, 0x6e,
+	0x1b, 0x03, 0xed, 0x39, 0x95, 0xd4, 0xa6, 0xa6, 0xb6, 0x14, 0x73, 0x50, 0x49, 0xbd, 0x16, 0x48,
+	0x10, 0xe8, 0x30, 0xcd, 0x4f, 0xaf, 0x9c, 0x7a, 0xa1, 0xfc, 0x57, 0x90, 0xa4, 0xc7, 0x31, 0x66,
+	0xc5, 0x00, 0x7d, 0x0a, 0x29, 0x83, 0x48, 0xec, 0xaf, 0x8e, 0x84, 0xed, 0x89, 0x7b, 0x85, 0x22,
+	0xa3, 0x13, 0x7e, 0x08, 0xfc, 0xe0, 0x62, 0xda, 0xc3, 0xe6, 0xf8, 0xd7, 0xef, 0x8e, 0xb5, 0x06,
+	0xe1, 0xa7, 0x31, 0x58, 0x0d, 0x65, 0x30, 0x59, 0xed, 0x03, 0xed, 0xfb, 0x56, 0xf2, 0xfd, 0xe0,
+	0x8d, 0x1d, 0x60, 0x1e, 0xba, 0x22, 0xfe, 0xf7, 0x2f, 0x76, 0x98, 0x85, 0x89, 0x0f, 0x33, 0x70,
+	0x8e, 0x74, 0x67, 0x7e, 0x11, 0x03, 0xb4, 0x87, 0x4d, 0x27, 0x55, 0x66, 0x5b, 0x1a, 0xe1, 0x6f,
+	0xb8, 0x77, 0xf0, 0x37, 0x3f, 0xf6, 0xf8, 0x1b, 0xea, 0xb1, 0xee, 0xb9, 0x7a, 0x23, 0xbe, 0xa9,
+	0x87, 0xde, 0x96, 0x11, 0xe9, 0x29, 0x8d, 0xf9, 0xc7, 0x4b, 0x4f, 0x2f, 0xe8, 0x56, 0xfe, 0x83,
+	0x83, 0x45, 0x8f, 0xd0, 0x4c, 0x83, 0x1e, 0x00, 0x92, 0x4f, 0x65, 0xa5, 0x23, 0x5b, 0x82, 0xd9,
+	0xe9, 0x3f, 0x2b, 0x07, 0x5c, 0x75, 0x46, 0x6c, 0x32, 0x74, 0x08, 0x8b, 0x5d, 0xf9, 0x4c, 0xe9,
+	0xf6, 0xbb, 0x12, 0xdb, 0x67, 0x43, 0xf9, 0xda, 0xae, 0x1e, 0xae, 0x06, 0xaa, 0xe8, 0x65, 0xd5,
+	0x7c, 0xf2, 0x11, 0x29, 0xa3, 0x53, 0x9b, 0xbc, 0xca, 0x88, 0x99, 0x06, 0x29, 0x5f, 0x63, 0xc2,
+	0x51, 0x51, 0x03, 0x1c, 0xe3, 0x63, 0x73, 0xa4, 0xc4, 0x03, 0x8e, 0x82, 0xe0, 0x8e, 0x7c, 0xd9,
+	0x9a, 0xfd, 0x0d, 0xa5, 0x8e, 0x3b, 0x62, 0x0c, 0xe0, 0xb0, 0xbd, 0xd9, 0x0b, 0x6d, 0x2a, 0xdd,
+	0x0a, 0xda, 0x0e, 0xeb, 0xb0, 0x44, 0xf6, 0x97, 0xfe, 0x37, 0xee, 0x36, 0xe3, 0x00, 0x36, 0x7a,
+	0x0a, 0x71, 0xbd, 0xd7, 0x64, 0x36, 0xfc, 0xbd, 0x31, 0xf8, 0xe7, 0xc4, 0xc3, 0xe2, 0xfe, 0x94,
+	0x68, 0x51, 0xf1, 0x7f, 0x16, 0x87, 0xb8, 0x78, 0x58, 0x44, 0x9f, 0x7a, 0x9a, 0x2d, 0xf7, 0xc7,
+	0xe4, 0xe2, 0xee, 0xb5, 0xfc, 0x53, 0x2c, 0xac, 0xd9, 0x92, 0x85, 0xa5, 0xa2, 0x58, 0xca, 0x37,
+	0x4a, 0xd2, 0x6e, 0xa9, 0x52, 0x6a, 0x94, 0x24, 0xda, 0x0c, 0xca, 0x70, 0x68, 0x0d, 0xb2, 0x87,
+	0x47, 0x85, 0x4a, 0xb9, 0xbe, 0x2f, 0x1d, 0x55, 0xed, 0x5f, 0x6c, 0x34, 0x86, 0x32, 0x30, 0x57,
+	0x29, 0xd7, 0x1b, 0x0c, 0x50, 0xcf, 0xc4, 0x2d, 0xc8, 0x5e, 0xa9, 0x21, 0x15, 0xf3, 0x87, 0xf9,
+	0x62, 0xb9, 0xf1, 0x3a, 0x93, 0x40, 0x3c, 0xac, 0x78, 0x79, 0xd7, 0xab, 0xf9, 0xc3, 0xfa, 0x7e,
+	0xad, 0x91, 0x49, 0x22, 0x04, 0x0b, 0x84, 0xde, 0x06, 0xd5, 0x33, 0x29, 0x8b, 0x43, 0xb1, 0x52,
+	0xab, 0x3a, 0x32, 0x4c, 0xa3, 0x25, 0xc8, 0xd8, 0x33, 0x8b, 0xa5, 0xfc, 0x2e, 0xa9, 0xea, 0xcd,
+	0xa0, 0xab, 0x30, 0x5f, 0xfa, 0xc9, 0x61, 0xbe, 0xba, 0x6b, 0x23, 0xce, 0xa2, 0x4d, 0x58, 0x73,
+	0x8b, 0x23, 0x31, 0xaa, 0xd2, 0x2e, 0xa9, 0xcc, 0xd5, 0x33, 0x80, 0xae, 0x43, 0x86, 0xf5, 0xb9,
+	0x8a, 0xb5, 0xea, 0x6e, 0xb9, 0x51, 0xae, 0x55, 0x33, 0x69, 0x5a, 0xc6, 0x5b, 0x04, 0xb0, 0x24,
+	0x67, 0xcc, 0xe6, 0x46, 0xd7, 0xf6, 0xe6, 0x69, 0x6d, 0xcf, 0xae, 0x5d, 0xff, 0x26, 0x06, 0xcb,
+	0xb4, 0x78, 0x6d, 0x97, 0xca, 0x6d, 0x87, 0xb5, 0x05, 0x19, 0x5a, 0xf4, 0x92, 0xfc, 0x57, 0xc1,
+	0x02, 0x85, 0xbf, 0xb4, 0x93, 0x0f, 0xbb, 0xd1, 0x14, 0x73, 0x35, 0x9a, 0xca, 0xfe, 0x54, 0xec,
+	0x9e, 0xb7, 0x25, 0xe3, 0x9b, 0x6d, 0x58, 0x76, 0x7f, 0x10, 0x92, 0x2b, 0x3c, 0x18, 0xce, 0x6d,
+	0x58, 0x1c, 0x75, 0x91, 0x54, 0xfe, 0x82, 0xae, 0xee, 0x39, 0xac, 0xf8, 0xe5, 0x65, 0x06, 0x7d,
+	0x3f, 0xd0, 0x38, 0x71, 0x7c, 0xaf, 0x83, 0xeb, 0x60, 0x08, 0xff, 0xc2, 0xc1, 0x8c, 0x0d, 0xb6,
+	0x62, 0x1c, 0xcb, 0x2f, 0x79, 0xca, 0xa5, 0xb3, 0x16, 0xc4, 0xa9, 0xbe, 0xba, 0x5b, 0x1e, 0x31,
+	0x7f, 0xcb, 0x23, 0xf4, 0x9c, 0xe3, 0xa1, 0xe7, 0xfc, 0x23, 0x98, 0x6f, 0x5a, 0xe2, 0x2b, 0x9a,
+	0x2a, 0x99, 0x4a, 0xd7, 0xae, 0x86, 0x06, 0x5b, 0x94, 0x0d, 0xfb, 0x5d, 0x81, 0x38, 0x67, 0x13,
+	0x58, 0x20, 0xb4, 0x09, 0x73, 0xa4, 0x65, 0x29, 0x99, 0x9a, 0xd4, 0x37, 0x70, 0x36, 0x49, 0x6a,
+	0x43, 0x40, 0x60, 0x0d, 0xed, 0xc8, 0xc0, 0xc2, 0xdf, 0x71, 0xb0, 0x4c, 0x4b, 0x5e, 0x7e, 0x75,
+	0x1c, 0xd5, 0xba, 0x71, 0x6b, 0x9c, 0xef, 0x4a, 0x0c, 0x65, 0xf8, 0xbe, 0x32, 0xfe, 0x2c, 0xac,
+	0xf8, 0xe7, 0x63, 0x69, 0xfe, 0x2f, 0x63, 0xb0, 0x64, 0xc5, 0x67, 0xf6, 0xc0, 0x65, 0x87, 0xd0,
+	0x13, 0x9c, 0xa4, 0x6f, 0x33, 0x13, 0x81, 0xcd, 0xdc, 0xf7, 0x27, 0xd1, 0x77, 0xdd, 0x11, 0xa6,
+	0x7f, 0x05, 0xef, 0x6b, 0x2f, 0xff, 0x92, 0x83, 0x65, 0xdf, 0x7c, 0xcc, 0x5e, 0x3e, 0xf1, 0x67,
+	0x05, 0xb7, 0x22, 0xe4, 0x7b, 0xa7, 0xbc, 0xe0, 0xb1, 0x1d, 0x8f, 0x4f, 0x66, 0x96, 0xff, 0x1c,
+	0x83, 0xf5, 0xc1, 0xa5, 0x46, 0x1e, 0x0d, 0xb4, 0x26, 0x28, 0x6b, 0x5d, 0xac, 0x37, 0xff, 0x99,
+	0xdf, 0xe1, 0x6e, 0x07, 0xef, 0xd9, 0x10, 0x91, 0x86, 0x39, 0xde, 0xd0, 0x6a, 0x70, 0x62, 0xd2,
+	0x6a, 0xf0, 0x85, 0x34, 0xe0, 0xf7, 0xdc, 0x85, 0x6e, 0xaf, 0xf8, 0x4c, 0x13, 0xc6, 0xec, 0x18,
+	0x3d, 0x81, 0x6b, 0x24, 0x05, 0x70, 0xde, 0xbc, 0xd8, 0x9d, 0x78, 0xea, 0x12, 0x67, 0xc4, 0x65,
+	0x6b, 0xd8, 0x79, 0xe8, 0xc1, 0xba, 0x24, 0x2d, 0xe1, 0xdb, 0x04, 0xac, 0x58, 0x29, 0x42, 0xdd,
+	0x94, 0xdb, 0x93, 0xf4, 0x0f, 0x7e, 0x3b, 0x58, 0x8e, 0x8d, 0x79, 0x8f, 0x25, 0x9c, 0xeb, 0x38,
+	0x55, 0x58, 0x94, 0x83, 0x45, 0xc3, 0x94, 0xdb, 0xc4, 0x1d, 0xc8, 0x7a, 0x1b, 0x9b, 0x52, 0x4f,
+	0x36, 0xdf, 0x32, 0x5b, 0xbf, 0xca, 0x86, 0x1a, 0x64, 0xe4, 0x50, 0x36, 0xdf, 0x5e, 0xd2, 0x41,
+	0xa2, 0x1f, 0xfb, 0x9d, 0xc2, 0x87, 0x23, 0xd6, 0x32, 0x44, 0xb7, 0x7e, 0x12, 0x51, 0xb2, 0x7f,
+	0x34, 0x82, 0xe5, 0xe8, 0x52, 0xfd, 0xc5, 0x4b, 0xd4, 0xdf, 0x71, 0xb5, 0xff, 0x3a, 0x5c, 0x0b,
+	0x2c, 0x9e, 0x5d, 0x21, 0x6d, 0xc8, 0x5a, 0x43, 0x47, 0xaa, 0x31, 0xa1, 0x3a, 0x46, 0x68, 0x4c,
+	0x2c, 0x42, 0x63, 0x84, 0x55, 0xb8, 0x1e, 0x32, 0x11, 0x93, 0xe2, 0x6f, 0x92, 0x54, 0x8c, 0xc9,
+	0x1b, 0x4f, 0x9f, 0x47, 0x59, 0xc5, 0x47, 0xee, 0x63, 0x0f, 0xed, 0xd1, 0xbc, 0x0f, 0xbb, 0xd8,
+	0x80, 0xb4, 0x1b, 0x8f, 0x5d, 0x83, 0xe6, 0x08, 0xc3, 0x49, 0x5e, 0xa8, 0x1f, 0x96, 0xf2, 0xf5,
+	0xc3, 0x2a, 0x03, 0xa3, 0x9a, 0xf6, 0x86, 0xb6, 0x91, 0x5b, 0x31, 0xc4, 0xac, 0xde, 0x04, 0xcc,
+	0x6a, 0xc6, 0xdb, 0x64, 0x8b, 0x64, 0xfa, 0xff, 0xc0, 0xb0, 0x98, 0x52, 0x87, 0x76, 0xbf, 0x84,
+	0x37, 0xc0, 0x53, 0x8d, 0x9f, 0xbc, 0x1f, 0xe5, 0x53, 0xa3, 0x98, 0x5f, 0x8d, 0x84, 0x75, 0x58,
+	0x0d, 0xe5, 0xcd, 0xa6, 0xfe, 0x63, 0x8e, 0x0a, 0xe6, 0x14, 0xba, 0xea, 0xa6, 0x6c, 0x1a, 0xe3,
+	0x4e, 0xcd, 0x06, 0xdd, 0x53, 0x53, 0x10, 0xd1, 0xe0, 0x09, 0x4d, 0x42, 0xf8, 0x53, 0x8e, 0xee,
+	0x83, 0x5f, 0x16, 0x76, 0xdb, 0xde, 0x85, 0x64, 0x9f, 0xd4, 0xf2, 0x69, 0xd4, 0xb5, 0xe8, 0x35,
+	0x82, 0x23, 0x6b, 0x48, 0xa4, 0x18, 0x97, 0x56, 0x1d, 0x15, 0x7e, 0xc9, 0x41, 0xda, 0xc5, 0x1f,
+	0xad, 0xc1, 0xac, 0x53, 0xfe, 0xb1, 0xf3, 0x1d, 0x07, 0x60, 0x1d, 0xbf, 0xa9, 0x99, 0x72, 0x87,
+	0xbd, 0x33, 0xa1, 0x1f, 0x56, 0x8a, 0xda, 0x37, 0x30, 0x0d, 0x87, 0xe3, 0x22, 0xf9, 0x8d, 0xee,
+	0x43, 0xa2, 0xaf, 0x2a, 0x26, 0x31, 0xfb, 0x05, 0xbf, 0x3d, 0x93, 0xa9, 0x72, 0x47, 0xaa, 0x62,
+	0x8a, 0x04, 0x4b, 0xb8, 0x07, 0x09, 0xeb, 0xcb, 0x5b, 0x81, 0x98, 0x85, 0x64, 0xe1, 0x75, 0xa3,
+	0x54, 0xcf, 0x70, 0x08, 0x20, 0x55, 0xa6, 0xf9, 0x7a, 0x4c, 0xa8, 0xd8, 0x0f, 0x4e, 0x9d, 0x45,
+	0x58, 0x2e, 0x40, 0x3e, 0x56, 0x35, 0xbd, 0x2b, 0x77, 0x88, 0xcc, 0x33, 0xa2, 0xf3, 0x1d, 0xdd,
+	0x22, 0xa1, 0x05, 0xc5, 0x35, 0xe7, 0x44, 0xc2, 0xea, 0x45, 0x5f, 0x50, 0xdd, 0x8a, 0xaa, 0x14,
+	0xe5, 0x43, 0x2b, 0x45, 0xeb, 0x9e, 0x5b, 0x76, 0x44, 0x8d, 0xe8, 0x57, 0x31, 0x58, 0x0e, 0xc5,
+	0x43, 0x8f, 0xdd, 0xd5, 0xa1, 0x9b, 0x43, 0x79, 0xba, 0xeb, 0x42, 0xff, 0xcd, 0xd1, 0xba, 0xd0,
+	0x8e, 0xa7, 0x2e, 0x74, 0x67, 0x24, 0xbd, 0xbb, 0x22, 0xf4, 0x57, 0x5c, 0x44, 0x45, 0xa8, 0xde,
+	0xc8, 0xef, 0x95, 0xa4, 0xa3, 0x2a, 0xfd, 0xeb, 0x54, 0x84, 0x96, 0x20, 0x33, 0xa8, 0x93, 0x48,
+	0xf5, 0x46, 0xbe, 0x51, 0xcf, 0xc4, 0x82, 0xd5, 0x98, 0x78, 0x68, 0xad, 0x25, 0x31, 0xba, 0xac,
+	0x92, 0xa4, 0x28, 0xab, 0x80, 0x18, 0xf5, 0x41, 0xed, 0xa8, 0xda, 0x90, 0xf6, 0xc4, 0xda, 0xd1,
+	0x21, 0x7b, 0x72, 0xe5, 0xd4, 0x5c, 0x96, 0x00, 0xb1, 0x23, 0x73, 0x3f, 0xa2, 0xff, 0x73, 0x0e,
+	0x16, 0x3d, 0x60, 0x76, 0x82, 0xae, 0x6e, 0x37, 0xe7, 0xe9, 0x76, 0x3f, 0x84, 0x25, 0x2b, 0x6d,
+	0xa4, 0xe6, 0x62, 0x48, 0x3d, 0xac, 0x93, 0x2a, 0x37, 0x53, 0xfc, 0xab, 0x5d, 0xf9, 0x8c, 0x75,
+	0x02, 0x0e, 0xb1, 0x6e, 0x31, 0xbe, 0x84, 0x5a, 0xaf, 0xf0, 0x4d, 0x9c, 0x06, 0x27, 0x13, 0x27,
+	0x37, 0x23, 0x1d, 0x55, 0x30, 0xfb, 0x89, 0x4f, 0x90, 0xfd, 0x44, 0xb8, 0xb9, 0xc4, 0x44, 0x11,
+	0xf1, 0xe4, 0x17, 0x7b, 0x75, 0x70, 0x79, 0xd3, 0xf0, 0xf5, 0xbe, 0x5b, 0x89, 0x47, 0xa6, 0x5b,
+	0xa9, 0x6f, 0x0a, 0xdc, 0xcf, 0x2e, 0x2b, 0x59, 0xce, 0xd3, 0xa0, 0xec, 0x02, 0x49, 0xd2, 0xf6,
+	0xff, 0x70, 0x30, 0x53, 0x6e, 0x61, 0xd5, 0xa4, 0x6b, 0x9b, 0xf7, 0xfc, 0x9f, 0x05, 0x5a, 0x8b,
+	0xf8, 0xf7, 0x0b, 0xb2, 0x30, 0x7e, 0x7d, 0xe8, 0x3f, 0x67, 0x08, 0x53, 0xe8, 0xc4, 0xf5, 0x3f,
+	0x22, 0x9e, 0x76, 0xc6, 0x07, 0x01, 0xca, 0x10, 0x3f, 0xc7, 0xdf, 0x1e, 0x81, 0xe5, 0xcc, 0xf3,
+	0x04, 0x92, 0xe4, 0x45, 0x3d, 0x5a, 0x72, 0x5e, 0xf5, 0xbb, 0x1e, 0xdc, 0xf3, 0xcb, 0x3e, 0xa8,
+	0x4d, 0xb7, 0xfd, 0x8f, 0xb3, 0x00, 0x83, 0x5c, 0x13, 0xbd, 0x80, 0x39, 0xf7, 0xa3, 0x5e, 0xb4,
+	0x3a, 0xe4, 0x49, 0x39, 0xbf, 0x16, 0x3e, 0xe8, 0xc8, 0xf4, 0x02, 0xe6, 0xdc, 0x0f, 0xb9, 0x06,
+	0xcc, 0x42, 0x1e, 0x93, 0x0d, 0x98, 0x85, 0xbe, 0xfd, 0x9a, 0x42, 0x1d, 0xb8, 0x16, 0xf1, 0x94,
+	0x07, 0xdd, 0x19, 0xef, 0xc1, 0x13, 0xff, 0xbd, 0x31, 0xdf, 0x04, 0x09, 0x53, 0x48, 0x87, 0xeb,
+	0x91, 0x2f, 0x58, 0xd0, 0xd6, 0xb8, 0xef, 0x6b, 0xf8, 0xbb, 0x63, 0x60, 0x3a, 0x73, 0xf6, 0x81,
+	0x8f, 0x6e, 0x9b, 0xa3, 0xbb, 0x63, 0xbf, 0xe7, 0xe0, 0xef, 0x8d, 0xdf, 0x85, 0x17, 0xa6, 0xd0,
+	0x3e, 0xa4, 0x5d, 0xfd, 0x53, 0xc4, 0x87, 0x36, 0x55, 0x29, 0xe3, 0xd5, 0x21, 0x0d, 0x57, 0xca,
+	0xc9, 0xd5, 0xd2, 0x1a, 0x70, 0x0a, 0x36, 0xe7, 0x06, 0x9c, 0x42, 0x7a, 0x60, 0xfe, 0xed, 0xf7,
+	0x5d, 0xf2, 0x61, 0xdb, 0x1f, 0x1e, 0x25, 0x84, 0x6d, 0x7f, 0x44, 0xc4, 0x20, 0x4c, 0xa1, 0xcf,
+	0x60, 0xc1, 0x5b, 0xa6, 0x46, 0xeb, 0x43, 0xcb, 0xed, 0xfc, 0x8d, 0xa8, 0x61, 0x37, 0x4b, 0x6f,
+	0x55, 0x74, 0xc0, 0x32, 0xb4, 0x3a, 0x3b, 0x60, 0x19, 0x51, 0x4c, 0x9d, 0xb2, 0xfc, 0x93, 0xa7,
+	0xd6, 0x37, 0xf0, 0x4f, 0x61, 0x25, 0xca, 0x81, 0x7f, 0x0a, 0x2d, 0x10, 0x0a, 0x53, 0x48, 0x81,
+	0x95, 0xf0, 0x52, 0x13, 0xba, 0x3d, 0x56, 0x25, 0x8d, 0xbf, 0x33, 0x0a, 0xcd, 0x99, 0xaa, 0x09,
+	0x8b, 0x21, 0xed, 0x6d, 0x24, 0x0c, 0xed, 0x7d, 0xd3, 0x49, 0x6e, 0x8d, 0xd1, 0x1f, 0x17, 0xac,
+	0x68, 0x63, 0xfb, 0xdf, 0x92, 0x90, 0x20, 0xd7, 0x7e, 0x03, 0xae, 0xf8, 0xea, 0x09, 0xe8, 0xc6,
+	0xf0, 0x2a, 0x0b, 0xbf, 0x11, 0x39, 0xee, 0xac, 0xe1, 0x0d, 0x5c, 0x0d, 0x54, 0x08, 0xd0, 0xa6,
+	0x9b, 0x2e, 0xac, 0x4a, 0xc1, 0xdf, 0x1c, 0x82, 0xe1, 0xe7, 0xed, 0xf5, 0x6d, 0x9b, 0xa3, 0x52,
+	0x58, 0x2f, 0xef, 0x28, 0x7f, 0xf6, 0x05, 0x8d, 0xb2, 0xfc, 0x9e, 0x4c, 0xf0, 0xca, 0x15, 0xea,
+	0xc3, 0x6e, 0x0d, 0xc5, 0x71, 0x66, 0xf8, 0xdc, 0x09, 0xef, 0x5c, 0x19, 0x14, 0xf2, 0x08, 0x17,
+	0x9a, 0xe9, 0xf1, 0xc2, 0x30, 0x14, 0x87, 0xfd, 0x2b, 0xc8, 0xf8, 0xef, 0x79, 0xb4, 0x31, 0x22,
+	0xec, 0xe0, 0x37, 0xa3, 0x11, 0xfc, 0x3b, 0xe3, 0x77, 0x32, 0x7e, 0xa9, 0xc2, 0xdc, 0xcb, 0xad,
+	0xa1, 0x38, 0x6e, 0xb7, 0xe8, 0x8a, 0x70, 0x07, 0x6e, 0x31, 0x18, 0x0d, 0x0f, 0xdc, 0x62, 0x48,
+	0x48, 0x2c, 0x4c, 0xed, 0x3c, 0x03, 0x90, 0x3b, 0xbd, 0xb7, 0xb2, 0x84, 0xd5, 0x7e, 0x17, 0xad,
+	0x05, 0x3a, 0x50, 0x25, 0xb5, 0xdf, 0xad, 0xf5, 0xac, 0xcc, 0xcb, 0xc8, 0xfe, 0x7c, 0x86, 0xe4,
+	0x5b, 0xb3, 0x84, 0xc0, 0x1a, 0xd8, 0xa9, 0x40, 0x66, 0x40, 0x2d, 0x91, 0x10, 0x0a, 0xdd, 0x0c,
+	0xe5, 0x41, 0xfa, 0xf9, 0x3e, 0x46, 0x0b, 0x0e, 0x23, 0x32, 0xba, 0xf3, 0x09, 0x40, 0xd3, 0x50,
+	0x24, 0x1a, 0xc3, 0xa1, 0xf5, 0x00, 0x9f, 0xe7, 0x0a, 0xee, 0xb4, 0x6c, 0x1e, 0x7f, 0xc1, 0x84,
+	0x69, 0x1a, 0x0a, 0x8d, 0xf4, 0x76, 0x7e, 0x04, 0x69, 0x2a, 0xcc, 0x89, 0x85, 0x37, 0x8a, 0x9e,
+	0xc9, 0x40, 0x57, 0x4f, 0x46, 0x76, 0x4a, 0x30, 0x4f, 0x19, 0xb0, 0xac, 0x11, 0x6d, 0x04, 0x58,
+	0x1c, 0xd0, 0x11, 0x1f, 0x93, 0x39, 0x42, 0xc6, 0xc6, 0x76, 0x0a, 0x30, 0x67, 0xb3, 0x31, 0xdf,
+	0x6a, 0x2d, 0x74, 0x23, 0x84, 0x8b, 0x35, 0xe0, 0x63, 0x92, 0x66, 0x4c, 0xac, 0xa1, 0x81, 0x28,
+	0xf6, 0x3f, 0x9b, 0x06, 0x45, 0x61, 0x99, 0x5d, 0xa8, 0x28, 0x6c, 0xac, 0x90, 0x7c, 0x13, 0x6f,
+	0x1a, 0xca, 0x71, 0x8a, 0x10, 0xfd, 0xe0, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x5e, 0xa7, 0xda,
+	0x94, 0x19, 0x3d, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// IdentityClient is the client API for Identity service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type IdentityClient interface {
+	GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error)
+	GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error)
+	Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error)
+}
+
+type identityClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewIdentityClient(cc *grpc.ClientConn) IdentityClient {
+	return &identityClient{cc}
+}
+
+func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) {
+	out := new(GetPluginInfoResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Identity/GetPluginInfo", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *identityClient) GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) {
+	out := new(GetPluginCapabilitiesResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Identity/GetPluginCapabilities", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *identityClient) Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) {
+	out := new(ProbeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Identity/Probe", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// IdentityServer is the server API for Identity service.
+type IdentityServer interface {
+	GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error)
+	GetPluginCapabilities(context.Context, *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error)
+	Probe(context.Context, *ProbeRequest) (*ProbeResponse, error)
+}
+
+// UnimplementedIdentityServer can be embedded to have forward compatible implementations.
+type UnimplementedIdentityServer struct {
+}
+
+func (*UnimplementedIdentityServer) GetPluginInfo(ctx context.Context, req *GetPluginInfoRequest) (*GetPluginInfoResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetPluginInfo not implemented")
+}
+func (*UnimplementedIdentityServer) GetPluginCapabilities(ctx context.Context, req *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetPluginCapabilities not implemented")
+}
+func (*UnimplementedIdentityServer) Probe(ctx context.Context, req *ProbeRequest) (*ProbeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Probe not implemented")
+}
+
+func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) {
+	s.RegisterService(&_Identity_serviceDesc, srv)
+}
+
+func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetPluginInfoRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(IdentityServer).GetPluginInfo(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Identity/GetPluginInfo",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Identity_GetPluginCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetPluginCapabilitiesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(IdentityServer).GetPluginCapabilities(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Identity/GetPluginCapabilities",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(IdentityServer).GetPluginCapabilities(ctx, req.(*GetPluginCapabilitiesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Identity_Probe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ProbeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(IdentityServer).Probe(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Identity/Probe",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(IdentityServer).Probe(ctx, req.(*ProbeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Identity_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "csi.v1.Identity",
+	HandlerType: (*IdentityServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetPluginInfo",
+			Handler:    _Identity_GetPluginInfo_Handler,
+		},
+		{
+			MethodName: "GetPluginCapabilities",
+			Handler:    _Identity_GetPluginCapabilities_Handler,
+		},
+		{
+			MethodName: "Probe",
+			Handler:    _Identity_Probe_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/container-storage-interface/spec/csi.proto",
+}
+
+// ControllerClient is the client API for Controller service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ControllerClient interface {
+	CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error)
+	DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error)
+	ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error)
+	ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error)
+	ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error)
+	ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error)
+	GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error)
+	ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error)
+	CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error)
+	DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error)
+	ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error)
+	ControllerExpandVolume(ctx context.Context, in *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error)
+	ControllerGetVolume(ctx context.Context, in *ControllerGetVolumeRequest, opts ...grpc.CallOption) (*ControllerGetVolumeResponse, error)
+}
+
+type controllerClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewControllerClient(cc *grpc.ClientConn) ControllerClient {
+	return &controllerClient{cc}
+}
+
+func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) {
+	out := new(CreateVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/CreateVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) {
+	out := new(DeleteVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/DeleteVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) {
+	out := new(ControllerPublishVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerPublishVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) {
+	out := new(ControllerUnpublishVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerUnpublishVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) {
+	out := new(ValidateVolumeCapabilitiesResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ValidateVolumeCapabilities", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) {
+	out := new(ListVolumesResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ListVolumes", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) {
+	out := new(GetCapacityResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/GetCapacity", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) {
+	out := new(ControllerGetCapabilitiesResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerGetCapabilities", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) {
+	out := new(CreateSnapshotResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/CreateSnapshot", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) {
+	out := new(DeleteSnapshotResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/DeleteSnapshot", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) {
+	out := new(ListSnapshotsResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ListSnapshots", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ControllerExpandVolume(ctx context.Context, in *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error) {
+	out := new(ControllerExpandVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerExpandVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *controllerClient) ControllerGetVolume(ctx context.Context, in *ControllerGetVolumeRequest, opts ...grpc.CallOption) (*ControllerGetVolumeResponse, error) {
+	out := new(ControllerGetVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerGetVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// ControllerServer is the server API for Controller service.
+type ControllerServer interface {
+	CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error)
+	DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error)
+	ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error)
+	ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error)
+	ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error)
+	ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error)
+	GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error)
+	ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error)
+	CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error)
+	DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error)
+	ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error)
+	ControllerExpandVolume(context.Context, *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error)
+	ControllerGetVolume(context.Context, *ControllerGetVolumeRequest) (*ControllerGetVolumeResponse, error)
+}
+
+// UnimplementedControllerServer can be embedded to have forward compatible implementations.
+type UnimplementedControllerServer struct {
+}
+
+func (*UnimplementedControllerServer) CreateVolume(ctx context.Context, req *CreateVolumeRequest) (*CreateVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CreateVolume not implemented")
+}
+func (*UnimplementedControllerServer) DeleteVolume(ctx context.Context, req *DeleteVolumeRequest) (*DeleteVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteVolume not implemented")
+}
+func (*UnimplementedControllerServer) ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ControllerPublishVolume not implemented")
+}
+func (*UnimplementedControllerServer) ControllerUnpublishVolume(ctx context.Context, req *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ControllerUnpublishVolume not implemented")
+}
+func (*UnimplementedControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ValidateVolumeCapabilities not implemented")
+}
+func (*UnimplementedControllerServer) ListVolumes(ctx context.Context, req *ListVolumesRequest) (*ListVolumesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListVolumes not implemented")
+}
+func (*UnimplementedControllerServer) GetCapacity(ctx context.Context, req *GetCapacityRequest) (*GetCapacityResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetCapacity not implemented")
+}
+func (*UnimplementedControllerServer) ControllerGetCapabilities(ctx context.Context, req *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ControllerGetCapabilities not implemented")
+}
+func (*UnimplementedControllerServer) CreateSnapshot(ctx context.Context, req *CreateSnapshotRequest) (*CreateSnapshotResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CreateSnapshot not implemented")
+}
+func (*UnimplementedControllerServer) DeleteSnapshot(ctx context.Context, req *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented")
+}
+func (*UnimplementedControllerServer) ListSnapshots(ctx context.Context, req *ListSnapshotsRequest) (*ListSnapshotsResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented")
+}
+func (*UnimplementedControllerServer) ControllerExpandVolume(ctx context.Context, req *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ControllerExpandVolume not implemented")
+}
+func (*UnimplementedControllerServer) ControllerGetVolume(ctx context.Context, req *ControllerGetVolumeRequest) (*ControllerGetVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ControllerGetVolume not implemented")
+}
+
+func RegisterControllerServer(s *grpc.Server, srv ControllerServer) {
+	s.RegisterService(&_Controller_serviceDesc, srv)
+}
+
+func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).CreateVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/CreateVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).DeleteVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/DeleteVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ControllerPublishVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ControllerPublishVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ControllerPublishVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ControllerUnpublishVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ControllerUnpublishVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ControllerUnpublishVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ValidateVolumeCapabilitiesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ValidateVolumeCapabilities",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListVolumesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ListVolumes(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ListVolumes",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetCapacityRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).GetCapacity(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/GetCapacity",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ControllerGetCapabilitiesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ControllerGetCapabilities(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ControllerGetCapabilities",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).CreateSnapshot(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/CreateSnapshot",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).DeleteSnapshot(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/DeleteSnapshot",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListSnapshotsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ListSnapshots(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ListSnapshots",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ControllerExpandVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ControllerExpandVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ControllerExpandVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ControllerExpandVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ControllerExpandVolume(ctx, req.(*ControllerExpandVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Controller_ControllerGetVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ControllerGetVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ControllerServer).ControllerGetVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Controller/ControllerGetVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ControllerServer).ControllerGetVolume(ctx, req.(*ControllerGetVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Controller_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "csi.v1.Controller",
+	HandlerType: (*ControllerServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "CreateVolume",
+			Handler:    _Controller_CreateVolume_Handler,
+		},
+		{
+			MethodName: "DeleteVolume",
+			Handler:    _Controller_DeleteVolume_Handler,
+		},
+		{
+			MethodName: "ControllerPublishVolume",
+			Handler:    _Controller_ControllerPublishVolume_Handler,
+		},
+		{
+			MethodName: "ControllerUnpublishVolume",
+			Handler:    _Controller_ControllerUnpublishVolume_Handler,
+		},
+		{
+			MethodName: "ValidateVolumeCapabilities",
+			Handler:    _Controller_ValidateVolumeCapabilities_Handler,
+		},
+		{
+			MethodName: "ListVolumes",
+			Handler:    _Controller_ListVolumes_Handler,
+		},
+		{
+			MethodName: "GetCapacity",
+			Handler:    _Controller_GetCapacity_Handler,
+		},
+		{
+			MethodName: "ControllerGetCapabilities",
+			Handler:    _Controller_ControllerGetCapabilities_Handler,
+		},
+		{
+			MethodName: "CreateSnapshot",
+			Handler:    _Controller_CreateSnapshot_Handler,
+		},
+		{
+			MethodName: "DeleteSnapshot",
+			Handler:    _Controller_DeleteSnapshot_Handler,
+		},
+		{
+			MethodName: "ListSnapshots",
+			Handler:    _Controller_ListSnapshots_Handler,
+		},
+		{
+			MethodName: "ControllerExpandVolume",
+			Handler:    _Controller_ControllerExpandVolume_Handler,
+		},
+		{
+			MethodName: "ControllerGetVolume",
+			Handler:    _Controller_ControllerGetVolume_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/container-storage-interface/spec/csi.proto",
+}
+
+// NodeClient is the client API for Node service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type NodeClient interface {
+	NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error)
+	NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error)
+	NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error)
+	NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error)
+	NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error)
+	NodeExpandVolume(ctx context.Context, in *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error)
+	NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error)
+	NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error)
+}
+
+type nodeClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewNodeClient(cc *grpc.ClientConn) NodeClient {
+	return &nodeClient{cc}
+}
+
+func (c *nodeClient) NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) {
+	out := new(NodeStageVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeStageVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) {
+	out := new(NodeUnstageVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeUnstageVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) {
+	out := new(NodePublishVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodePublishVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) {
+	out := new(NodeUnpublishVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeUnpublishVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error) {
+	out := new(NodeGetVolumeStatsResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetVolumeStats", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodeExpandVolume(ctx context.Context, in *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error) {
+	out := new(NodeExpandVolumeResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeExpandVolume", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) {
+	out := new(NodeGetCapabilitiesResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetCapabilities", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *nodeClient) NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) {
+	out := new(NodeGetInfoResponse)
+	err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetInfo", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// NodeServer is the server API for Node service.
+type NodeServer interface {
+	NodeStageVolume(context.Context, *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error)
+	NodeUnstageVolume(context.Context, *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error)
+	NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error)
+	NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error)
+	NodeGetVolumeStats(context.Context, *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error)
+	NodeExpandVolume(context.Context, *NodeExpandVolumeRequest) (*NodeExpandVolumeResponse, error)
+	NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error)
+	NodeGetInfo(context.Context, *NodeGetInfoRequest) (*NodeGetInfoResponse, error)
+}
+
+// UnimplementedNodeServer can be embedded to have forward compatible implementations.
+type UnimplementedNodeServer struct {
+}
+
+func (*UnimplementedNodeServer) NodeStageVolume(ctx context.Context, req *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeStageVolume not implemented")
+}
+func (*UnimplementedNodeServer) NodeUnstageVolume(ctx context.Context, req *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeUnstageVolume not implemented")
+}
+func (*UnimplementedNodeServer) NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodePublishVolume not implemented")
+}
+func (*UnimplementedNodeServer) NodeUnpublishVolume(ctx context.Context, req *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeUnpublishVolume not implemented")
+}
+func (*UnimplementedNodeServer) NodeGetVolumeStats(ctx context.Context, req *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeGetVolumeStats not implemented")
+}
+func (*UnimplementedNodeServer) NodeExpandVolume(ctx context.Context, req *NodeExpandVolumeRequest) (*NodeExpandVolumeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeExpandVolume not implemented")
+}
+func (*UnimplementedNodeServer) NodeGetCapabilities(ctx context.Context, req *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeGetCapabilities not implemented")
+}
+func (*UnimplementedNodeServer) NodeGetInfo(ctx context.Context, req *NodeGetInfoRequest) (*NodeGetInfoResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NodeGetInfo not implemented")
+}
+
+func RegisterNodeServer(s *grpc.Server, srv NodeServer) {
+	s.RegisterService(&_Node_serviceDesc, srv)
+}
+
+func _Node_NodeStageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeStageVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeStageVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeStageVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeStageVolume(ctx, req.(*NodeStageVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodeUnstageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeUnstageVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeUnstageVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeUnstageVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeUnstageVolume(ctx, req.(*NodeUnstageVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodePublishVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodePublishVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodePublishVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeUnpublishVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeUnpublishVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeUnpublishVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodeGetVolumeStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeGetVolumeStatsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeGetVolumeStats(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeGetVolumeStats",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeGetVolumeStats(ctx, req.(*NodeGetVolumeStatsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodeExpandVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeExpandVolumeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeExpandVolume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeExpandVolume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeExpandVolume(ctx, req.(*NodeExpandVolumeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeGetCapabilitiesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeGetCapabilities(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeGetCapabilities",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Node_NodeGetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(NodeGetInfoRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NodeServer).NodeGetInfo(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/csi.v1.Node/NodeGetInfo",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NodeServer).NodeGetInfo(ctx, req.(*NodeGetInfoRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Node_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "csi.v1.Node",
+	HandlerType: (*NodeServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "NodeStageVolume",
+			Handler:    _Node_NodeStageVolume_Handler,
+		},
+		{
+			MethodName: "NodeUnstageVolume",
+			Handler:    _Node_NodeUnstageVolume_Handler,
+		},
+		{
+			MethodName: "NodePublishVolume",
+			Handler:    _Node_NodePublishVolume_Handler,
+		},
+		{
+			MethodName: "NodeUnpublishVolume",
+			Handler:    _Node_NodeUnpublishVolume_Handler,
+		},
+		{
+			MethodName: "NodeGetVolumeStats",
+			Handler:    _Node_NodeGetVolumeStats_Handler,
+		},
+		{
+			MethodName: "NodeExpandVolume",
+			Handler:    _Node_NodeExpandVolume_Handler,
+		},
+		{
+			MethodName: "NodeGetCapabilities",
+			Handler:    _Node_NodeGetCapabilities_Handler,
+		},
+		{
+			MethodName: "NodeGetInfo",
+			Handler:    _Node_NodeGetInfo_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/container-storage-interface/spec/csi.proto",
+}

+ 0 - 5
vendor/github.com/coreos/etcd/NOTICE

@@ -1,5 +0,0 @@
-CoreOS Project
-Copyright 2014 CoreOS, Inc
-
-This product includes software developed at CoreOS, Inc.
-(http://www.coreos.com/).

+ 0 - 284
vendor/github.com/coreos/etcd/raft/progress.go

@@ -1,284 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import "fmt"
-
-const (
-	ProgressStateProbe ProgressStateType = iota
-	ProgressStateReplicate
-	ProgressStateSnapshot
-)
-
-type ProgressStateType uint64
-
-var prstmap = [...]string{
-	"ProgressStateProbe",
-	"ProgressStateReplicate",
-	"ProgressStateSnapshot",
-}
-
-func (st ProgressStateType) String() string { return prstmap[uint64(st)] }
-
-// Progress represents a follower’s progress in the view of the leader. Leader maintains
-// progresses of all followers, and sends entries to the follower based on its progress.
-type Progress struct {
-	Match, Next uint64
-	// State defines how the leader should interact with the follower.
-	//
-	// When in ProgressStateProbe, leader sends at most one replication message
-	// per heartbeat interval. It also probes actual progress of the follower.
-	//
-	// When in ProgressStateReplicate, leader optimistically increases next
-	// to the latest entry sent after sending replication message. This is
-	// an optimized state for fast replicating log entries to the follower.
-	//
-	// When in ProgressStateSnapshot, leader should have sent out snapshot
-	// before and stops sending any replication message.
-	State ProgressStateType
-
-	// Paused is used in ProgressStateProbe.
-	// When Paused is true, raft should pause sending replication message to this peer.
-	Paused bool
-	// PendingSnapshot is used in ProgressStateSnapshot.
-	// If there is a pending snapshot, the pendingSnapshot will be set to the
-	// index of the snapshot. If pendingSnapshot is set, the replication process of
-	// this Progress will be paused. raft will not resend snapshot until the pending one
-	// is reported to be failed.
-	PendingSnapshot uint64
-
-	// RecentActive is true if the progress is recently active. Receiving any messages
-	// from the corresponding follower indicates the progress is active.
-	// RecentActive can be reset to false after an election timeout.
-	RecentActive bool
-
-	// inflights is a sliding window for the inflight messages.
-	// Each inflight message contains one or more log entries.
-	// The max number of entries per message is defined in raft config as MaxSizePerMsg.
-	// Thus inflight effectively limits both the number of inflight messages
-	// and the bandwidth each Progress can use.
-	// When inflights is full, no more message should be sent.
-	// When a leader sends out a message, the index of the last
-	// entry should be added to inflights. The index MUST be added
-	// into inflights in order.
-	// When a leader receives a reply, the previous inflights should
-	// be freed by calling inflights.freeTo with the index of the last
-	// received entry.
-	ins *inflights
-
-	// IsLearner is true if this progress is tracked for a learner.
-	IsLearner bool
-}
-
-func (pr *Progress) resetState(state ProgressStateType) {
-	pr.Paused = false
-	pr.PendingSnapshot = 0
-	pr.State = state
-	pr.ins.reset()
-}
-
-func (pr *Progress) becomeProbe() {
-	// If the original state is ProgressStateSnapshot, progress knows that
-	// the pending snapshot has been sent to this peer successfully, then
-	// probes from pendingSnapshot + 1.
-	if pr.State == ProgressStateSnapshot {
-		pendingSnapshot := pr.PendingSnapshot
-		pr.resetState(ProgressStateProbe)
-		pr.Next = max(pr.Match+1, pendingSnapshot+1)
-	} else {
-		pr.resetState(ProgressStateProbe)
-		pr.Next = pr.Match + 1
-	}
-}
-
-func (pr *Progress) becomeReplicate() {
-	pr.resetState(ProgressStateReplicate)
-	pr.Next = pr.Match + 1
-}
-
-func (pr *Progress) becomeSnapshot(snapshoti uint64) {
-	pr.resetState(ProgressStateSnapshot)
-	pr.PendingSnapshot = snapshoti
-}
-
-// maybeUpdate returns false if the given n index comes from an outdated message.
-// Otherwise it updates the progress and returns true.
-func (pr *Progress) maybeUpdate(n uint64) bool {
-	var updated bool
-	if pr.Match < n {
-		pr.Match = n
-		updated = true
-		pr.resume()
-	}
-	if pr.Next < n+1 {
-		pr.Next = n + 1
-	}
-	return updated
-}
-
-func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 }
-
-// maybeDecrTo returns false if the given to index comes from an out of order message.
-// Otherwise it decreases the progress next index to min(rejected, last) and returns true.
-func (pr *Progress) maybeDecrTo(rejected, last uint64) bool {
-	if pr.State == ProgressStateReplicate {
-		// the rejection must be stale if the progress has matched and "rejected"
-		// is smaller than "match".
-		if rejected <= pr.Match {
-			return false
-		}
-		// directly decrease next to match + 1
-		pr.Next = pr.Match + 1
-		return true
-	}
-
-	// the rejection must be stale if "rejected" does not match next - 1
-	if pr.Next-1 != rejected {
-		return false
-	}
-
-	if pr.Next = min(rejected, last+1); pr.Next < 1 {
-		pr.Next = 1
-	}
-	pr.resume()
-	return true
-}
-
-func (pr *Progress) pause()  { pr.Paused = true }
-func (pr *Progress) resume() { pr.Paused = false }
-
-// IsPaused returns whether sending log entries to this node has been
-// paused. A node may be paused because it has rejected recent
-// MsgApps, is currently waiting for a snapshot, or has reached the
-// MaxInflightMsgs limit.
-func (pr *Progress) IsPaused() bool {
-	switch pr.State {
-	case ProgressStateProbe:
-		return pr.Paused
-	case ProgressStateReplicate:
-		return pr.ins.full()
-	case ProgressStateSnapshot:
-		return true
-	default:
-		panic("unexpected state")
-	}
-}
-
-func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 }
-
-// needSnapshotAbort returns true if snapshot progress's Match
-// is equal or higher than the pendingSnapshot.
-func (pr *Progress) needSnapshotAbort() bool {
-	return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot
-}
-
-func (pr *Progress) String() string {
-	return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot)
-}
-
-type inflights struct {
-	// the starting index in the buffer
-	start int
-	// number of inflights in the buffer
-	count int
-
-	// the size of the buffer
-	size int
-
-	// buffer contains the index of the last entry
-	// inside one message.
-	buffer []uint64
-}
-
-func newInflights(size int) *inflights {
-	return &inflights{
-		size: size,
-	}
-}
-
-// add adds an inflight into inflights
-func (in *inflights) add(inflight uint64) {
-	if in.full() {
-		panic("cannot add into a full inflights")
-	}
-	next := in.start + in.count
-	size := in.size
-	if next >= size {
-		next -= size
-	}
-	if next >= len(in.buffer) {
-		in.growBuf()
-	}
-	in.buffer[next] = inflight
-	in.count++
-}
-
-// grow the inflight buffer by doubling up to inflights.size. We grow on demand
-// instead of preallocating to inflights.size to handle systems which have
-// thousands of Raft groups per process.
-func (in *inflights) growBuf() {
-	newSize := len(in.buffer) * 2
-	if newSize == 0 {
-		newSize = 1
-	} else if newSize > in.size {
-		newSize = in.size
-	}
-	newBuffer := make([]uint64, newSize)
-	copy(newBuffer, in.buffer)
-	in.buffer = newBuffer
-}
-
-// freeTo frees the inflights smaller or equal to the given `to` flight.
-func (in *inflights) freeTo(to uint64) {
-	if in.count == 0 || to < in.buffer[in.start] {
-		// out of the left side of the window
-		return
-	}
-
-	idx := in.start
-	var i int
-	for i = 0; i < in.count; i++ {
-		if to < in.buffer[idx] { // found the first large inflight
-			break
-		}
-
-		// increase index and maybe rotate
-		size := in.size
-		if idx++; idx >= size {
-			idx -= size
-		}
-	}
-	// free i inflights and set new start index
-	in.count -= i
-	in.start = idx
-	if in.count == 0 {
-		// inflights is empty, reset the start index so that we don't grow the
-		// buffer unnecessarily.
-		in.start = 0
-	}
-}
-
-func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) }
-
-// full returns true if the inflights is full.
-func (in *inflights) full() bool {
-	return in.count == in.size
-}
-
-// resets frees all inflights.
-func (in *inflights) reset() {
-	in.count = 0
-	in.start = 0
-}

+ 0 - 1407
vendor/github.com/coreos/etcd/raft/raft.go

@@ -1,1407 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"math"
-	"math/rand"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-// None is a placeholder node ID used when there is no leader.
-const None uint64 = 0
-const noLimit = math.MaxUint64
-
-// Possible values for StateType.
-const (
-	StateFollower StateType = iota
-	StateCandidate
-	StateLeader
-	StatePreCandidate
-	numStates
-)
-
-type ReadOnlyOption int
-
-const (
-	// ReadOnlySafe guarantees the linearizability of the read only request by
-	// communicating with the quorum. It is the default and suggested option.
-	ReadOnlySafe ReadOnlyOption = iota
-	// ReadOnlyLeaseBased ensures linearizability of the read only request by
-	// relying on the leader lease. It can be affected by clock drift.
-	// If the clock drift is unbounded, leader might keep the lease longer than it
-	// should (clock can move backward/pause without any bound). ReadIndex is not safe
-	// in that case.
-	ReadOnlyLeaseBased
-)
-
-// Possible values for CampaignType
-const (
-	// campaignPreElection represents the first phase of a normal election when
-	// Config.PreVote is true.
-	campaignPreElection CampaignType = "CampaignPreElection"
-	// campaignElection represents a normal (time-based) election (the second phase
-	// of the election when Config.PreVote is true).
-	campaignElection CampaignType = "CampaignElection"
-	// campaignTransfer represents the type of leader transfer
-	campaignTransfer CampaignType = "CampaignTransfer"
-)
-
-// lockedRand is a small wrapper around rand.Rand to provide
-// synchronization. Only the methods needed by the code are exposed
-// (e.g. Intn).
-type lockedRand struct {
-	mu   sync.Mutex
-	rand *rand.Rand
-}
-
-func (r *lockedRand) Intn(n int) int {
-	r.mu.Lock()
-	v := r.rand.Intn(n)
-	r.mu.Unlock()
-	return v
-}
-
-var globalRand = &lockedRand{
-	rand: rand.New(rand.NewSource(time.Now().UnixNano())),
-}
-
-// CampaignType represents the type of campaigning
-// the reason we use the type of string instead of uint64
-// is because it's simpler to compare and fill in raft entries
-type CampaignType string
-
-// StateType represents the role of a node in a cluster.
-type StateType uint64
-
-var stmap = [...]string{
-	"StateFollower",
-	"StateCandidate",
-	"StateLeader",
-	"StatePreCandidate",
-}
-
-func (st StateType) String() string {
-	return stmap[uint64(st)]
-}
-
-// Config contains the parameters to start a raft.
-type Config struct {
-	// ID is the identity of the local raft. ID cannot be 0.
-	ID uint64
-
-	// peers contains the IDs of all nodes (including self) in the raft cluster. It
-	// should only be set when starting a new raft cluster. Restarting raft from
-	// previous configuration will panic if peers is set. peer is private and only
-	// used for testing right now.
-	peers []uint64
-
-	// learners contains the IDs of all leaner nodes (including self if the local node is a leaner) in the raft cluster.
-	// learners only receives entries from the leader node. It does not vote or promote itself.
-	learners []uint64
-
-	// ElectionTick is the number of Node.Tick invocations that must pass between
-	// elections. That is, if a follower does not receive any message from the
-	// leader of current term before ElectionTick has elapsed, it will become
-	// candidate and start an election. ElectionTick must be greater than
-	// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
-	// unnecessary leader switching.
-	ElectionTick int
-	// HeartbeatTick is the number of Node.Tick invocations that must pass between
-	// heartbeats. That is, a leader sends heartbeat messages to maintain its
-	// leadership every HeartbeatTick ticks.
-	HeartbeatTick int
-
-	// Storage is the storage for raft. raft generates entries and states to be
-	// stored in storage. raft reads the persisted entries and states out of
-	// Storage when it needs. raft reads out the previous state and configuration
-	// out of storage when restarting.
-	Storage Storage
-	// Applied is the last applied index. It should only be set when restarting
-	// raft. raft will not return entries to the application smaller or equal to
-	// Applied. If Applied is unset when restarting, raft might return previous
-	// applied entries. This is a very application dependent configuration.
-	Applied uint64
-
-	// MaxSizePerMsg limits the max size of each append message. Smaller value
-	// lowers the raft recovery cost(initial probing and message lost during normal
-	// operation). On the other side, it might affect the throughput during normal
-	// replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per
-	// message.
-	MaxSizePerMsg uint64
-	// MaxInflightMsgs limits the max number of in-flight append messages during
-	// optimistic replication phase. The application transportation layer usually
-	// has its own sending buffer over TCP/UDP. Setting MaxInflightMsgs to avoid
-	// overflowing that sending buffer. TODO (xiangli): feedback to application to
-	// limit the proposal rate?
-	MaxInflightMsgs int
-
-	// CheckQuorum specifies if the leader should check quorum activity. Leader
-	// steps down when quorum is not active for an electionTimeout.
-	CheckQuorum bool
-
-	// PreVote enables the Pre-Vote algorithm described in raft thesis section
-	// 9.6. This prevents disruption when a node that has been partitioned away
-	// rejoins the cluster.
-	PreVote bool
-
-	// ReadOnlyOption specifies how the read only request is processed.
-	//
-	// ReadOnlySafe guarantees the linearizability of the read only request by
-	// communicating with the quorum. It is the default and suggested option.
-	//
-	// ReadOnlyLeaseBased ensures linearizability of the read only request by
-	// relying on the leader lease. It can be affected by clock drift.
-	// If the clock drift is unbounded, leader might keep the lease longer than it
-	// should (clock can move backward/pause without any bound). ReadIndex is not safe
-	// in that case.
-	// CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased.
-	ReadOnlyOption ReadOnlyOption
-
-	// Logger is the logger used for raft log. For multinode which can host
-	// multiple raft group, each raft group can have its own logger
-	Logger Logger
-
-	// DisableProposalForwarding set to true means that followers will drop
-	// proposals, rather than forwarding them to the leader. One use case for
-	// this feature would be in a situation where the Raft leader is used to
-	// compute the data of a proposal, for example, adding a timestamp from a
-	// hybrid logical clock to data in a monotonically increasing way. Forwarding
-	// should be disabled to prevent a follower with an innaccurate hybrid
-	// logical clock from assigning the timestamp and then forwarding the data
-	// to the leader.
-	DisableProposalForwarding bool
-}
-
-func (c *Config) validate() error {
-	if c.ID == None {
-		return errors.New("cannot use none as id")
-	}
-
-	if c.HeartbeatTick <= 0 {
-		return errors.New("heartbeat tick must be greater than 0")
-	}
-
-	if c.ElectionTick <= c.HeartbeatTick {
-		return errors.New("election tick must be greater than heartbeat tick")
-	}
-
-	if c.Storage == nil {
-		return errors.New("storage cannot be nil")
-	}
-
-	if c.MaxInflightMsgs <= 0 {
-		return errors.New("max inflight messages must be greater than 0")
-	}
-
-	if c.Logger == nil {
-		c.Logger = raftLogger
-	}
-
-	if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum {
-		return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased")
-	}
-
-	return nil
-}
-
-type raft struct {
-	id uint64
-
-	Term uint64
-	Vote uint64
-
-	readStates []ReadState
-
-	// the log
-	raftLog *raftLog
-
-	maxInflight int
-	maxMsgSize  uint64
-	prs         map[uint64]*Progress
-	learnerPrs  map[uint64]*Progress
-
-	state StateType
-
-	// isLearner is true if the local raft node is a learner.
-	isLearner bool
-
-	votes map[uint64]bool
-
-	msgs []pb.Message
-
-	// the leader id
-	lead uint64
-	// leadTransferee is id of the leader transfer target when its value is not zero.
-	// Follow the procedure defined in raft thesis 3.10.
-	leadTransferee uint64
-	// New configuration is ignored if there exists unapplied configuration.
-	pendingConf bool
-
-	readOnly *readOnly
-
-	// number of ticks since it reached last electionTimeout when it is leader
-	// or candidate.
-	// number of ticks since it reached last electionTimeout or received a
-	// valid message from current leader when it is a follower.
-	electionElapsed int
-
-	// number of ticks since it reached last heartbeatTimeout.
-	// only leader keeps heartbeatElapsed.
-	heartbeatElapsed int
-
-	checkQuorum bool
-	preVote     bool
-
-	heartbeatTimeout int
-	electionTimeout  int
-	// randomizedElectionTimeout is a random number between
-	// [electiontimeout, 2 * electiontimeout - 1]. It gets reset
-	// when raft changes its state to follower or candidate.
-	randomizedElectionTimeout int
-	disableProposalForwarding bool
-
-	tick func()
-	step stepFunc
-
-	logger Logger
-}
-
-func newRaft(c *Config) *raft {
-	if err := c.validate(); err != nil {
-		panic(err.Error())
-	}
-	raftlog := newLog(c.Storage, c.Logger)
-	hs, cs, err := c.Storage.InitialState()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	peers := c.peers
-	learners := c.learners
-	if len(cs.Nodes) > 0 || len(cs.Learners) > 0 {
-		if len(peers) > 0 || len(learners) > 0 {
-			// TODO(bdarnell): the peers argument is always nil except in
-			// tests; the argument should be removed and these tests should be
-			// updated to specify their nodes through a snapshot.
-			panic("cannot specify both newRaft(peers, learners) and ConfState.(Nodes, Learners)")
-		}
-		peers = cs.Nodes
-		learners = cs.Learners
-	}
-	r := &raft{
-		id:                        c.ID,
-		lead:                      None,
-		isLearner:                 false,
-		raftLog:                   raftlog,
-		maxMsgSize:                c.MaxSizePerMsg,
-		maxInflight:               c.MaxInflightMsgs,
-		prs:                       make(map[uint64]*Progress),
-		learnerPrs:                make(map[uint64]*Progress),
-		electionTimeout:           c.ElectionTick,
-		heartbeatTimeout:          c.HeartbeatTick,
-		logger:                    c.Logger,
-		checkQuorum:               c.CheckQuorum,
-		preVote:                   c.PreVote,
-		readOnly:                  newReadOnly(c.ReadOnlyOption),
-		disableProposalForwarding: c.DisableProposalForwarding,
-	}
-	for _, p := range peers {
-		r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
-	}
-	for _, p := range learners {
-		if _, ok := r.prs[p]; ok {
-			panic(fmt.Sprintf("node %x is in both learner and peer list", p))
-		}
-		r.learnerPrs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight), IsLearner: true}
-		if r.id == p {
-			r.isLearner = true
-		}
-	}
-
-	if !isHardStateEqual(hs, emptyState) {
-		r.loadState(hs)
-	}
-	if c.Applied > 0 {
-		raftlog.appliedTo(c.Applied)
-	}
-	r.becomeFollower(r.Term, None)
-
-	var nodesStrs []string
-	for _, n := range r.nodes() {
-		nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
-	}
-
-	r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
-		r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm())
-	return r
-}
-
-func (r *raft) hasLeader() bool { return r.lead != None }
-
-func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} }
-
-func (r *raft) hardState() pb.HardState {
-	return pb.HardState{
-		Term:   r.Term,
-		Vote:   r.Vote,
-		Commit: r.raftLog.committed,
-	}
-}
-
-func (r *raft) quorum() int { return len(r.prs)/2 + 1 }
-
-func (r *raft) nodes() []uint64 {
-	nodes := make([]uint64, 0, len(r.prs)+len(r.learnerPrs))
-	for id := range r.prs {
-		nodes = append(nodes, id)
-	}
-	for id := range r.learnerPrs {
-		nodes = append(nodes, id)
-	}
-	sort.Sort(uint64Slice(nodes))
-	return nodes
-}
-
-// send persists state to stable storage and then sends to its mailbox.
-func (r *raft) send(m pb.Message) {
-	m.From = r.id
-	if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp {
-		if m.Term == 0 {
-			// All {pre-,}campaign messages need to have the term set when
-			// sending.
-			// - MsgVote: m.Term is the term the node is campaigning for,
-			//   non-zero as we increment the term when campaigning.
-			// - MsgVoteResp: m.Term is the new r.Term if the MsgVote was
-			//   granted, non-zero for the same reason MsgVote is
-			// - MsgPreVote: m.Term is the term the node will campaign,
-			//   non-zero as we use m.Term to indicate the next term we'll be
-			//   campaigning for
-			// - MsgPreVoteResp: m.Term is the term received in the original
-			//   MsgPreVote if the pre-vote was granted, non-zero for the
-			//   same reasons MsgPreVote is
-			panic(fmt.Sprintf("term should be set when sending %s", m.Type))
-		}
-	} else {
-		if m.Term != 0 {
-			panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term))
-		}
-		// do not attach term to MsgProp, MsgReadIndex
-		// proposals are a way to forward to the leader and
-		// should be treated as local message.
-		// MsgReadIndex is also forwarded to leader.
-		if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex {
-			m.Term = r.Term
-		}
-	}
-	r.msgs = append(r.msgs, m)
-}
-
-func (r *raft) getProgress(id uint64) *Progress {
-	if pr, ok := r.prs[id]; ok {
-		return pr
-	}
-
-	return r.learnerPrs[id]
-}
-
-// sendAppend sends RPC, with entries to the given peer.
-func (r *raft) sendAppend(to uint64) {
-	pr := r.getProgress(to)
-	if pr.IsPaused() {
-		return
-	}
-	m := pb.Message{}
-	m.To = to
-
-	term, errt := r.raftLog.term(pr.Next - 1)
-	ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize)
-
-	if errt != nil || erre != nil { // send snapshot if we failed to get term or entries
-		if !pr.RecentActive {
-			r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
-			return
-		}
-
-		m.Type = pb.MsgSnap
-		snapshot, err := r.raftLog.snapshot()
-		if err != nil {
-			if err == ErrSnapshotTemporarilyUnavailable {
-				r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
-				return
-			}
-			panic(err) // TODO(bdarnell)
-		}
-		if IsEmptySnap(snapshot) {
-			panic("need non-empty snapshot")
-		}
-		m.Snapshot = snapshot
-		sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
-		r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
-			r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
-		pr.becomeSnapshot(sindex)
-		r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
-	} else {
-		m.Type = pb.MsgApp
-		m.Index = pr.Next - 1
-		m.LogTerm = term
-		m.Entries = ents
-		m.Commit = r.raftLog.committed
-		if n := len(m.Entries); n != 0 {
-			switch pr.State {
-			// optimistically increase the next when in ProgressStateReplicate
-			case ProgressStateReplicate:
-				last := m.Entries[n-1].Index
-				pr.optimisticUpdate(last)
-				pr.ins.add(last)
-			case ProgressStateProbe:
-				pr.pause()
-			default:
-				r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State)
-			}
-		}
-	}
-	r.send(m)
-}
-
-// sendHeartbeat sends an empty MsgApp
-func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
-	// Attach the commit as min(to.matched, r.committed).
-	// When the leader sends out heartbeat message,
-	// the receiver(follower) might not be matched with the leader
-	// or it might not have all the committed entries.
-	// The leader MUST NOT forward the follower's commit to
-	// an unmatched index.
-	commit := min(r.getProgress(to).Match, r.raftLog.committed)
-	m := pb.Message{
-		To:      to,
-		Type:    pb.MsgHeartbeat,
-		Commit:  commit,
-		Context: ctx,
-	}
-
-	r.send(m)
-}
-
-func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) {
-	for id, pr := range r.prs {
-		f(id, pr)
-	}
-
-	for id, pr := range r.learnerPrs {
-		f(id, pr)
-	}
-}
-
-// bcastAppend sends RPC, with entries to all peers that are not up-to-date
-// according to the progress recorded in r.prs.
-func (r *raft) bcastAppend() {
-	r.forEachProgress(func(id uint64, _ *Progress) {
-		if id == r.id {
-			return
-		}
-
-		r.sendAppend(id)
-	})
-}
-
-// bcastHeartbeat sends RPC, without entries to all the peers.
-func (r *raft) bcastHeartbeat() {
-	lastCtx := r.readOnly.lastPendingRequestCtx()
-	if len(lastCtx) == 0 {
-		r.bcastHeartbeatWithCtx(nil)
-	} else {
-		r.bcastHeartbeatWithCtx([]byte(lastCtx))
-	}
-}
-
-func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
-	r.forEachProgress(func(id uint64, _ *Progress) {
-		if id == r.id {
-			return
-		}
-		r.sendHeartbeat(id, ctx)
-	})
-}
-
-// maybeCommit attempts to advance the commit index. Returns true if
-// the commit index changed (in which case the caller should call
-// r.bcastAppend).
-func (r *raft) maybeCommit() bool {
-	// TODO(bmizerany): optimize.. Currently naive
-	mis := make(uint64Slice, 0, len(r.prs))
-	for _, p := range r.prs {
-		mis = append(mis, p.Match)
-	}
-	sort.Sort(sort.Reverse(mis))
-	mci := mis[r.quorum()-1]
-	return r.raftLog.maybeCommit(mci, r.Term)
-}
-
-func (r *raft) reset(term uint64) {
-	if r.Term != term {
-		r.Term = term
-		r.Vote = None
-	}
-	r.lead = None
-
-	r.electionElapsed = 0
-	r.heartbeatElapsed = 0
-	r.resetRandomizedElectionTimeout()
-
-	r.abortLeaderTransfer()
-
-	r.votes = make(map[uint64]bool)
-	r.forEachProgress(func(id uint64, pr *Progress) {
-		*pr = Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight), IsLearner: pr.IsLearner}
-		if id == r.id {
-			pr.Match = r.raftLog.lastIndex()
-		}
-	})
-
-	r.pendingConf = false
-	r.readOnly = newReadOnly(r.readOnly.option)
-}
-
-func (r *raft) appendEntry(es ...pb.Entry) {
-	li := r.raftLog.lastIndex()
-	for i := range es {
-		es[i].Term = r.Term
-		es[i].Index = li + 1 + uint64(i)
-	}
-	r.raftLog.append(es...)
-	r.getProgress(r.id).maybeUpdate(r.raftLog.lastIndex())
-	// Regardless of maybeCommit's return, our caller will call bcastAppend.
-	r.maybeCommit()
-}
-
-// tickElection is run by followers and candidates after r.electionTimeout.
-func (r *raft) tickElection() {
-	r.electionElapsed++
-
-	if r.promotable() && r.pastElectionTimeout() {
-		r.electionElapsed = 0
-		r.Step(pb.Message{From: r.id, Type: pb.MsgHup})
-	}
-}
-
-// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
-func (r *raft) tickHeartbeat() {
-	r.heartbeatElapsed++
-	r.electionElapsed++
-
-	if r.electionElapsed >= r.electionTimeout {
-		r.electionElapsed = 0
-		if r.checkQuorum {
-			r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
-		}
-		// If current leader cannot transfer leadership in electionTimeout, it becomes leader again.
-		if r.state == StateLeader && r.leadTransferee != None {
-			r.abortLeaderTransfer()
-		}
-	}
-
-	if r.state != StateLeader {
-		return
-	}
-
-	if r.heartbeatElapsed >= r.heartbeatTimeout {
-		r.heartbeatElapsed = 0
-		r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
-	}
-}
-
-func (r *raft) becomeFollower(term uint64, lead uint64) {
-	r.step = stepFollower
-	r.reset(term)
-	r.tick = r.tickElection
-	r.lead = lead
-	r.state = StateFollower
-	r.logger.Infof("%x became follower at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomeCandidate() {
-	// TODO(xiangli) remove the panic when the raft implementation is stable
-	if r.state == StateLeader {
-		panic("invalid transition [leader -> candidate]")
-	}
-	r.step = stepCandidate
-	r.reset(r.Term + 1)
-	r.tick = r.tickElection
-	r.Vote = r.id
-	r.state = StateCandidate
-	r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomePreCandidate() {
-	// TODO(xiangli) remove the panic when the raft implementation is stable
-	if r.state == StateLeader {
-		panic("invalid transition [leader -> pre-candidate]")
-	}
-	// Becoming a pre-candidate changes our step functions and state,
-	// but doesn't change anything else. In particular it does not increase
-	// r.Term or change r.Vote.
-	r.step = stepCandidate
-	r.votes = make(map[uint64]bool)
-	r.tick = r.tickElection
-	r.lead = None
-	r.state = StatePreCandidate
-	r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomeLeader() {
-	// TODO(xiangli) remove the panic when the raft implementation is stable
-	if r.state == StateFollower {
-		panic("invalid transition [follower -> leader]")
-	}
-	r.step = stepLeader
-	r.reset(r.Term)
-	r.tick = r.tickHeartbeat
-	r.lead = r.id
-	r.state = StateLeader
-	ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit)
-	if err != nil {
-		r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err)
-	}
-
-	nconf := numOfPendingConf(ents)
-	if nconf > 1 {
-		panic("unexpected multiple uncommitted config entry")
-	}
-	if nconf == 1 {
-		r.pendingConf = true
-	}
-
-	r.appendEntry(pb.Entry{Data: nil})
-	r.logger.Infof("%x became leader at term %d", r.id, r.Term)
-}
-
-func (r *raft) campaign(t CampaignType) {
-	var term uint64
-	var voteMsg pb.MessageType
-	if t == campaignPreElection {
-		r.becomePreCandidate()
-		voteMsg = pb.MsgPreVote
-		// PreVote RPCs are sent for the next term before we've incremented r.Term.
-		term = r.Term + 1
-	} else {
-		r.becomeCandidate()
-		voteMsg = pb.MsgVote
-		term = r.Term
-	}
-	if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) {
-		// We won the election after voting for ourselves (which must mean that
-		// this is a single-node cluster). Advance to the next state.
-		if t == campaignPreElection {
-			r.campaign(campaignElection)
-		} else {
-			r.becomeLeader()
-		}
-		return
-	}
-	for id := range r.prs {
-		if id == r.id {
-			continue
-		}
-		r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
-			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term)
-
-		var ctx []byte
-		if t == campaignTransfer {
-			ctx = []byte(t)
-		}
-		r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
-	}
-}
-
-func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) {
-	if v {
-		r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
-	} else {
-		r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
-	}
-	if _, ok := r.votes[id]; !ok {
-		r.votes[id] = v
-	}
-	for _, vv := range r.votes {
-		if vv {
-			granted++
-		}
-	}
-	return granted
-}
-
-func (r *raft) Step(m pb.Message) error {
-	// Handle the message term, which may result in our stepping down to a follower.
-	switch {
-	case m.Term == 0:
-		// local message
-	case m.Term > r.Term:
-		if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
-			force := bytes.Equal(m.Context, []byte(campaignTransfer))
-			inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
-			if !force && inLease {
-				// If a server receives a RequestVote request within the minimum election timeout
-				// of hearing from a current leader, it does not update its term or grant its vote
-				r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
-					r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
-				return nil
-			}
-		}
-		switch {
-		case m.Type == pb.MsgPreVote:
-			// Never change our term in response to a PreVote
-		case m.Type == pb.MsgPreVoteResp && !m.Reject:
-			// We send pre-vote requests with a term in our future. If the
-			// pre-vote is granted, we will increment our term when we get a
-			// quorum. If it is not, the term comes from the node that
-			// rejected our vote so we should become a follower at the new
-			// term.
-		default:
-			r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
-				r.id, r.Term, m.Type, m.From, m.Term)
-			if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
-				r.becomeFollower(m.Term, m.From)
-			} else {
-				r.becomeFollower(m.Term, None)
-			}
-		}
-
-	case m.Term < r.Term:
-		if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
-			// We have received messages from a leader at a lower term. It is possible
-			// that these messages were simply delayed in the network, but this could
-			// also mean that this node has advanced its term number during a network
-			// partition, and it is now unable to either win an election or to rejoin
-			// the majority on the old term. If checkQuorum is false, this will be
-			// handled by incrementing term numbers in response to MsgVote with a
-			// higher term, but if checkQuorum is true we may not advance the term on
-			// MsgVote and must generate other messages to advance the term. The net
-			// result of these two features is to minimize the disruption caused by
-			// nodes that have been removed from the cluster's configuration: a
-			// removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
-			// but it will not receive MsgApp or MsgHeartbeat, so it will not create
-			// disruptive term increases
-			r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
-		} else {
-			// ignore other cases
-			r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
-				r.id, r.Term, m.Type, m.From, m.Term)
-		}
-		return nil
-	}
-
-	switch m.Type {
-	case pb.MsgHup:
-		if r.state != StateLeader {
-			ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
-			if err != nil {
-				r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
-			}
-			if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
-				r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
-				return nil
-			}
-
-			r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
-			if r.preVote {
-				r.campaign(campaignPreElection)
-			} else {
-				r.campaign(campaignElection)
-			}
-		} else {
-			r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
-		}
-
-	case pb.MsgVote, pb.MsgPreVote:
-		if r.isLearner {
-			// TODO: learner may need to vote, in case of node down when confchange.
-			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: learner can not vote",
-				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
-			return nil
-		}
-		// The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should
-		// always equal r.Term.
-		if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
-			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
-				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
-			// When responding to Msg{Pre,}Vote messages we include the term
-			// from the message, not the local term. To see why consider the
-			// case where a single node was previously partitioned away and
-			// it's local term is now of date. If we include the local term
-			// (recall that for pre-votes we don't update the local term), the
-			// (pre-)campaigning node on the other end will proceed to ignore
-			// the message (it ignores all out of date messages).
-			// The term in the original message and current local term are the
-			// same in the case of regular votes, but different for pre-votes.
-			r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
-			if m.Type == pb.MsgVote {
-				// Only record real votes.
-				r.electionElapsed = 0
-				r.Vote = m.From
-			}
-		} else {
-			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
-				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
-			r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
-		}
-
-	default:
-		r.step(r, m)
-	}
-	return nil
-}
-
-type stepFunc func(r *raft, m pb.Message)
-
-func stepLeader(r *raft, m pb.Message) {
-	// These message types do not require any progress for m.From.
-	switch m.Type {
-	case pb.MsgBeat:
-		r.bcastHeartbeat()
-		return
-	case pb.MsgCheckQuorum:
-		if !r.checkQuorumActive() {
-			r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
-			r.becomeFollower(r.Term, None)
-		}
-		return
-	case pb.MsgProp:
-		if len(m.Entries) == 0 {
-			r.logger.Panicf("%x stepped empty MsgProp", r.id)
-		}
-		if _, ok := r.prs[r.id]; !ok {
-			// If we are not currently a member of the range (i.e. this node
-			// was removed from the configuration while serving as leader),
-			// drop any new proposals.
-			return
-		}
-		if r.leadTransferee != None {
-			r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
-			return
-		}
-
-		for i, e := range m.Entries {
-			if e.Type == pb.EntryConfChange {
-				if r.pendingConf {
-					r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String())
-					m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
-				}
-				r.pendingConf = true
-			}
-		}
-		r.appendEntry(m.Entries...)
-		r.bcastAppend()
-		return
-	case pb.MsgReadIndex:
-		if r.quorum() > 1 {
-			if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term {
-				// Reject read only request when this leader has not committed any log entry at its term.
-				return
-			}
-
-			// thinking: use an interally defined context instead of the user given context.
-			// We can express this in terms of the term and index instead of a user-supplied value.
-			// This would allow multiple reads to piggyback on the same message.
-			switch r.readOnly.option {
-			case ReadOnlySafe:
-				r.readOnly.addRequest(r.raftLog.committed, m)
-				r.bcastHeartbeatWithCtx(m.Entries[0].Data)
-			case ReadOnlyLeaseBased:
-				ri := r.raftLog.committed
-				if m.From == None || m.From == r.id { // from local member
-					r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
-				} else {
-					r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries})
-				}
-			}
-		} else {
-			r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
-		}
-
-		return
-	}
-
-	// All other message types require a progress for m.From (pr).
-	pr := r.getProgress(m.From)
-	if pr == nil {
-		r.logger.Debugf("%x no progress available for %x", r.id, m.From)
-		return
-	}
-	switch m.Type {
-	case pb.MsgAppResp:
-		pr.RecentActive = true
-
-		if m.Reject {
-			r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d",
-				r.id, m.RejectHint, m.From, m.Index)
-			if pr.maybeDecrTo(m.Index, m.RejectHint) {
-				r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
-				if pr.State == ProgressStateReplicate {
-					pr.becomeProbe()
-				}
-				r.sendAppend(m.From)
-			}
-		} else {
-			oldPaused := pr.IsPaused()
-			if pr.maybeUpdate(m.Index) {
-				switch {
-				case pr.State == ProgressStateProbe:
-					pr.becomeReplicate()
-				case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort():
-					r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
-					pr.becomeProbe()
-				case pr.State == ProgressStateReplicate:
-					pr.ins.freeTo(m.Index)
-				}
-
-				if r.maybeCommit() {
-					r.bcastAppend()
-				} else if oldPaused {
-					// update() reset the wait state on this node. If we had delayed sending
-					// an update before, send it now.
-					r.sendAppend(m.From)
-				}
-				// Transfer leadership is in progress.
-				if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
-					r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From)
-					r.sendTimeoutNow(m.From)
-				}
-			}
-		}
-	case pb.MsgHeartbeatResp:
-		pr.RecentActive = true
-		pr.resume()
-
-		// free one slot for the full inflights window to allow progress.
-		if pr.State == ProgressStateReplicate && pr.ins.full() {
-			pr.ins.freeFirstOne()
-		}
-		if pr.Match < r.raftLog.lastIndex() {
-			r.sendAppend(m.From)
-		}
-
-		if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
-			return
-		}
-
-		ackCount := r.readOnly.recvAck(m)
-		if ackCount < r.quorum() {
-			return
-		}
-
-		rss := r.readOnly.advance(m)
-		for _, rs := range rss {
-			req := rs.req
-			if req.From == None || req.From == r.id { // from local member
-				r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data})
-			} else {
-				r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries})
-			}
-		}
-	case pb.MsgSnapStatus:
-		if pr.State != ProgressStateSnapshot {
-			return
-		}
-		if !m.Reject {
-			pr.becomeProbe()
-			r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
-		} else {
-			pr.snapshotFailure()
-			pr.becomeProbe()
-			r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
-		}
-		// If snapshot finish, wait for the msgAppResp from the remote node before sending
-		// out the next msgApp.
-		// If snapshot failure, wait for a heartbeat interval before next try
-		pr.pause()
-	case pb.MsgUnreachable:
-		// During optimistic replication, if the remote becomes unreachable,
-		// there is huge probability that a MsgApp is lost.
-		if pr.State == ProgressStateReplicate {
-			pr.becomeProbe()
-		}
-		r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
-	case pb.MsgTransferLeader:
-		if pr.IsLearner {
-			r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id)
-			return
-		}
-		leadTransferee := m.From
-		lastLeadTransferee := r.leadTransferee
-		if lastLeadTransferee != None {
-			if lastLeadTransferee == leadTransferee {
-				r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x",
-					r.id, r.Term, leadTransferee, leadTransferee)
-				return
-			}
-			r.abortLeaderTransfer()
-			r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee)
-		}
-		if leadTransferee == r.id {
-			r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
-			return
-		}
-		// Transfer leadership to third party.
-		r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
-		// Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed.
-		r.electionElapsed = 0
-		r.leadTransferee = leadTransferee
-		if pr.Match == r.raftLog.lastIndex() {
-			r.sendTimeoutNow(leadTransferee)
-			r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
-		} else {
-			r.sendAppend(leadTransferee)
-		}
-	}
-}
-
-// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
-// whether they respond to MsgVoteResp or MsgPreVoteResp.
-func stepCandidate(r *raft, m pb.Message) {
-	// Only handle vote responses corresponding to our candidacy (while in
-	// StateCandidate, we may get stale MsgPreVoteResp messages in this term from
-	// our pre-candidate state).
-	var myVoteRespType pb.MessageType
-	if r.state == StatePreCandidate {
-		myVoteRespType = pb.MsgPreVoteResp
-	} else {
-		myVoteRespType = pb.MsgVoteResp
-	}
-	switch m.Type {
-	case pb.MsgProp:
-		r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
-		return
-	case pb.MsgApp:
-		r.becomeFollower(r.Term, m.From)
-		r.handleAppendEntries(m)
-	case pb.MsgHeartbeat:
-		r.becomeFollower(r.Term, m.From)
-		r.handleHeartbeat(m)
-	case pb.MsgSnap:
-		r.becomeFollower(m.Term, m.From)
-		r.handleSnapshot(m)
-	case myVoteRespType:
-		gr := r.poll(m.From, m.Type, !m.Reject)
-		r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr)
-		switch r.quorum() {
-		case gr:
-			if r.state == StatePreCandidate {
-				r.campaign(campaignElection)
-			} else {
-				r.becomeLeader()
-				r.bcastAppend()
-			}
-		case len(r.votes) - gr:
-			r.becomeFollower(r.Term, None)
-		}
-	case pb.MsgTimeoutNow:
-		r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
-	}
-}
-
-func stepFollower(r *raft, m pb.Message) {
-	switch m.Type {
-	case pb.MsgProp:
-		if r.lead == None {
-			r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
-			return
-		} else if r.disableProposalForwarding {
-			r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
-			return
-		}
-		m.To = r.lead
-		r.send(m)
-	case pb.MsgApp:
-		r.electionElapsed = 0
-		r.lead = m.From
-		r.handleAppendEntries(m)
-	case pb.MsgHeartbeat:
-		r.electionElapsed = 0
-		r.lead = m.From
-		r.handleHeartbeat(m)
-	case pb.MsgSnap:
-		r.electionElapsed = 0
-		r.lead = m.From
-		r.handleSnapshot(m)
-	case pb.MsgTransferLeader:
-		if r.lead == None {
-			r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
-			return
-		}
-		m.To = r.lead
-		r.send(m)
-	case pb.MsgTimeoutNow:
-		if r.promotable() {
-			r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
-			// Leadership transfers never use pre-vote even if r.preVote is true; we
-			// know we are not recovering from a partition so there is no need for the
-			// extra round trip.
-			r.campaign(campaignTransfer)
-		} else {
-			r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From)
-		}
-	case pb.MsgReadIndex:
-		if r.lead == None {
-			r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
-			return
-		}
-		m.To = r.lead
-		r.send(m)
-	case pb.MsgReadIndexResp:
-		if len(m.Entries) != 1 {
-			r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
-			return
-		}
-		r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
-	}
-}
-
-func (r *raft) handleAppendEntries(m pb.Message) {
-	if m.Index < r.raftLog.committed {
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
-		return
-	}
-
-	if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
-	} else {
-		r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x",
-			r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()})
-	}
-}
-
-func (r *raft) handleHeartbeat(m pb.Message) {
-	r.raftLog.commitTo(m.Commit)
-	r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
-}
-
-func (r *raft) handleSnapshot(m pb.Message) {
-	sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
-	if r.restore(m.Snapshot) {
-		r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
-			r.id, r.raftLog.committed, sindex, sterm)
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
-	} else {
-		r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
-			r.id, r.raftLog.committed, sindex, sterm)
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
-	}
-}
-
-// restore recovers the state machine from a snapshot. It restores the log and the
-// configuration of state machine.
-func (r *raft) restore(s pb.Snapshot) bool {
-	if s.Metadata.Index <= r.raftLog.committed {
-		return false
-	}
-	if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
-		r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
-			r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
-		r.raftLog.commitTo(s.Metadata.Index)
-		return false
-	}
-
-	// The normal peer can't become learner.
-	if !r.isLearner {
-		for _, id := range s.Metadata.ConfState.Learners {
-			if id == r.id {
-				r.logger.Errorf("%x can't become learner when restores snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term)
-				return false
-			}
-		}
-	}
-
-	r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
-		r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
-
-	r.raftLog.restore(s)
-	r.prs = make(map[uint64]*Progress)
-	r.learnerPrs = make(map[uint64]*Progress)
-	r.restoreNode(s.Metadata.ConfState.Nodes, false)
-	r.restoreNode(s.Metadata.ConfState.Learners, true)
-	return true
-}
-
-func (r *raft) restoreNode(nodes []uint64, isLearner bool) {
-	for _, n := range nodes {
-		match, next := uint64(0), r.raftLog.lastIndex()+1
-		if n == r.id {
-			match = next - 1
-			r.isLearner = isLearner
-		}
-		r.setProgress(n, match, next, isLearner)
-		r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.getProgress(n))
-	}
-}
-
-// promotable indicates whether state machine can be promoted to leader,
-// which is true when its own id is in progress list.
-func (r *raft) promotable() bool {
-	_, ok := r.prs[r.id]
-	return ok
-}
-
-func (r *raft) addNode(id uint64) {
-	r.addNodeOrLearnerNode(id, false)
-}
-
-func (r *raft) addLearner(id uint64) {
-	r.addNodeOrLearnerNode(id, true)
-}
-
-func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) {
-	r.pendingConf = false
-	pr := r.getProgress(id)
-	if pr == nil {
-		r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner)
-	} else {
-		if isLearner && !pr.IsLearner {
-			// can only change Learner to Voter
-			r.logger.Infof("%x ignored addLeaner: do not support changing %x from raft peer to learner.", r.id, id)
-			return
-		}
-
-		if isLearner == pr.IsLearner {
-			// Ignore any redundant addNode calls (which can happen because the
-			// initial bootstrapping entries are applied twice).
-			return
-		}
-
-		// change Learner to Voter, use origin Learner progress
-		delete(r.learnerPrs, id)
-		pr.IsLearner = false
-		r.prs[id] = pr
-	}
-
-	if r.id == id {
-		r.isLearner = isLearner
-	}
-
-	// When a node is first added, we should mark it as recently active.
-	// Otherwise, CheckQuorum may cause us to step down if it is invoked
-	// before the added node has a chance to communicate with us.
-	pr = r.getProgress(id)
-	pr.RecentActive = true
-}
-
-func (r *raft) removeNode(id uint64) {
-	r.delProgress(id)
-	r.pendingConf = false
-
-	// do not try to commit or abort transferring if there is no nodes in the cluster.
-	if len(r.prs) == 0 && len(r.learnerPrs) == 0 {
-		return
-	}
-
-	// The quorum size is now smaller, so see if any pending entries can
-	// be committed.
-	if r.maybeCommit() {
-		r.bcastAppend()
-	}
-	// If the removed node is the leadTransferee, then abort the leadership transferring.
-	if r.state == StateLeader && r.leadTransferee == id {
-		r.abortLeaderTransfer()
-	}
-}
-
-func (r *raft) resetPendingConf() { r.pendingConf = false }
-
-func (r *raft) setProgress(id, match, next uint64, isLearner bool) {
-	if !isLearner {
-		delete(r.learnerPrs, id)
-		r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
-		return
-	}
-
-	if _, ok := r.prs[id]; ok {
-		panic(fmt.Sprintf("%x unexpected changing from voter to learner for %x", r.id, id))
-	}
-	r.learnerPrs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight), IsLearner: true}
-}
-
-func (r *raft) delProgress(id uint64) {
-	delete(r.prs, id)
-	delete(r.learnerPrs, id)
-}
-
-func (r *raft) loadState(state pb.HardState) {
-	if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
-		r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
-	}
-	r.raftLog.committed = state.Commit
-	r.Term = state.Term
-	r.Vote = state.Vote
-}
-
-// pastElectionTimeout returns true iff r.electionElapsed is greater
-// than or equal to the randomized election timeout in
-// [electiontimeout, 2 * electiontimeout - 1].
-func (r *raft) pastElectionTimeout() bool {
-	return r.electionElapsed >= r.randomizedElectionTimeout
-}
-
-func (r *raft) resetRandomizedElectionTimeout() {
-	r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
-}
-
-// checkQuorumActive returns true if the quorum is active from
-// the view of the local raft state machine. Otherwise, it returns
-// false.
-// checkQuorumActive also resets all RecentActive to false.
-func (r *raft) checkQuorumActive() bool {
-	var act int
-
-	r.forEachProgress(func(id uint64, pr *Progress) {
-		if id == r.id { // self is always active
-			act++
-			return
-		}
-
-		if pr.RecentActive && !pr.IsLearner {
-			act++
-		}
-
-		pr.RecentActive = false
-	})
-
-	return act >= r.quorum()
-}
-
-func (r *raft) sendTimeoutNow(to uint64) {
-	r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow})
-}
-
-func (r *raft) abortLeaderTransfer() {
-	r.leadTransferee = None
-}
-
-func numOfPendingConf(ents []pb.Entry) int {
-	n := 0
-	for i := range ents {
-		if ents[i].Type == pb.EntryConfChange {
-			n++
-		}
-	}
-	return n
-}

+ 0 - 95
vendor/github.com/coreos/etcd/raft/raftpb/raft.proto

@@ -1,95 +0,0 @@
-syntax = "proto2";
-package raftpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-
-enum EntryType {
-	EntryNormal     = 0;
-	EntryConfChange = 1;
-}
-
-message Entry {
-	optional uint64     Term  = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
-	optional uint64     Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
-	optional EntryType  Type  = 1 [(gogoproto.nullable) = false];
-	optional bytes      Data  = 4;
-}
-
-message SnapshotMetadata {
-	optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
-	optional uint64    index      = 2 [(gogoproto.nullable) = false];
-	optional uint64    term       = 3 [(gogoproto.nullable) = false];
-}
-
-message Snapshot {
-	optional bytes            data     = 1;
-	optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
-}
-
-enum MessageType {
-	MsgHup             = 0;
-	MsgBeat            = 1;
-	MsgProp            = 2;
-	MsgApp             = 3;
-	MsgAppResp         = 4;
-	MsgVote            = 5;
-	MsgVoteResp        = 6;
-	MsgSnap            = 7;
-	MsgHeartbeat       = 8;
-	MsgHeartbeatResp   = 9;
-	MsgUnreachable     = 10;
-	MsgSnapStatus      = 11;
-	MsgCheckQuorum     = 12;
-	MsgTransferLeader  = 13;
-	MsgTimeoutNow      = 14;
-	MsgReadIndex       = 15;
-	MsgReadIndexResp   = 16;
-	MsgPreVote         = 17;
-	MsgPreVoteResp     = 18;
-}
-
-message Message {
-	optional MessageType type        = 1  [(gogoproto.nullable) = false];
-	optional uint64      to          = 2  [(gogoproto.nullable) = false];
-	optional uint64      from        = 3  [(gogoproto.nullable) = false];
-	optional uint64      term        = 4  [(gogoproto.nullable) = false];
-	optional uint64      logTerm     = 5  [(gogoproto.nullable) = false];
-	optional uint64      index       = 6  [(gogoproto.nullable) = false];
-	repeated Entry       entries     = 7  [(gogoproto.nullable) = false];
-	optional uint64      commit      = 8  [(gogoproto.nullable) = false];
-	optional Snapshot    snapshot    = 9  [(gogoproto.nullable) = false];
-	optional bool        reject      = 10 [(gogoproto.nullable) = false];
-	optional uint64      rejectHint  = 11 [(gogoproto.nullable) = false];
-	optional bytes       context     = 12;
-}
-
-message HardState {
-	optional uint64 term   = 1 [(gogoproto.nullable) = false];
-	optional uint64 vote   = 2 [(gogoproto.nullable) = false];
-	optional uint64 commit = 3 [(gogoproto.nullable) = false];
-}
-
-message ConfState {
-	repeated uint64 nodes    = 1;
-	repeated uint64 learners = 2;
-}
-
-enum ConfChangeType {
-	ConfChangeAddNode        = 0;
-	ConfChangeRemoveNode     = 1;
-	ConfChangeUpdateNode     = 2;
-	ConfChangeAddLearnerNode = 3;
-}
-
-message ConfChange {
-	optional uint64          ID      = 1 [(gogoproto.nullable) = false];
-	optional ConfChangeType  Type    = 2 [(gogoproto.nullable) = false];
-	optional uint64          NodeID  = 3 [(gogoproto.nullable) = false];
-	optional bytes           Context = 4;
-}

+ 0 - 129
vendor/github.com/coreos/etcd/raft/util.go

@@ -1,129 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"bytes"
-	"fmt"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-func (st StateType) MarshalJSON() ([]byte, error) {
-	return []byte(fmt.Sprintf("%q", st.String())), nil
-}
-
-// uint64Slice implements sort interface
-type uint64Slice []uint64
-
-func (p uint64Slice) Len() int           { return len(p) }
-func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-
-func min(a, b uint64) uint64 {
-	if a > b {
-		return b
-	}
-	return a
-}
-
-func max(a, b uint64) uint64 {
-	if a > b {
-		return a
-	}
-	return b
-}
-
-func IsLocalMsg(msgt pb.MessageType) bool {
-	return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable ||
-		msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum
-}
-
-func IsResponseMsg(msgt pb.MessageType) bool {
-	return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp
-}
-
-// voteResponseType maps vote and prevote message types to their corresponding responses.
-func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
-	switch msgt {
-	case pb.MsgVote:
-		return pb.MsgVoteResp
-	case pb.MsgPreVote:
-		return pb.MsgPreVoteResp
-	default:
-		panic(fmt.Sprintf("not a vote message: %s", msgt))
-	}
-}
-
-// EntryFormatter can be implemented by the application to provide human-readable formatting
-// of entry data. Nil is a valid EntryFormatter and will use a default format.
-type EntryFormatter func([]byte) string
-
-// DescribeMessage returns a concise human-readable description of a
-// Message for debugging.
-func DescribeMessage(m pb.Message, f EntryFormatter) string {
-	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
-	if m.Reject {
-		fmt.Fprintf(&buf, " Rejected")
-		if m.RejectHint != 0 {
-			fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint)
-		}
-	}
-	if m.Commit != 0 {
-		fmt.Fprintf(&buf, " Commit:%d", m.Commit)
-	}
-	if len(m.Entries) > 0 {
-		fmt.Fprintf(&buf, " Entries:[")
-		for i, e := range m.Entries {
-			if i != 0 {
-				buf.WriteString(", ")
-			}
-			buf.WriteString(DescribeEntry(e, f))
-		}
-		fmt.Fprintf(&buf, "]")
-	}
-	if !IsEmptySnap(m.Snapshot) {
-		fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot)
-	}
-	return buf.String()
-}
-
-// DescribeEntry returns a concise human-readable description of an
-// Entry for debugging.
-func DescribeEntry(e pb.Entry, f EntryFormatter) string {
-	var formatted string
-	if e.Type == pb.EntryNormal && f != nil {
-		formatted = f(e.Data)
-	} else {
-		formatted = fmt.Sprintf("%q", e.Data)
-	}
-	return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted)
-}
-
-func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry {
-	if len(ents) == 0 {
-		return ents
-	}
-	size := ents[0].Size()
-	var limit int
-	for limit = 1; limit < len(ents); limit++ {
-		size += ents[limit].Size()
-		if uint64(size) > maxSize {
-			break
-		}
-	}
-	return ents[:limit]
-}

+ 0 - 191
vendor/github.com/coreos/go-systemd/LICENSE

@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.

+ 0 - 5
vendor/github.com/coreos/go-systemd/NOTICE

@@ -1,5 +0,0 @@
-CoreOS Project
-Copyright 2018 CoreOS, Inc
-
-This product includes software developed at CoreOS, Inc.
-(http://www.coreos.com/).

+ 0 - 182
vendor/github.com/coreos/go-systemd/journal/journal.go

@@ -1,182 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package journal provides write bindings to the local systemd journal.
-// It is implemented in pure Go and connects to the journal directly over its
-// unix socket.
-//
-// To read from the journal, see the "sdjournal" package, which wraps the
-// sd-journal a C API.
-//
-// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
-package journal
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net"
-	"os"
-	"strconv"
-	"strings"
-	"syscall"
-)
-
-// Priority of a journal message
-type Priority int
-
-const (
-	PriEmerg Priority = iota
-	PriAlert
-	PriCrit
-	PriErr
-	PriWarning
-	PriNotice
-	PriInfo
-	PriDebug
-)
-
-var conn net.Conn
-
-func init() {
-	var err error
-	conn, err = net.Dial("unixgram", "/run/systemd/journal/socket")
-	if err != nil {
-		conn = nil
-	}
-}
-
-// Enabled returns true if the local systemd journal is available for logging
-func Enabled() bool {
-	return conn != nil
-}
-
-// Send a message to the local systemd journal. vars is a map of journald
-// fields to values.  Fields must be composed of uppercase letters, numbers,
-// and underscores, but must not start with an underscore. Within these
-// restrictions, any arbitrary field name may be used.  Some names have special
-// significance: see the journalctl documentation
-// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
-// for more details.  vars may be nil.
-func Send(message string, priority Priority, vars map[string]string) error {
-	if conn == nil {
-		return journalError("could not connect to journald socket")
-	}
-
-	data := new(bytes.Buffer)
-	appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
-	appendVariable(data, "MESSAGE", message)
-	for k, v := range vars {
-		appendVariable(data, k, v)
-	}
-
-	_, err := io.Copy(conn, data)
-	if err != nil && isSocketSpaceError(err) {
-		file, err := tempFd()
-		if err != nil {
-			return journalError(err.Error())
-		}
-		defer file.Close()
-		_, err = io.Copy(file, data)
-		if err != nil {
-			return journalError(err.Error())
-		}
-
-		rights := syscall.UnixRights(int(file.Fd()))
-
-		/* this connection should always be a UnixConn, but better safe than sorry */
-		unixConn, ok := conn.(*net.UnixConn)
-		if !ok {
-			return journalError("can't send file through non-Unix connection")
-		}
-		_, _, err = unixConn.WriteMsgUnix([]byte{}, rights, nil)
-		if err != nil {
-			return journalError(err.Error())
-		}
-	} else if err != nil {
-		return journalError(err.Error())
-	}
-	return nil
-}
-
-// Print prints a message to the local systemd journal using Send().
-func Print(priority Priority, format string, a ...interface{}) error {
-	return Send(fmt.Sprintf(format, a...), priority, nil)
-}
-
-func appendVariable(w io.Writer, name, value string) {
-	if !validVarName(name) {
-		journalError("variable name contains invalid character, ignoring")
-	}
-	if strings.ContainsRune(value, '\n') {
-		/* When the value contains a newline, we write:
-		 * - the variable name, followed by a newline
-		 * - the size (in 64bit little endian format)
-		 * - the data, followed by a newline
-		 */
-		fmt.Fprintln(w, name)
-		binary.Write(w, binary.LittleEndian, uint64(len(value)))
-		fmt.Fprintln(w, value)
-	} else {
-		/* just write the variable and value all on one line */
-		fmt.Fprintf(w, "%s=%s\n", name, value)
-	}
-}
-
-func validVarName(name string) bool {
-	/* The variable name must be in uppercase and consist only of characters,
-	 * numbers and underscores, and may not begin with an underscore. (from the docs)
-	 */
-
-	valid := name[0] != '_'
-	for _, c := range name {
-		valid = valid && ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_'
-	}
-	return valid
-}
-
-func isSocketSpaceError(err error) bool {
-	opErr, ok := err.(*net.OpError)
-	if !ok {
-		return false
-	}
-
-	sysErr, ok := opErr.Err.(syscall.Errno)
-	if !ok {
-		return false
-	}
-
-	return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
-}
-
-func tempFd() (*os.File, error) {
-	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
-	if err != nil {
-		return nil, err
-	}
-	err = syscall.Unlink(file.Name())
-	if err != nil {
-		return nil, err
-	}
-	return file, nil
-}
-
-func journalError(s string) error {
-	s = "journal error: " + s
-	fmt.Fprintln(os.Stderr, s)
-	return errors.New(s)
-}

+ 0 - 5
vendor/github.com/coreos/pkg/NOTICE

@@ -1,5 +0,0 @@
-CoreOS Project
-Copyright 2014 CoreOS, Inc
-
-This product includes software developed at CoreOS, Inc.
-(http://www.coreos.com/).

+ 0 - 39
vendor/github.com/coreos/pkg/capnslog/README.md

@@ -1,39 +0,0 @@
-# capnslog, the CoreOS logging package
-
-There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks) or are just a pain to use (lack of `Fatalln()`?).
-capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
-
-### Design Principles
-
-##### `package main` is the place where logging gets turned on and routed
-
-A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
-
-##### All log options are runtime-configurable. 
-
-Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly. 
-
-##### There is one log object per package. It is registered under its repository and package name.
-
-`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
-
-##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
-
-Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
-
-Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
-
-##### Log objects are an interface
-
-An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
-
-##### Log levels have specific meanings:
-
-  * Critical: Unrecoverable. Must fail.
-  * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
-  * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
-  * Notice: Normal, but important (uncommon) log information.
-  * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
-  * Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices.
-  * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
-

+ 0 - 157
vendor/github.com/coreos/pkg/capnslog/formatters.go

@@ -1,157 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"log"
-	"runtime"
-	"strings"
-	"time"
-)
-
-type Formatter interface {
-	Format(pkg string, level LogLevel, depth int, entries ...interface{})
-	Flush()
-}
-
-func NewStringFormatter(w io.Writer) Formatter {
-	return &StringFormatter{
-		w: bufio.NewWriter(w),
-	}
-}
-
-type StringFormatter struct {
-	w *bufio.Writer
-}
-
-func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
-	now := time.Now().UTC()
-	s.w.WriteString(now.Format(time.RFC3339))
-	s.w.WriteByte(' ')
-	writeEntries(s.w, pkg, l, i, entries...)
-	s.Flush()
-}
-
-func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
-	if pkg != "" {
-		w.WriteString(pkg + ": ")
-	}
-	str := fmt.Sprint(entries...)
-	endsInNL := strings.HasSuffix(str, "\n")
-	w.WriteString(str)
-	if !endsInNL {
-		w.WriteString("\n")
-	}
-}
-
-func (s *StringFormatter) Flush() {
-	s.w.Flush()
-}
-
-func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
-	return &PrettyFormatter{
-		w:     bufio.NewWriter(w),
-		debug: debug,
-	}
-}
-
-type PrettyFormatter struct {
-	w     *bufio.Writer
-	debug bool
-}
-
-func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
-	now := time.Now()
-	ts := now.Format("2006-01-02 15:04:05")
-	c.w.WriteString(ts)
-	ms := now.Nanosecond() / 1000
-	c.w.WriteString(fmt.Sprintf(".%06d", ms))
-	if c.debug {
-		_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
-		if !ok {
-			file = "???"
-			line = 1
-		} else {
-			slash := strings.LastIndex(file, "/")
-			if slash >= 0 {
-				file = file[slash+1:]
-			}
-		}
-		if line < 0 {
-			line = 0 // not a real line number
-		}
-		c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
-	}
-	c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
-	writeEntries(c.w, pkg, l, depth, entries...)
-	c.Flush()
-}
-
-func (c *PrettyFormatter) Flush() {
-	c.w.Flush()
-}
-
-// LogFormatter emulates the form of the traditional built-in logger.
-type LogFormatter struct {
-	logger *log.Logger
-	prefix string
-}
-
-// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the
-// golang log package to actually do the logging work so that logs look similar.
-func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter {
-	return &LogFormatter{
-		logger: log.New(w, "", flag), // don't use prefix here
-		prefix: prefix,               // save it instead
-	}
-}
-
-// Format builds a log message for the LogFormatter. The LogLevel is ignored.
-func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) {
-	str := fmt.Sprint(entries...)
-	prefix := lf.prefix
-	if pkg != "" {
-		prefix = fmt.Sprintf("%s%s: ", prefix, pkg)
-	}
-	lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5
-}
-
-// Flush is included so that the interface is complete, but is a no-op.
-func (lf *LogFormatter) Flush() {
-	// noop
-}
-
-// NilFormatter is a no-op log formatter that does nothing.
-type NilFormatter struct {
-}
-
-// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no
-// messages so that you can cause part of your logging to be silent.
-func NewNilFormatter() Formatter {
-	return &NilFormatter{}
-}
-
-// Format does nothing.
-func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) {
-	// noop
-}
-
-// Flush is included so that the interface is complete, but is a no-op.
-func (_ *NilFormatter) Flush() {
-	// noop
-}

+ 0 - 96
vendor/github.com/coreos/pkg/capnslog/glog_formatter.go

@@ -1,96 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"bufio"
-	"bytes"
-	"io"
-	"os"
-	"runtime"
-	"strconv"
-	"strings"
-	"time"
-)
-
-var pid = os.Getpid()
-
-type GlogFormatter struct {
-	StringFormatter
-}
-
-func NewGlogFormatter(w io.Writer) *GlogFormatter {
-	g := &GlogFormatter{}
-	g.w = bufio.NewWriter(w)
-	return g
-}
-
-func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
-	g.w.Write(GlogHeader(level, depth+1))
-	g.StringFormatter.Format(pkg, level, depth+1, entries...)
-}
-
-func GlogHeader(level LogLevel, depth int) []byte {
-	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
-	now := time.Now().UTC()
-	_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
-	if !ok {
-		file = "???"
-		line = 1
-	} else {
-		slash := strings.LastIndex(file, "/")
-		if slash >= 0 {
-			file = file[slash+1:]
-		}
-	}
-	if line < 0 {
-		line = 0 // not a real line number
-	}
-	buf := &bytes.Buffer{}
-	buf.Grow(30)
-	_, month, day := now.Date()
-	hour, minute, second := now.Clock()
-	buf.WriteString(level.Char())
-	twoDigits(buf, int(month))
-	twoDigits(buf, day)
-	buf.WriteByte(' ')
-	twoDigits(buf, hour)
-	buf.WriteByte(':')
-	twoDigits(buf, minute)
-	buf.WriteByte(':')
-	twoDigits(buf, second)
-	buf.WriteByte('.')
-	buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
-	buf.WriteByte('Z')
-	buf.WriteByte(' ')
-	buf.WriteString(strconv.Itoa(pid))
-	buf.WriteByte(' ')
-	buf.WriteString(file)
-	buf.WriteByte(':')
-	buf.WriteString(strconv.Itoa(line))
-	buf.WriteByte(']')
-	buf.WriteByte(' ')
-	return buf.Bytes()
-}
-
-const digits = "0123456789"
-
-func twoDigits(b *bytes.Buffer, d int) {
-	c2 := digits[d%10]
-	d /= 10
-	c1 := digits[d%10]
-	b.WriteByte(c1)
-	b.WriteByte(c2)
-}

+ 0 - 49
vendor/github.com/coreos/pkg/capnslog/init.go

@@ -1,49 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows
-
-package capnslog
-
-import (
-	"io"
-	"os"
-	"syscall"
-)
-
-// Here's where the opinionation comes in. We need some sensible defaults,
-// especially after taking over the log package. Your project (whatever it may
-// be) may see things differently. That's okay; there should be no defaults in
-// the main package that cannot be controlled or overridden programatically,
-// otherwise it's a bug. Doing so is creating your own init_log.go file much
-// like this one.
-
-func init() {
-	initHijack()
-
-	// Go `log` pacakge uses os.Stderr.
-	SetFormatter(NewDefaultFormatter(os.Stderr))
-	SetGlobalLogLevel(INFO)
-}
-
-func NewDefaultFormatter(out io.Writer) Formatter {
-	if syscall.Getppid() == 1 {
-		// We're running under init, which may be systemd.
-		f, err := NewJournaldFormatter()
-		if err == nil {
-			return f
-		}
-	}
-	return NewPrettyFormatter(out, false)
-}

+ 0 - 68
vendor/github.com/coreos/pkg/capnslog/journald_formatter.go

@@ -1,68 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows
-
-package capnslog
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-
-	"github.com/coreos/go-systemd/journal"
-)
-
-func NewJournaldFormatter() (Formatter, error) {
-	if !journal.Enabled() {
-		return nil, errors.New("No systemd detected")
-	}
-	return &journaldFormatter{}, nil
-}
-
-type journaldFormatter struct{}
-
-func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
-	var pri journal.Priority
-	switch l {
-	case CRITICAL:
-		pri = journal.PriCrit
-	case ERROR:
-		pri = journal.PriErr
-	case WARNING:
-		pri = journal.PriWarning
-	case NOTICE:
-		pri = journal.PriNotice
-	case INFO:
-		pri = journal.PriInfo
-	case DEBUG:
-		pri = journal.PriDebug
-	case TRACE:
-		pri = journal.PriDebug
-	default:
-		panic("Unhandled loglevel")
-	}
-	msg := fmt.Sprint(entries...)
-	tags := map[string]string{
-		"PACKAGE":           pkg,
-		"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
-	}
-	err := journal.Send(msg, pri, tags)
-	if err != nil {
-		fmt.Fprintln(os.Stderr, err)
-	}
-}
-
-func (j *journaldFormatter) Flush() {}

+ 0 - 39
vendor/github.com/coreos/pkg/capnslog/log_hijack.go

@@ -1,39 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"log"
-)
-
-func initHijack() {
-	pkg := NewPackageLogger("log", "")
-	w := packageWriter{pkg}
-	log.SetFlags(0)
-	log.SetPrefix("")
-	log.SetOutput(w)
-}
-
-type packageWriter struct {
-	pl *PackageLogger
-}
-
-func (p packageWriter) Write(b []byte) (int, error) {
-	if p.pl.level < INFO {
-		return 0, nil
-	}
-	p.pl.internalLog(calldepth+2, INFO, string(b))
-	return len(b), nil
-}

+ 0 - 245
vendor/github.com/coreos/pkg/capnslog/logmap.go

@@ -1,245 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"errors"
-	"strings"
-	"sync"
-)
-
-// LogLevel is the set of all log levels.
-type LogLevel int8
-
-const (
-	// CRITICAL is the lowest log level; only errors which will end the program will be propagated.
-	CRITICAL LogLevel = iota - 1
-	// ERROR is for errors that are not fatal but lead to troubling behavior.
-	ERROR
-	// WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations.
-	WARNING
-	// NOTICE is for normal but significant conditions.
-	NOTICE
-	// INFO is a log level for common, everyday log updates.
-	INFO
-	// DEBUG is the default hidden level for more verbose updates about internal processes.
-	DEBUG
-	// TRACE is for (potentially) call by call tracing of programs.
-	TRACE
-)
-
-// Char returns a single-character representation of the log level.
-func (l LogLevel) Char() string {
-	switch l {
-	case CRITICAL:
-		return "C"
-	case ERROR:
-		return "E"
-	case WARNING:
-		return "W"
-	case NOTICE:
-		return "N"
-	case INFO:
-		return "I"
-	case DEBUG:
-		return "D"
-	case TRACE:
-		return "T"
-	default:
-		panic("Unhandled loglevel")
-	}
-}
-
-// String returns a multi-character representation of the log level.
-func (l LogLevel) String() string {
-	switch l {
-	case CRITICAL:
-		return "CRITICAL"
-	case ERROR:
-		return "ERROR"
-	case WARNING:
-		return "WARNING"
-	case NOTICE:
-		return "NOTICE"
-	case INFO:
-		return "INFO"
-	case DEBUG:
-		return "DEBUG"
-	case TRACE:
-		return "TRACE"
-	default:
-		panic("Unhandled loglevel")
-	}
-}
-
-// Update using the given string value. Fulfills the flag.Value interface.
-func (l *LogLevel) Set(s string) error {
-	value, err := ParseLevel(s)
-	if err != nil {
-		return err
-	}
-
-	*l = value
-	return nil
-}
-
-// Returns an empty string, only here to fulfill the pflag.Value interface.
-func (l *LogLevel) Type() string {
-	return ""
-}
-
-// ParseLevel translates some potential loglevel strings into their corresponding levels.
-func ParseLevel(s string) (LogLevel, error) {
-	switch s {
-	case "CRITICAL", "C":
-		return CRITICAL, nil
-	case "ERROR", "0", "E":
-		return ERROR, nil
-	case "WARNING", "1", "W":
-		return WARNING, nil
-	case "NOTICE", "2", "N":
-		return NOTICE, nil
-	case "INFO", "3", "I":
-		return INFO, nil
-	case "DEBUG", "4", "D":
-		return DEBUG, nil
-	case "TRACE", "5", "T":
-		return TRACE, nil
-	}
-	return CRITICAL, errors.New("couldn't parse log level " + s)
-}
-
-type RepoLogger map[string]*PackageLogger
-
-type loggerStruct struct {
-	sync.Mutex
-	repoMap   map[string]RepoLogger
-	formatter Formatter
-}
-
-// logger is the global logger
-var logger = new(loggerStruct)
-
-// SetGlobalLogLevel sets the log level for all packages in all repositories
-// registered with capnslog.
-func SetGlobalLogLevel(l LogLevel) {
-	logger.Lock()
-	defer logger.Unlock()
-	for _, r := range logger.repoMap {
-		r.setRepoLogLevelInternal(l)
-	}
-}
-
-// GetRepoLogger may return the handle to the repository's set of packages' loggers.
-func GetRepoLogger(repo string) (RepoLogger, error) {
-	logger.Lock()
-	defer logger.Unlock()
-	r, ok := logger.repoMap[repo]
-	if !ok {
-		return nil, errors.New("no packages registered for repo " + repo)
-	}
-	return r, nil
-}
-
-// MustRepoLogger returns the handle to the repository's packages' loggers.
-func MustRepoLogger(repo string) RepoLogger {
-	r, err := GetRepoLogger(repo)
-	if err != nil {
-		panic(err)
-	}
-	return r
-}
-
-// SetRepoLogLevel sets the log level for all packages in the repository.
-func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
-	logger.Lock()
-	defer logger.Unlock()
-	r.setRepoLogLevelInternal(l)
-}
-
-func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
-	for _, v := range r {
-		v.level = l
-	}
-}
-
-// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
-// order, and returns a map of the results, for use in SetLogLevel.
-func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
-	setlist := strings.Split(conf, ",")
-	out := make(map[string]LogLevel)
-	for _, setstring := range setlist {
-		setting := strings.Split(setstring, "=")
-		if len(setting) != 2 {
-			return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
-		}
-		l, err := ParseLevel(setting[1])
-		if err != nil {
-			return nil, err
-		}
-		out[setting[0]] = l
-	}
-	return out, nil
-}
-
-// SetLogLevel takes a map of package names within a repository to their desired
-// loglevel, and sets the levels appropriately. Unknown packages are ignored.
-// "*" is a special package name that corresponds to all packages, and will be
-// processed first.
-func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
-	logger.Lock()
-	defer logger.Unlock()
-	if l, ok := m["*"]; ok {
-		r.setRepoLogLevelInternal(l)
-	}
-	for k, v := range m {
-		l, ok := r[k]
-		if !ok {
-			continue
-		}
-		l.level = v
-	}
-}
-
-// SetFormatter sets the formatting function for all logs.
-func SetFormatter(f Formatter) {
-	logger.Lock()
-	defer logger.Unlock()
-	logger.formatter = f
-}
-
-// NewPackageLogger creates a package logger object.
-// This should be defined as a global var in your package, referencing your repo.
-func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
-	logger.Lock()
-	defer logger.Unlock()
-	if logger.repoMap == nil {
-		logger.repoMap = make(map[string]RepoLogger)
-	}
-	r, rok := logger.repoMap[repo]
-	if !rok {
-		logger.repoMap[repo] = make(RepoLogger)
-		r = logger.repoMap[repo]
-	}
-	p, pok := r[pkg]
-	if !pok {
-		r[pkg] = &PackageLogger{
-			pkg:   pkg,
-			level: INFO,
-		}
-		p = r[pkg]
-	}
-	return
-}

+ 0 - 191
vendor/github.com/coreos/pkg/capnslog/pkg_logger.go

@@ -1,191 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"fmt"
-	"os"
-)
-
-type PackageLogger struct {
-	pkg   string
-	level LogLevel
-}
-
-const calldepth = 2
-
-func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
-	logger.Lock()
-	defer logger.Unlock()
-	if inLevel != CRITICAL && p.level < inLevel {
-		return
-	}
-	if logger.formatter != nil {
-		logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
-	}
-}
-
-// SetLevel allows users to change the current logging level.
-func (p *PackageLogger) SetLevel(l LogLevel) {
-	logger.Lock()
-	defer logger.Unlock()
-	p.level = l
-}
-
-// LevelAt checks if the given log level will be outputted under current setting.
-func (p *PackageLogger) LevelAt(l LogLevel) bool {
-	logger.Lock()
-	defer logger.Unlock()
-	return p.level >= l
-}
-
-// Log a formatted string at any level between ERROR and TRACE
-func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
-	p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
-}
-
-// Log a message at any level between ERROR and TRACE
-func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
-	p.internalLog(calldepth, l, fmt.Sprint(args...))
-}
-
-// log stdlib compatibility
-
-func (p *PackageLogger) Println(args ...interface{}) {
-	p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
-}
-
-func (p *PackageLogger) Printf(format string, args ...interface{}) {
-	p.Logf(INFO, format, args...)
-}
-
-func (p *PackageLogger) Print(args ...interface{}) {
-	p.internalLog(calldepth, INFO, fmt.Sprint(args...))
-}
-
-// Panic and fatal
-
-func (p *PackageLogger) Panicf(format string, args ...interface{}) {
-	s := fmt.Sprintf(format, args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	panic(s)
-}
-
-func (p *PackageLogger) Panic(args ...interface{}) {
-	s := fmt.Sprint(args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	panic(s)
-}
-
-func (p *PackageLogger) Panicln(args ...interface{}) {
-	s := fmt.Sprintln(args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	panic(s)
-}
-
-func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
-	p.Logf(CRITICAL, format, args...)
-	os.Exit(1)
-}
-
-func (p *PackageLogger) Fatal(args ...interface{}) {
-	s := fmt.Sprint(args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	os.Exit(1)
-}
-
-func (p *PackageLogger) Fatalln(args ...interface{}) {
-	s := fmt.Sprintln(args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	os.Exit(1)
-}
-
-// Error Functions
-
-func (p *PackageLogger) Errorf(format string, args ...interface{}) {
-	p.Logf(ERROR, format, args...)
-}
-
-func (p *PackageLogger) Error(entries ...interface{}) {
-	p.internalLog(calldepth, ERROR, entries...)
-}
-
-// Warning Functions
-
-func (p *PackageLogger) Warningf(format string, args ...interface{}) {
-	p.Logf(WARNING, format, args...)
-}
-
-func (p *PackageLogger) Warning(entries ...interface{}) {
-	p.internalLog(calldepth, WARNING, entries...)
-}
-
-// Notice Functions
-
-func (p *PackageLogger) Noticef(format string, args ...interface{}) {
-	p.Logf(NOTICE, format, args...)
-}
-
-func (p *PackageLogger) Notice(entries ...interface{}) {
-	p.internalLog(calldepth, NOTICE, entries...)
-}
-
-// Info Functions
-
-func (p *PackageLogger) Infof(format string, args ...interface{}) {
-	p.Logf(INFO, format, args...)
-}
-
-func (p *PackageLogger) Info(entries ...interface{}) {
-	p.internalLog(calldepth, INFO, entries...)
-}
-
-// Debug Functions
-
-func (p *PackageLogger) Debugf(format string, args ...interface{}) {
-	if p.level < DEBUG {
-		return
-	}
-	p.Logf(DEBUG, format, args...)
-}
-
-func (p *PackageLogger) Debug(entries ...interface{}) {
-	if p.level < DEBUG {
-		return
-	}
-	p.internalLog(calldepth, DEBUG, entries...)
-}
-
-// Trace Functions
-
-func (p *PackageLogger) Tracef(format string, args ...interface{}) {
-	if p.level < TRACE {
-		return
-	}
-	p.Logf(TRACE, format, args...)
-}
-
-func (p *PackageLogger) Trace(entries ...interface{}) {
-	if p.level < TRACE {
-		return
-	}
-	p.internalLog(calldepth, TRACE, entries...)
-}
-
-func (p *PackageLogger) Flush() {
-	logger.Lock()
-	defer logger.Unlock()
-	logger.formatter.Flush()
-}

+ 0 - 65
vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go

@@ -1,65 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows
-
-package capnslog
-
-import (
-	"fmt"
-	"log/syslog"
-)
-
-func NewSyslogFormatter(w *syslog.Writer) Formatter {
-	return &syslogFormatter{w}
-}
-
-func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
-	w, err := syslog.New(syslog.LOG_DEBUG, tag)
-	if err != nil {
-		return nil, err
-	}
-	return NewSyslogFormatter(w), nil
-}
-
-type syslogFormatter struct {
-	w *syslog.Writer
-}
-
-func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
-	for _, entry := range entries {
-		str := fmt.Sprint(entry)
-		switch l {
-		case CRITICAL:
-			s.w.Crit(str)
-		case ERROR:
-			s.w.Err(str)
-		case WARNING:
-			s.w.Warning(str)
-		case NOTICE:
-			s.w.Notice(str)
-		case INFO:
-			s.w.Info(str)
-		case DEBUG:
-			s.w.Debug(str)
-		case TRACE:
-			s.w.Debug(str)
-		default:
-			panic("Unhandled loglevel")
-		}
-	}
-}
-
-func (s *syslogFormatter) Flush() {
-}

+ 35 - 1
vendor/github.com/docker/swarmkit/agent/agent.go

@@ -543,6 +543,40 @@ func (a *Agent) UpdateTaskStatus(ctx context.Context, taskID string, status *api
 	}
 }
 
+// ReportVolumeUnpublished sends a Volume status update to the manager
+// indicating that the provided volume has been successfully unpublished.
+func (a *Agent) ReportVolumeUnpublished(ctx context.Context, volumeID string) error {
+	l := log.G(ctx).WithField("volume.ID", volumeID)
+	l.Debug("(*Agent).ReportVolumeUnpublished")
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	errs := make(chan error, 1)
+	if err := a.withSession(ctx, func(session *session) error {
+		go func() {
+			err := session.reportVolumeUnpublished(ctx, []string{volumeID})
+			if err != nil {
+				l.WithError(err).Error("error reporting volume unpublished")
+			} else {
+				l.Debug("reported volume unpublished")
+			}
+
+			errs <- err
+		}()
+
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	select {
+	case err := <-errs:
+		return err
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
 // Publisher returns a LogPublisher for the given subscription
 // as well as a cancel function that should be called when the log stream
 // is completed.
@@ -597,8 +631,8 @@ func (a *Agent) Publisher(ctx context.Context, subscriptionID string) (exec.LogP
 func (a *Agent) nodeDescriptionWithHostname(ctx context.Context, tlsInfo *api.NodeTLSInfo) (*api.NodeDescription, error) {
 	desc, err := a.config.Executor.Describe(ctx)
 
-	// Override hostname and TLS info
 	if desc != nil {
+		// Override hostname and TLS info
 		if a.config.Hostname != "" {
 			desc.Hostname = a.config.Hostname
 		}

+ 120 - 0
vendor/github.com/docker/swarmkit/agent/csi/plugin/manager.go

@@ -0,0 +1,120 @@
+package plugin
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"github.com/docker/docker/pkg/plugingetter"
+
+	"github.com/docker/swarmkit/api"
+)
+
+const (
+	// DockerCSIPluginCap is the capability name of the plugins we use with the
+	// PluginGetter to get only the plugins we need. The full name of the
+	// plugin interface is "docker.csinode/1.0". This gets only plugins with
+	// Node capabilities.
+	DockerCSIPluginCap = "csinode"
+)
+
+// PluginManager manages the multiple CSI plugins that may be in use on the
+// node. PluginManager should be thread-safe.
+type PluginManager interface {
+	// Get gets the plugin with the given name
+	Get(name string) (NodePlugin, error)
+
+	// NodeInfo returns the NodeCSIInfo for every active plugin.
+	NodeInfo(ctx context.Context) ([]*api.NodeCSIInfo, error)
+}
+
+type pluginManager struct {
+	plugins   map[string]NodePlugin
+	pluginsMu sync.Mutex
+
+	// newNodePluginFunc usually points to NewNodePlugin. However, for testing,
+	// NewNodePlugin can be swapped out with a function that creates fake node
+	// plugins
+	newNodePluginFunc func(string, plugingetter.CompatPlugin, plugingetter.PluginAddr, SecretGetter) NodePlugin
+
+	// secrets is a SecretGetter for use by node plugins.
+	secrets SecretGetter
+
+	pg plugingetter.PluginGetter
+}
+
+func NewPluginManager(pg plugingetter.PluginGetter, secrets SecretGetter) PluginManager {
+	return &pluginManager{
+		plugins:           map[string]NodePlugin{},
+		newNodePluginFunc: NewNodePlugin,
+		secrets:           secrets,
+		pg:                pg,
+	}
+}
+
+func (pm *pluginManager) Get(name string) (NodePlugin, error) {
+	pm.pluginsMu.Lock()
+	defer pm.pluginsMu.Unlock()
+
+	plugin, err := pm.getPlugin(name)
+	if err != nil {
+		return nil, fmt.Errorf("cannot get plugin %v: %v", name, err)
+	}
+
+	return plugin, nil
+}
+
+func (pm *pluginManager) NodeInfo(ctx context.Context) ([]*api.NodeCSIInfo, error) {
+	// TODO(dperny): do not acquire this lock for the duration of the the
+	// function call. that's too long and too blocking.
+	pm.pluginsMu.Lock()
+	defer pm.pluginsMu.Unlock()
+
+	// first, we should make sure all of the plugins are initialized. do this
+	// by looking up all the current plugins with DockerCSIPluginCap.
+	plugins := pm.pg.GetAllManagedPluginsByCap(DockerCSIPluginCap)
+	for _, plugin := range plugins {
+		// TODO(dperny): use this opportunity to drop plugins that we're
+		// tracking but which no longer exist.
+
+		// we don't actually need the plugin returned, we just need it loaded
+		// as a side effect.
+		pm.getPlugin(plugin.Name())
+	}
+
+	nodeInfo := []*api.NodeCSIInfo{}
+	for _, plugin := range pm.plugins {
+		info, err := plugin.NodeGetInfo(ctx)
+		if err != nil {
+			// skip any plugin that returns an error
+			continue
+		}
+
+		nodeInfo = append(nodeInfo, info)
+	}
+	return nodeInfo, nil
+}
+
+// getPlugin looks up the plugin with the specified name. Loads the plugin if
+// not yet loaded.
+//
+// pm.pluginsMu must be obtained before calling this method.
+func (pm *pluginManager) getPlugin(name string) (NodePlugin, error) {
+	if p, ok := pm.plugins[name]; ok {
+		return p, nil
+	}
+
+	pc, err := pm.pg.Get(name, DockerCSIPluginCap, plugingetter.Lookup)
+	if err != nil {
+		return nil, err
+	}
+
+	pa, ok := pc.(plugingetter.PluginAddr)
+	if !ok {
+		return nil, fmt.Errorf("plugin does not implement PluginAddr interface")
+	}
+
+	p := pm.newNodePluginFunc(name, pc, pa, pm.secrets)
+	pm.plugins[name] = p
+	return p, nil
+}

+ 459 - 0
vendor/github.com/docker/swarmkit/agent/csi/plugin/plugin.go

@@ -0,0 +1,459 @@
+package plugin
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+	"sync"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/container-storage-interface/spec/lib/go/csi"
+	"github.com/docker/docker/pkg/plugingetter"
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/log"
+)
+
+// SecretGetter is a reimplementation of the exec.SecretGetter interface in the
+// scope of the plugin package. This avoids the needing to import exec into the
+// plugin package.
+type SecretGetter interface {
+	Get(secretID string) (*api.Secret, error)
+}
+
+type NodePlugin interface {
+	GetPublishedPath(volumeID string) string
+	NodeGetInfo(ctx context.Context) (*api.NodeCSIInfo, error)
+	NodeStageVolume(ctx context.Context, req *api.VolumeAssignment) error
+	NodeUnstageVolume(ctx context.Context, req *api.VolumeAssignment) error
+	NodePublishVolume(ctx context.Context, req *api.VolumeAssignment) error
+	NodeUnpublishVolume(ctx context.Context, req *api.VolumeAssignment) error
+}
+
+type volumePublishStatus struct {
+	// stagingPath is staging path of volume
+	stagingPath string
+
+	// isPublished keeps track if the volume is published.
+	isPublished bool
+
+	// publishedPath is published path of volume
+	publishedPath string
+}
+
+type nodePlugin struct {
+	// name is the name of the plugin, which is used in the Driver.Name field.
+	name string
+
+	// socket is the path of the unix socket to connect to this plugin at
+	socket string
+
+	// scopePath gets the provided path relative to the plugin directory.
+	scopePath func(s string) string
+
+	// secrets is the SecretGetter to get volume secret data
+	secrets SecretGetter
+
+	// volumeMap is the map from volume ID to Volume. Will place a volume once it is staged,
+	// remove it from the map for unstage.
+	// TODO: Make this map persistent if the swarm node goes down
+	volumeMap map[string]*volumePublishStatus
+
+	// mu for volumeMap
+	mu sync.RWMutex
+
+	// staging indicates that the plugin has staging capabilities.
+	staging bool
+
+	// cc is the gRPC client connection
+	cc *grpc.ClientConn
+
+	// idClient is the CSI Identity Service client
+	idClient csi.IdentityClient
+
+	// nodeClient is the CSI Node Service client
+	nodeClient csi.NodeClient
+}
+
+const (
+	// TargetStagePath is the path within the plugin's scope that the volume is
+	// to be staged. This does not need to be accessible or propagated outside
+	// of the plugin rootfs.
+	TargetStagePath string = "/data/staged"
+	// TargetPublishPath is the path within the plugin's scope that the volume
+	// is to be published. This needs to be the plugin's PropagatedMount.
+	TargetPublishPath string = "/data/published"
+)
+
+func NewNodePlugin(name string, pc plugingetter.CompatPlugin, pa plugingetter.PluginAddr, secrets SecretGetter) NodePlugin {
+	return newNodePlugin(name, pc, pa, secrets)
+}
+
+// newNodePlugin returns a raw nodePlugin object, not behind an interface. this
+// is useful for testing.
+func newNodePlugin(name string, pc plugingetter.CompatPlugin, pa plugingetter.PluginAddr, secrets SecretGetter) *nodePlugin {
+	return &nodePlugin{
+		name:      name,
+		socket:    fmt.Sprintf("%s://%s", pa.Addr().Network(), pa.Addr().String()),
+		scopePath: pc.ScopedPath,
+		secrets:   secrets,
+		volumeMap: map[string]*volumePublishStatus{},
+	}
+}
+
+// connect is a private method that sets up the identity client and node
+// client from a grpc client. it exists separately so that testing code can
+// substitute in fake clients without a grpc connection
+func (np *nodePlugin) connect(ctx context.Context) error {
+	// even though this is a unix socket, we must set WithInsecure or the
+	// connection will not be allowed.
+	cc, err := grpc.DialContext(ctx, np.socket, grpc.WithInsecure())
+	if err != nil {
+		return err
+	}
+
+	np.cc = cc
+	// first, probe the plugin, to ensure that it exists and is ready to go
+	idc := csi.NewIdentityClient(cc)
+	np.idClient = idc
+
+	np.nodeClient = csi.NewNodeClient(cc)
+
+	return np.init(ctx)
+}
+
+func (np *nodePlugin) Client(ctx context.Context) (csi.NodeClient, error) {
+	if np.nodeClient == nil {
+		if err := np.connect(ctx); err != nil {
+			return nil, err
+		}
+	}
+	return np.nodeClient, nil
+}
+
+func (np *nodePlugin) init(ctx context.Context) error {
+	probe, err := np.idClient.Probe(ctx, &csi.ProbeRequest{})
+	if err != nil {
+		return err
+	}
+	if probe.Ready != nil && !probe.Ready.Value {
+		return status.Error(codes.FailedPrecondition, "Plugin is not Ready")
+	}
+
+	c, err := np.Client(ctx)
+	if err != nil {
+		return err
+	}
+
+	resp, err := c.NodeGetCapabilities(ctx, &csi.NodeGetCapabilitiesRequest{})
+	if err != nil {
+		// TODO(ameyag): handle
+		return err
+	}
+	if resp == nil {
+		return nil
+	}
+	log.G(ctx).Debugf("plugin advertises %d capabilities", len(resp.Capabilities))
+	for _, c := range resp.Capabilities {
+		if rpc := c.GetRpc(); rpc != nil {
+			log.G(ctx).Debugf("plugin has capability %s", rpc)
+			switch rpc.Type {
+			case csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME:
+				np.staging = true
+			}
+		}
+	}
+
+	return nil
+}
+
+// GetPublishedPath returns the path at which the provided volume ID is
+// published. This path is provided in terms of absolute location on the host,
+// not the location in the plugins' scope.
+//
+// Returns an empty string if the volume does not exist.
+func (np *nodePlugin) GetPublishedPath(volumeID string) string {
+	np.mu.RLock()
+	defer np.mu.RUnlock()
+	if volInfo, ok := np.volumeMap[volumeID]; ok {
+		if volInfo.isPublished {
+			return np.scopePath(volInfo.publishedPath)
+		}
+	}
+	return ""
+}
+
+func (np *nodePlugin) NodeGetInfo(ctx context.Context) (*api.NodeCSIInfo, error) {
+	c, err := np.Client(ctx)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := c.NodeGetInfo(ctx, &csi.NodeGetInfoRequest{})
+	if err != nil {
+		return nil, err
+	}
+
+	i := makeNodeInfo(resp)
+	i.PluginName = np.name
+	return i, nil
+}
+
+func (np *nodePlugin) NodeStageVolume(ctx context.Context, req *api.VolumeAssignment) error {
+	np.mu.Lock()
+	defer np.mu.Unlock()
+	if !np.staging {
+		return nil
+	}
+
+	stagingTarget := stagePath(req)
+
+	// Check arguments
+	if len(req.VolumeID) == 0 {
+		return status.Error(codes.InvalidArgument, "VolumeID missing in request")
+	}
+
+	c, err := np.Client(ctx)
+	if err != nil {
+		return err
+	}
+
+	_, err = c.NodeStageVolume(ctx, &csi.NodeStageVolumeRequest{
+		VolumeId:          req.VolumeID,
+		StagingTargetPath: stagingTarget,
+		Secrets:           np.makeSecrets(req),
+		VolumeCapability:  makeCapability(req.AccessMode),
+		VolumeContext:     req.VolumeContext,
+		PublishContext:    req.PublishContext,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	v := &volumePublishStatus{
+		stagingPath: stagingTarget,
+	}
+
+	np.volumeMap[req.ID] = v
+
+	log.G(ctx).Infof("volume staged to path %s", stagingTarget)
+	return nil
+}
+
+func (np *nodePlugin) NodeUnstageVolume(ctx context.Context, req *api.VolumeAssignment) error {
+	np.mu.Lock()
+	defer np.mu.Unlock()
+	if !np.staging {
+		return nil
+	}
+
+	stagingTarget := stagePath(req)
+
+	// Check arguments
+	if len(req.VolumeID) == 0 {
+		return status.Error(codes.FailedPrecondition, "VolumeID missing in request")
+	}
+
+	c, err := np.Client(ctx)
+	if err != nil {
+		return err
+	}
+
+	// we must unpublish before we unstage. verify here that the volume is not
+	// published.
+	if v, ok := np.volumeMap[req.ID]; ok {
+		if v.isPublished {
+			return status.Errorf(codes.FailedPrecondition, "Volume %s is not unpublished", req.ID)
+		}
+		return nil
+	}
+
+	_, err = c.NodeUnstageVolume(ctx, &csi.NodeUnstageVolumeRequest{
+		VolumeId:          req.VolumeID,
+		StagingTargetPath: stagingTarget,
+	})
+	if err != nil {
+		return err
+	}
+
+	// if the volume doesn't exist in the volumeMap, deleting has no effect.
+	delete(np.volumeMap, req.ID)
+	log.G(ctx).Info("volume unstaged")
+
+	return nil
+}
+
+func (np *nodePlugin) NodePublishVolume(ctx context.Context, req *api.VolumeAssignment) error {
+	// Check arguments
+	if len(req.VolumeID) == 0 {
+		return status.Error(codes.InvalidArgument, "Volume ID missing in request")
+	}
+
+	np.mu.Lock()
+	defer np.mu.Unlock()
+
+	publishTarget := publishPath(req)
+
+	// some volumes do not require staging. we can check this by checkign the
+	// staging variable, or we can just see if there is a staging path in the
+	// map.
+	var stagingPath string
+	if vs, ok := np.volumeMap[req.ID]; ok {
+		stagingPath = vs.stagingPath
+	} else {
+		return status.Error(codes.FailedPrecondition, "volume not staged")
+	}
+
+	c, err := np.Client(ctx)
+	if err != nil {
+		return err
+	}
+
+	_, err = c.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{
+		VolumeId:          req.VolumeID,
+		TargetPath:        publishTarget,
+		StagingTargetPath: stagingPath,
+		VolumeCapability:  makeCapability(req.AccessMode),
+		Secrets:           np.makeSecrets(req),
+		VolumeContext:     req.VolumeContext,
+		PublishContext:    req.PublishContext,
+	})
+	if err != nil {
+		return err
+	}
+
+	status, ok := np.volumeMap[req.ID]
+	if !ok {
+		status = &volumePublishStatus{}
+		np.volumeMap[req.ID] = status
+	}
+
+	status.isPublished = true
+	status.publishedPath = publishTarget
+
+	log.G(ctx).Infof("volume published to path %s", publishTarget)
+
+	return nil
+}
+
+func (np *nodePlugin) NodeUnpublishVolume(ctx context.Context, req *api.VolumeAssignment) error {
+	// Check arguments
+	if len(req.VolumeID) == 0 {
+		return status.Error(codes.InvalidArgument, "Volume ID missing in request")
+	}
+
+	np.mu.Lock()
+	defer np.mu.Unlock()
+	publishTarget := publishPath(req)
+
+	c, err := np.Client(ctx)
+	if err != nil {
+		return err
+	}
+
+	_, err = c.NodeUnpublishVolume(ctx, &csi.NodeUnpublishVolumeRequest{
+		VolumeId:   req.VolumeID,
+		TargetPath: publishTarget,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	if v, ok := np.volumeMap[req.ID]; ok {
+		v.publishedPath = ""
+		v.isPublished = false
+		return nil
+	}
+
+	log.G(ctx).Info("volume unpublished")
+	return nil
+}
+
+func (np *nodePlugin) makeSecrets(v *api.VolumeAssignment) map[string]string {
+	// this should never happen, but program defensively.
+	if v == nil {
+		return nil
+	}
+
+	secrets := make(map[string]string, len(v.Secrets))
+	for _, secret := range v.Secrets {
+		// TODO(dperny): handle error from Get
+		value, _ := np.secrets.Get(secret.Secret)
+		if value != nil {
+			secrets[secret.Key] = string(value.Spec.Data)
+		}
+	}
+
+	return secrets
+}
+
+// makeNodeInfo converts a csi.NodeGetInfoResponse object into a swarmkit NodeCSIInfo
+// object.
+func makeNodeInfo(csiNodeInfo *csi.NodeGetInfoResponse) *api.NodeCSIInfo {
+	return &api.NodeCSIInfo{
+		NodeID:            csiNodeInfo.NodeId,
+		MaxVolumesPerNode: csiNodeInfo.MaxVolumesPerNode,
+	}
+}
+
+func makeCapability(am *api.VolumeAccessMode) *csi.VolumeCapability {
+	var mode csi.VolumeCapability_AccessMode_Mode
+	switch am.Scope {
+	case api.VolumeScopeSingleNode:
+		switch am.Sharing {
+		case api.VolumeSharingNone, api.VolumeSharingOneWriter, api.VolumeSharingAll:
+			mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
+		case api.VolumeSharingReadOnly:
+			mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY
+		}
+	case api.VolumeScopeMultiNode:
+		switch am.Sharing {
+		case api.VolumeSharingReadOnly:
+			mode = csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
+		case api.VolumeSharingOneWriter:
+			mode = csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER
+		case api.VolumeSharingAll:
+			mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
+		}
+	}
+
+	capability := &csi.VolumeCapability{
+		AccessMode: &csi.VolumeCapability_AccessMode{
+			Mode: mode,
+		},
+	}
+
+	if block := am.GetBlock(); block != nil {
+		capability.AccessType = &csi.VolumeCapability_Block{
+			// Block type is empty.
+			Block: &csi.VolumeCapability_BlockVolume{},
+		}
+	}
+
+	if mount := am.GetMount(); mount != nil {
+		capability.AccessType = &csi.VolumeCapability_Mount{
+			Mount: &csi.VolumeCapability_MountVolume{
+				FsType:     mount.FsType,
+				MountFlags: mount.MountFlags,
+			},
+		}
+	}
+	return capability
+}
+
+// stagePath returns the staging path for a given volume assignment
+func stagePath(v *api.VolumeAssignment) string {
+	// this really just exists so we use the same trick to determine staging
+	// path across multiple methods and can't forget to change it in one place
+	// but not another
+	return filepath.Join(TargetStagePath, v.ID)
+}
+
+// publishPath returns the publishing path for a given volume assignment
+func publishPath(v *api.VolumeAssignment) string {
+	// ditto as stagePath
+	return filepath.Join(TargetPublishPath, v.ID)
+}

+ 227 - 0
vendor/github.com/docker/swarmkit/agent/csi/volumes.go

@@ -0,0 +1,227 @@
+package csi
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"github.com/sirupsen/logrus"
+
+	"github.com/docker/docker/pkg/plugingetter"
+
+	"github.com/docker/swarmkit/agent/csi/plugin"
+	"github.com/docker/swarmkit/agent/exec"
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/log"
+	"github.com/docker/swarmkit/volumequeue"
+)
+
+// volumeState keeps track of the state of a volume on this node.
+type volumeState struct {
+	// volume is the actual VolumeAssignment for this volume
+	volume *api.VolumeAssignment
+	// remove is true if the volume is to be removed, or false if it should be
+	// active.
+	remove bool
+	// removeCallback is called when the volume is successfully removed.
+	removeCallback func(id string)
+}
+
+// volumes is a map that keeps all the currently available volumes to the agent
+// mapped by volume ID.
+type volumes struct {
+	// mu guards access to the volumes map.
+	mu sync.RWMutex
+
+	// volumes is a mapping of volume ID to volumeState
+	volumes map[string]volumeState
+
+	// plugins is the PluginManager, which provides translation to the CSI RPCs
+	plugins plugin.PluginManager
+
+	// pendingVolumes is a VolumeQueue which manages which volumes are
+	// processed and when.
+	pendingVolumes *volumequeue.VolumeQueue
+}
+
+// NewManager returns a place to store volumes.
+func NewManager(pg plugingetter.PluginGetter, secrets exec.SecretGetter) exec.VolumesManager {
+	r := &volumes{
+		volumes:        map[string]volumeState{},
+		plugins:        plugin.NewPluginManager(pg, secrets),
+		pendingVolumes: volumequeue.NewVolumeQueue(),
+	}
+	go r.retryVolumes()
+
+	return r
+}
+
+// retryVolumes runs in a goroutine to retry failing volumes.
+func (r *volumes) retryVolumes() {
+	ctx := log.WithModule(context.Background(), "node/agent/csi")
+	for {
+		vid, attempt := r.pendingVolumes.Wait()
+
+		dctx := log.WithFields(ctx, logrus.Fields{
+			"volume.id": vid,
+			"attempt":   fmt.Sprintf("%d", attempt),
+		})
+
+		// this case occurs when the Stop method has been called on
+		// pendingVolumes, and means that we should pack up and exit.
+		if vid == "" && attempt == 0 {
+			break
+		}
+		r.tryVolume(dctx, vid, attempt)
+	}
+}
+
+// tryVolume synchronously tries one volume. it puts the volume back into the
+// queue if the attempt fails.
+func (r *volumes) tryVolume(ctx context.Context, id string, attempt uint) {
+	r.mu.RLock()
+	vs, ok := r.volumes[id]
+	r.mu.RUnlock()
+
+	if !ok {
+		return
+	}
+
+	if !vs.remove {
+		if err := r.publishVolume(ctx, vs.volume); err != nil {
+			log.G(ctx).WithError(err).Info("publishing volume failed")
+			r.pendingVolumes.Enqueue(id, attempt+1)
+		}
+	} else {
+		if err := r.unpublishVolume(ctx, vs.volume); err != nil {
+			log.G(ctx).WithError(err).Info("upublishing volume failed")
+			r.pendingVolumes.Enqueue(id, attempt+1)
+		} else {
+			// if unpublishing was successful, then call the callback
+			vs.removeCallback(id)
+		}
+	}
+}
+
+// Get returns a volume published path for the provided volume ID.  If the volume doesn't exist, returns empty string.
+func (r *volumes) Get(volumeID string) (string, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if vs, ok := r.volumes[volumeID]; ok {
+		if vs.remove {
+			// TODO(dperny): use a structured error
+			return "", fmt.Errorf("volume being removed")
+		}
+
+		if p, err := r.plugins.Get(vs.volume.Driver.Name); err == nil {
+			path := p.GetPublishedPath(volumeID)
+			if path != "" {
+				return path, nil
+			}
+			// don't put this line here, it spams like crazy.
+			// log.L.WithField("method", "(*volumes).Get").Debugf("Path not published for volume:%v", volumeID)
+		} else {
+			return "", err
+		}
+
+	}
+	return "", fmt.Errorf("%w: published path is unavailable", exec.ErrDependencyNotReady)
+}
+
+// Add adds one or more volumes to the volume map.
+func (r *volumes) Add(volumes ...api.VolumeAssignment) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	for _, volume := range volumes {
+		// if we get an Add operation, then we will always restart the retries.
+		v := volume.Copy()
+		r.volumes[volume.ID] = volumeState{
+			volume: v,
+		}
+		// enqueue the volume so that we process it
+		r.pendingVolumes.Enqueue(volume.ID, 0)
+		log.L.WithField("method", "(*volumes).Add").Debugf("Add Volume: %v", volume.VolumeID)
+	}
+}
+
+// Remove removes one or more volumes from this manager. callback is called
+// whenever the removal is successful.
+func (r *volumes) Remove(volumes []api.VolumeAssignment, callback func(id string)) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	for _, volume := range volumes {
+		// if we get a Remove call, then we always restart the retries and
+		// attempt removal.
+		v := volume.Copy()
+		r.volumes[volume.ID] = volumeState{
+			volume:         v,
+			remove:         true,
+			removeCallback: callback,
+		}
+		r.pendingVolumes.Enqueue(volume.ID, 0)
+	}
+}
+
+func (r *volumes) publishVolume(ctx context.Context, assignment *api.VolumeAssignment) error {
+	log.G(ctx).Info("attempting to publish volume")
+	p, err := r.plugins.Get(assignment.Driver.Name)
+	if err != nil {
+		return err
+	}
+
+	// even though this may have succeeded already, the call to NodeStageVolume
+	// is idempotent, so we can retry it every time.
+	if err := p.NodeStageVolume(ctx, assignment); err != nil {
+		return err
+	}
+
+	log.G(ctx).Debug("staging volume succeeded, attempting to publish volume")
+
+	return p.NodePublishVolume(ctx, assignment)
+}
+
+func (r *volumes) unpublishVolume(ctx context.Context, assignment *api.VolumeAssignment) error {
+	log.G(ctx).Info("attempting to unpublish volume")
+	p, err := r.plugins.Get(assignment.Driver.Name)
+	if err != nil {
+		return err
+	}
+
+	if err := p.NodeUnpublishVolume(ctx, assignment); err != nil {
+		return err
+	}
+
+	return p.NodeUnstageVolume(ctx, assignment)
+}
+
+func (r *volumes) Plugins() exec.VolumePluginManager {
+	return r.plugins
+}
+
+// taskRestrictedVolumesProvider restricts the ids to the task.
+type taskRestrictedVolumesProvider struct {
+	volumes   exec.VolumeGetter
+	volumeIDs map[string]struct{}
+}
+
+func (sp *taskRestrictedVolumesProvider) Get(volumeID string) (string, error) {
+	if _, ok := sp.volumeIDs[volumeID]; !ok {
+		return "", fmt.Errorf("task not authorized to access volume %s", volumeID)
+	}
+
+	return sp.volumes.Get(volumeID)
+}
+
+// Restrict provides a getter that only allows access to the volumes
+// referenced by the task.
+func Restrict(volumes exec.VolumeGetter, t *api.Task) exec.VolumeGetter {
+	vids := map[string]struct{}{}
+
+	for _, v := range t.Volumes {
+		vids[v.ID] = struct{}{}
+	}
+
+	return &taskRestrictedVolumesProvider{volumes: volumes, volumeIDs: vids}
+}

+ 18 - 2
vendor/github.com/docker/swarmkit/agent/dependency.go

@@ -1,7 +1,10 @@
 package agent
 
 import (
+	"github.com/docker/docker/pkg/plugingetter"
+
 	"github.com/docker/swarmkit/agent/configs"
+	"github.com/docker/swarmkit/agent/csi"
 	"github.com/docker/swarmkit/agent/exec"
 	"github.com/docker/swarmkit/agent/secrets"
 	"github.com/docker/swarmkit/api"
@@ -10,15 +13,18 @@ import (
 type dependencyManager struct {
 	secrets exec.SecretsManager
 	configs exec.ConfigsManager
+	volumes exec.VolumesManager
 }
 
 // NewDependencyManager creates a dependency manager object that wraps
 // objects which provide access to various dependency types.
-func NewDependencyManager() exec.DependencyManager {
-	return &dependencyManager{
+func NewDependencyManager(pg plugingetter.PluginGetter) exec.DependencyManager {
+	d := &dependencyManager{
 		secrets: secrets.NewManager(),
 		configs: configs.NewManager(),
 	}
+	d.volumes = csi.NewManager(pg, d.secrets)
+	return d
 }
 
 func (d *dependencyManager) Secrets() exec.SecretsManager {
@@ -29,9 +35,14 @@ func (d *dependencyManager) Configs() exec.ConfigsManager {
 	return d.configs
 }
 
+func (d *dependencyManager) Volumes() exec.VolumesManager {
+	return d.volumes
+}
+
 type dependencyGetter struct {
 	secrets exec.SecretGetter
 	configs exec.ConfigGetter
+	volumes exec.VolumeGetter
 }
 
 func (d *dependencyGetter) Secrets() exec.SecretGetter {
@@ -42,11 +53,16 @@ func (d *dependencyGetter) Configs() exec.ConfigGetter {
 	return d.configs
 }
 
+func (d *dependencyGetter) Volumes() exec.VolumeGetter {
+	return d.volumes
+}
+
 // Restrict provides getters that only allows access to the dependencies
 // referenced by the task.
 func Restrict(dependencies exec.DependencyManager, t *api.Task) exec.DependencyGetter {
 	return &dependencyGetter{
 		secrets: secrets.Restrict(dependencies.Secrets(), t),
 		configs: configs.Restrict(dependencies.Configs(), t),
+		volumes: csi.Restrict(dependencies.Volumes(), t),
 	}
 }

+ 11 - 9
vendor/github.com/docker/swarmkit/agent/exec/errors.go

@@ -29,6 +29,11 @@ var (
 	// ErrTaskNoop returns when a subsequent call to Do will not result in
 	// advancing the task. Callers should avoid calling Do until the task has been updated.
 	ErrTaskNoop = errors.New("exec: task noop")
+
+	// ErrDependencyNotReady is returned if a given dependency can be accessed
+	// through the Getter, but is not yet ready to be used. This is most
+	// relevant for Volumes, which must be staged and published on the node.
+	ErrDependencyNotReady error = errors.New("dependency not ready")
 )
 
 // ExitCoder is implemented by errors that have an exit code.
@@ -65,17 +70,14 @@ func (t temporary) Temporary() bool { return true }
 // IsTemporary returns true if the error or a recursive cause returns true for
 // temporary.
 func IsTemporary(err error) bool {
-	for err != nil {
-		if tmp, ok := err.(Temporary); ok && tmp.Temporary() {
-			return true
-		}
+	if tmp, ok := err.(Temporary); ok && tmp.Temporary() {
+		return true
+	}
 
-		cause := errors.Cause(err)
-		if cause == err {
-			break
-		}
+	cause := errors.Cause(err)
 
-		err = cause
+	if tmp, ok := cause.(Temporary); ok && tmp.Temporary() {
+		return true
 	}
 
 	return false

+ 44 - 0
vendor/github.com/docker/swarmkit/agent/exec/executor.go

@@ -35,11 +35,18 @@ type ConfigsProvider interface {
 	Configs() ConfigsManager
 }
 
+// VolumesProvider is implemented by objects that can store volumes,
+// typically an executor.
+type VolumesProvider interface {
+	Volumes() VolumesManager
+}
+
 // DependencyManager is a meta-object that can keep track of typed objects
 // such as secrets and configs.
 type DependencyManager interface {
 	SecretsProvider
 	ConfigsProvider
+	VolumesProvider
 }
 
 // DependencyGetter is a meta-object that can provide access to typed objects
@@ -47,6 +54,7 @@ type DependencyManager interface {
 type DependencyGetter interface {
 	Secrets() SecretGetter
 	Configs() ConfigGetter
+	Volumes() VolumeGetter
 }
 
 // SecretGetter contains secret data necessary for the Controller.
@@ -80,3 +88,39 @@ type ConfigsManager interface {
 	Remove(configs []string)   // remove the configs by ID
 	Reset()                    // remove all configs
 }
+
+// VolumeGetter contains volume data necessary for the Controller.
+type VolumeGetter interface {
+	// Get returns the the volume with a specific volume ID, if available.
+	// When the volume is not available, the return will be nil.
+	Get(volumeID string) (string, error)
+}
+
+// VolumesManager is the interface for volume storage and updates.
+type VolumesManager interface {
+	VolumeGetter
+
+	// Add adds one or more volumes
+	Add(volumes ...api.VolumeAssignment)
+	// Remove removes one or more volumes. The callback is called each time a
+	// volume is successfully removed with the ID of the volume removed.
+	//
+	// Remove takes a full VolumeAssignment because we may be instructed by the
+	// swarm manager to attempt removal of a Volume we don't know we have.
+	Remove(volumes []api.VolumeAssignment, callback func(string))
+	// Plugins returns the VolumePluginManager for this VolumesManager
+	Plugins() VolumePluginManager
+}
+
+// PluginManager is the interface for accessing the volume plugin manager from
+// the executor. This is identical to
+// github.com/docker/swarmkit/agent/csi/plugin.PluginManager, except the former
+// also includes a Get method for the VolumesManager to use. This does not
+// contain that Get method, to avoid having to import the Plugin type, and
+// because in this context, it is not needed.
+type VolumePluginManager interface {
+	// NodeInfo returns the NodeCSIInfo for each active plugin. Plugins which
+	// are added through Set but to which no connection has yet been
+	// successfully established will not be included.
+	NodeInfo(ctx context.Context) ([]*api.NodeCSIInfo, error)
+}

+ 54 - 6
vendor/github.com/docker/swarmkit/agent/reporter.go

@@ -15,28 +15,48 @@ type StatusReporter interface {
 	UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error
 }
 
+// Reporter receives updates to both task and volume status.
+type Reporter interface {
+	StatusReporter
+	ReportVolumeUnpublished(ctx context.Context, volumeID string) error
+}
+
 type statusReporterFunc func(ctx context.Context, taskID string, status *api.TaskStatus) error
 
 func (fn statusReporterFunc) UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error {
 	return fn(ctx, taskID, status)
 }
 
+type volumeReporterFunc func(ctx context.Context, volumeID string) error
+
+func (fn volumeReporterFunc) ReportVolumeUnpublished(ctx context.Context, volumeID string) error {
+	return fn(ctx, volumeID)
+}
+
+type statusReporterCombined struct {
+	statusReporterFunc
+	volumeReporterFunc
+}
+
 // statusReporter creates a reliable StatusReporter that will always succeed.
 // It handles several tasks at once, ensuring all statuses are reported.
 //
 // The reporter will continue reporting the current status until it succeeds.
 type statusReporter struct {
-	reporter StatusReporter
+	reporter Reporter
 	statuses map[string]*api.TaskStatus
-	mu       sync.Mutex
-	cond     sync.Cond
-	closed   bool
+	// volumes is a set of volumes which are to be reported unpublished.
+	volumes map[string]struct{}
+	mu      sync.Mutex
+	cond    sync.Cond
+	closed  bool
 }
 
-func newStatusReporter(ctx context.Context, upstream StatusReporter) *statusReporter {
+func newStatusReporter(ctx context.Context, upstream Reporter) *statusReporter {
 	r := &statusReporter{
 		reporter: upstream,
 		statuses: make(map[string]*api.TaskStatus),
+		volumes:  make(map[string]struct{}),
 	}
 
 	r.cond.L = &r.mu
@@ -65,6 +85,16 @@ func (sr *statusReporter) UpdateTaskStatus(ctx context.Context, taskID string, s
 	return nil
 }
 
+func (sr *statusReporter) ReportVolumeUnpublished(ctx context.Context, volumeID string) error {
+	sr.mu.Lock()
+	defer sr.mu.Unlock()
+
+	sr.volumes[volumeID] = struct{}{}
+	sr.cond.Signal()
+
+	return nil
+}
+
 func (sr *statusReporter) Close() error {
 	sr.mu.Lock()
 	defer sr.mu.Unlock()
@@ -92,7 +122,7 @@ func (sr *statusReporter) run(ctx context.Context) {
 	}()
 
 	for {
-		if len(sr.statuses) == 0 {
+		if len(sr.statuses) == 0 && len(sr.volumes) == 0 {
 			sr.cond.Wait()
 		}
 
@@ -125,5 +155,23 @@ func (sr *statusReporter) run(ctx context.Context) {
 				}
 			}
 		}
+
+		for volumeID := range sr.volumes {
+			delete(sr.volumes, volumeID)
+
+			sr.mu.Unlock()
+			err := sr.reporter.ReportVolumeUnpublished(ctx, volumeID)
+			sr.mu.Lock()
+
+			// reporter might be closed during ReportVolumeUnpublished call
+			if sr.closed {
+				return
+			}
+
+			if err != nil {
+				log.G(ctx).WithError(err).Error("status reporter failed to report volume status to agent")
+				sr.volumes[volumeID] = struct{}{}
+			}
+		}
 	}
 }

+ 18 - 0
vendor/github.com/docker/swarmkit/agent/session.go

@@ -428,6 +428,24 @@ func (s *session) sendTaskStatuses(ctx context.Context, updates ...*api.UpdateTa
 	return updates[n:], nil
 }
 
+// reportVolumeUnpublished sends a status update to the manager reporting that
+// all volumes in the slice are unpublished.
+func (s *session) reportVolumeUnpublished(ctx context.Context, volumes []string) error {
+	updates := []*api.UpdateVolumeStatusRequest_VolumeStatusUpdate{}
+	for _, volume := range volumes {
+		updates = append(updates, &api.UpdateVolumeStatusRequest_VolumeStatusUpdate{
+			ID:          volume,
+			Unpublished: true,
+		})
+	}
+	client := api.NewDispatcherClient(s.conn.ClientConn)
+	_, err := client.UpdateVolumeStatus(ctx, &api.UpdateVolumeStatusRequest{
+		SessionID: s.sessionID,
+		Updates:   updates,
+	})
+	return err
+}
+
 // sendError is used to send errors to errs channel and trigger session recreation
 func (s *session) sendError(err error) {
 	select {

+ 69 - 9
vendor/github.com/docker/swarmkit/agent/worker.go

@@ -23,11 +23,11 @@ type Worker interface {
 	// It is not safe to call any worker function after that.
 	Close()
 
-	// Assign assigns a complete set of tasks and configs/secrets to a
+	// Assign assigns a complete set of tasks and configs/secrets/volumes to a
 	// worker. Any items not included in this set will be removed.
 	Assign(ctx context.Context, assignments []*api.AssignmentChange) error
 
-	// Updates updates an incremental set of tasks or configs/secrets of
+	// Updates updates an incremental set of tasks or configs/secrets/volumes of
 	// the worker. Any items not included either in added or removed will
 	// remain untouched.
 	Update(ctx context.Context, assignments []*api.AssignmentChange) error
@@ -37,7 +37,7 @@ type Worker interface {
 	// by the worker.
 	//
 	// The listener will be removed if the context is cancelled.
-	Listen(ctx context.Context, reporter StatusReporter)
+	Listen(ctx context.Context, reporter Reporter)
 
 	// Report resends the status of all tasks controlled by this worker.
 	Report(ctx context.Context, reporter StatusReporter)
@@ -51,7 +51,7 @@ type Worker interface {
 
 // statusReporterKey protects removal map from panic.
 type statusReporterKey struct {
-	StatusReporter
+	Reporter
 }
 
 type worker struct {
@@ -152,7 +152,12 @@ func (w *worker) Assign(ctx context.Context, assignments []*api.AssignmentChange
 		return err
 	}
 
-	return reconcileTaskState(ctx, w, assignments, true)
+	err = reconcileTaskState(ctx, w, assignments, true)
+	if err != nil {
+		return err
+	}
+
+	return reconcileVolumes(ctx, w, assignments)
 }
 
 // Update updates the set of tasks, configs, and secrets for the worker.
@@ -184,7 +189,12 @@ func (w *worker) Update(ctx context.Context, assignments []*api.AssignmentChange
 		return err
 	}
 
-	return reconcileTaskState(ctx, w, assignments, false)
+	err = reconcileTaskState(ctx, w, assignments, false)
+	if err != nil {
+		return err
+	}
+
+	return reconcileVolumes(ctx, w, assignments)
 }
 
 func reconcileTaskState(ctx context.Context, w *worker, assignments []*api.AssignmentChange, fullSnapshot bool) error {
@@ -409,7 +419,57 @@ func reconcileConfigs(ctx context.Context, w *worker, assignments []*api.Assignm
 	return nil
 }
 
-func (w *worker) Listen(ctx context.Context, reporter StatusReporter) {
+// reconcileVolumes reconciles the CSI volumes on this node. It does not need
+// fullSnapshot like other reconcile functions because volumes are non-trivial
+// and are never reset.
+func reconcileVolumes(ctx context.Context, w *worker, assignments []*api.AssignmentChange) error {
+	var (
+		updatedVolumes []api.VolumeAssignment
+		removedVolumes []api.VolumeAssignment
+	)
+	for _, a := range assignments {
+		if r := a.Assignment.GetVolume(); r != nil {
+			switch a.Action {
+			case api.AssignmentChange_AssignmentActionUpdate:
+				updatedVolumes = append(updatedVolumes, *r)
+			case api.AssignmentChange_AssignmentActionRemove:
+				removedVolumes = append(removedVolumes, *r)
+			}
+
+		}
+	}
+
+	volumesProvider, ok := w.executor.(exec.VolumesProvider)
+	if !ok {
+		if len(updatedVolumes) != 0 || len(removedVolumes) != 0 {
+			log.G(ctx).Warn("volumes update ignored; executor does not support volumes")
+		}
+		return nil
+	}
+
+	volumes := volumesProvider.Volumes()
+
+	log.G(ctx).WithFields(logrus.Fields{
+		"len(updatedVolumes)": len(updatedVolumes),
+		"len(removedVolumes)": len(removedVolumes),
+	}).Debug("(*worker).reconcileVolumes")
+
+	volumes.Remove(removedVolumes, func(id string) {
+		w.mu.RLock()
+		defer w.mu.RUnlock()
+
+		for key := range w.listeners {
+			if err := key.Reporter.ReportVolumeUnpublished(ctx, id); err != nil {
+				log.G(ctx).WithError(err).Errorf("failed reporting volume unpublished for reporter %v", key.Reporter)
+			}
+		}
+	})
+	volumes.Add(updatedVolumes...)
+
+	return nil
+}
+
+func (w *worker) Listen(ctx context.Context, reporter Reporter) {
 	w.mu.Lock()
 	defer w.mu.Unlock()
 
@@ -526,8 +586,8 @@ func (w *worker) updateTaskStatus(ctx context.Context, tx *bolt.Tx, taskID strin
 
 	// broadcast the task status out.
 	for key := range w.listeners {
-		if err := key.StatusReporter.UpdateTaskStatus(ctx, taskID, status); err != nil {
-			log.G(ctx).WithError(err).Errorf("failed updating status for reporter %v", key.StatusReporter)
+		if err := key.Reporter.UpdateTaskStatus(ctx, taskID, status); err != nil {
+			log.G(ctx).WithError(err).Errorf("failed updating status for reporter %v", key.Reporter)
 		}
 	}
 

File diff suppressed because it is too large
+ 866 - 182
vendor/github.com/docker/swarmkit/api/api.pb.txt


File diff suppressed because it is too large
+ 887 - 187
vendor/github.com/docker/swarmkit/api/control.pb.go


+ 82 - 0
vendor/github.com/docker/swarmkit/api/control.proto

@@ -246,6 +246,34 @@ service Control {
 	rpc RemoveResource(RemoveResourceRequest) returns (RemoveResourceResponse) {
 		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
 	}
+
+	// --- volumes APIs ---
+
+	// CreateVolume returns a `CreateVolumeResponse` with a `Volume` based on the
+	// provided `CreateVolumeRequest.VolumeSpec`.
+	// - Returns `InvalidArgument` if the `CreateVolumeRequest.VolumeSpec` is
+	//   malformed.
+	rpc CreateVolume(CreateVolumeRequest) returns (CreateVolumeResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// GetVolume returns a `GetVolumeResponse` with a Volume with the same ID
+	// as `GetVolumeRequest.ID`
+	rpc GetVolume(GetVolumeRequest) returns (GetVolumeResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	rpc UpdateVolume(UpdateVolumeRequest) returns (UpdateVolumeResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	rpc ListVolumes(ListVolumesRequest) returns (ListVolumesResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	rpc RemoveVolume(RemoveVolumeRequest) returns (RemoveVolumeResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
 }
 
 message GetNodeRequest {
@@ -787,3 +815,57 @@ message ListResourcesRequest {
 message ListResourcesResponse {
 	repeated Resource resources = 1;
 }
+
+message CreateVolumeRequest {
+	VolumeSpec spec = 1;
+}
+
+message CreateVolumeResponse {
+	Volume volume = 1;
+}
+
+message GetVolumeRequest {
+	string volume_id = 1;
+}
+
+message GetVolumeResponse {
+	Volume volume = 1;
+}
+
+message UpdateVolumeRequest {
+	string volume_id = 1;
+
+	Version volume_version = 2;
+
+	VolumeSpec spec = 3;
+}
+
+message UpdateVolumeResponse {
+	Volume volume = 1;
+}
+
+message ListVolumesRequest {
+	message Filters {
+		repeated string names = 1;
+		repeated string id_prefixes = 2;
+		map<string, string> labels = 3;
+		repeated string name_prefixes = 4;
+		repeated string groups = 5;
+		repeated string drivers = 6;
+	}
+
+	Filters filters = 1;
+}
+
+message ListVolumesResponse {
+	repeated Volume volumes = 1;
+}
+
+message RemoveVolumeRequest {
+	string volume_id = 1;
+	// Force forces the volume to be deleted from swarmkit, regardless of
+	// whether its current state would permit such an action.
+	bool force = 2;
+}
+
+message RemoveVolumeResponse {}

File diff suppressed because it is too large
+ 860 - 78
vendor/github.com/docker/swarmkit/api/dispatcher.pb.go


+ 34 - 1
vendor/github.com/docker/swarmkit/api/dispatcher.proto

@@ -42,6 +42,13 @@ service Dispatcher { // maybe dispatch, al likes this
 		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
 	};
 
+	// UpdateVolumeStatus updates the status of a Volume. Like
+	// UpdateTaskStatus, the node should send such updates on every status
+	// change of its volumes.
+	rpc UpdateVolumeStatus(UpdateVolumeStatusRequest) returns (UpdateVolumeStatusResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+	};
+
 	// Tasks is a stream of tasks state for node. Each message contains full list
 	// of tasks which should be run on node, if task is not present in that list,
 	// it should be terminated.
@@ -155,10 +162,35 @@ message UpdateTaskStatusRequest {
 	repeated TaskStatusUpdate updates = 3;
 }
 
-message  UpdateTaskStatusResponse{
+message UpdateTaskStatusResponse{
 	// void
 }
 
+message UpdateVolumeStatusRequest {
+	string session_id = 1;
+
+	message VolumeStatusUpdate {
+		// ID is the ID of the volume being updated. This is the Swarmkit ID,
+		// not the CSI VolumeID.
+		string id = 1;
+		// Unpublished is set to true when the volume is affirmatively
+		// unpublished on the Node side. We don't need to report that a Volume
+		// is published on the node; as soon as the Volume is assigned to
+		// the Node, we must assume that it has been published until informed
+		// otherwise.
+		//
+		// Further, the Node must not send unpublished = true unless it will
+		// definitely no longer attempt to call NodePublishVolume.
+		bool unpublished = 2;
+	}
+
+	repeated VolumeStatusUpdate updates = 2;
+}
+
+message UpdateVolumeStatusResponse {
+	// empty on purpose
+}
+
 message TasksRequest {
 	string session_id = 1;
 }
@@ -178,6 +210,7 @@ message Assignment {
 		Task task = 1;
 		Secret secret = 2;
 		Config config = 3;
+		VolumeAssignment volume = 4;
 	}
 }
 

File diff suppressed because it is too large
+ 720 - 201
vendor/github.com/docker/swarmkit/api/objects.pb.go


+ 44 - 0
vendor/github.com/docker/swarmkit/api/objects.proto

@@ -269,6 +269,10 @@ message Task {
 	// JobIteration is the iteration number of the Job-mode Service that this
 	// task belongs to.
 	Version job_iteration = 16;
+
+	// Volumes is a list of VolumeAttachments for this task. It specifies which
+	// volumes this task is allocated.
+	repeated VolumeAttachment volumes = 17;
 }
 
 // NetworkAttachment specifies the network parameters of attachment to
@@ -510,3 +514,43 @@ message Extension {
 	// // Indices, with values expressed as Go templates.
 	//repeated IndexEntry index_templates = 6;
 }
+
+// Volume is the top-level object describing a volume usable by Swarmkit. The
+// Volume contains the user's VolumeSpec, the Volume's status, and the Volume
+// object that was returned by the CSI Plugin when the volume was created.
+message Volume {
+	option (docker.protobuf.plugin.store_object) = {
+		watch_selectors: {
+			id: true
+			id_prefix: true
+			name: true
+			name_prefix: true
+			custom: true
+			custom_prefix: true
+		}
+	};
+
+	// ID is the swarmkit-internal ID for this volume object. This has no
+	// relation to the CSI volume identifier provided by the CSI Plugin.
+	string id = 1;
+	Meta meta = 2 [(gogoproto.nullable) = false];
+
+	// Spec is the desired state of the Volume, as provided by the user.
+	VolumeSpec spec = 3 [(gogoproto.nullable) = false];
+
+	// PublishStatus is the status of the volume as it pertains to the various
+	// nodes it is in use on.
+	repeated VolumePublishStatus publish_status = 4;
+
+	// VolumeInfo contains information about the volume originating from the
+	// CSI plugin when the volume is created.
+	VolumeInfo volume_info = 5;
+
+	// PendingDelete indicates that this Volume is being removed from Swarm.
+	// Before a Volume can be removed, we must call the DeleteVolume on the
+	// Controller. Because of this, we cannot immediately remove the Volume
+	// when a user wishes to delete it. Instead, we will mark a Volume with
+	// PendingDelete = true, which instructs Swarm to go through the work of
+	// removing the volume and then delete it when finished.
+	bool pending_delete = 6;
+}

+ 164 - 66
vendor/github.com/docker/swarmkit/api/raft.pb.go

@@ -6,10 +6,10 @@ package api
 import (
 	context "context"
 	fmt "fmt"
-	raftpb "github.com/coreos/etcd/raft/raftpb"
 	github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
 	raftselector "github.com/docker/swarmkit/manager/raftselector"
 	proto "github.com/gogo/protobuf/proto"
+	raftpb "go.etcd.io/etcd/raft/v3/raftpb"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
 	metadata "google.golang.org/grpc/metadata"
@@ -532,6 +532,7 @@ type StoreAction struct {
 	//	*StoreAction_Resource
 	//	*StoreAction_Extension
 	//	*StoreAction_Config
+	//	*StoreAction_Volume
 	Target isStoreAction_Target `protobuf_oneof:"target"`
 }
 
@@ -600,6 +601,9 @@ type StoreAction_Extension struct {
 type StoreAction_Config struct {
 	Config *Config `protobuf:"bytes,10,opt,name=config,proto3,oneof" json:"config,omitempty"`
 }
+type StoreAction_Volume struct {
+	Volume *Volume `protobuf:"bytes,11,opt,name=volume,proto3,oneof" json:"volume,omitempty"`
+}
 
 func (*StoreAction_Node) isStoreAction_Target()      {}
 func (*StoreAction_Service) isStoreAction_Target()   {}
@@ -610,6 +614,7 @@ func (*StoreAction_Secret) isStoreAction_Target()    {}
 func (*StoreAction_Resource) isStoreAction_Target()  {}
 func (*StoreAction_Extension) isStoreAction_Target() {}
 func (*StoreAction_Config) isStoreAction_Target()    {}
+func (*StoreAction_Volume) isStoreAction_Target()    {}
 
 func (m *StoreAction) GetTarget() isStoreAction_Target {
 	if m != nil {
@@ -681,6 +686,13 @@ func (m *StoreAction) GetConfig() *Config {
 	return nil
 }
 
+func (m *StoreAction) GetVolume() *Volume {
+	if x, ok := m.GetTarget().(*StoreAction_Volume); ok {
+		return x.Volume
+	}
+	return nil
+}
+
 // XXX_OneofWrappers is for the internal use of the proto package.
 func (*StoreAction) XXX_OneofWrappers() []interface{} {
 	return []interface{}{
@@ -693,6 +705,7 @@ func (*StoreAction) XXX_OneofWrappers() []interface{} {
 		(*StoreAction_Resource)(nil),
 		(*StoreAction_Extension)(nil),
 		(*StoreAction_Config)(nil),
+		(*StoreAction_Volume)(nil),
 	}
 }
 
@@ -718,72 +731,73 @@ func init() {
 }
 
 var fileDescriptor_d2c32e1e3c930c15 = []byte{
-	// 1028 bytes of a gzipped FileDescriptorProto
+	// 1046 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0x41, 0x73, 0xdb, 0x44,
-	0x14, 0xc7, 0x25, 0x5b, 0x75, 0x9a, 0x97, 0x36, 0x09, 0x5b, 0x12, 0x14, 0xb5, 0x28, 0xae, 0xda,
-	0x19, 0x9c, 0x90, 0xc8, 0x83, 0x61, 0xa6, 0x4c, 0xa1, 0x87, 0x38, 0xf1, 0x4c, 0x4c, 0x5a, 0xa7,
-	0xa3, 0x24, 0xd0, 0x5b, 0x90, 0xa5, 0x8d, 0x2b, 0x1c, 0x6b, 0xcd, 0xee, 0x3a, 0x81, 0x0b, 0xd3,
-	0x23, 0xe4, 0xc4, 0x0d, 0x86, 0x99, 0x0e, 0x07, 0x38, 0xf7, 0x03, 0xf0, 0x01, 0x98, 0x0c, 0xa7,
-	0xde, 0xe8, 0x29, 0x43, 0x9d, 0x3b, 0x7c, 0x05, 0x66, 0x57, 0x52, 0x12, 0x6c, 0xd9, 0xf1, 0x81,
-	0x4b, 0xb2, 0xa3, 0xfd, 0xfd, 0xdf, 0xff, 0xed, 0xee, 0xdb, 0xb7, 0x86, 0x85, 0x46, 0xc0, 0x9f,
-	0x76, 0xea, 0xb6, 0x47, 0x5a, 0x45, 0x9f, 0x78, 0x4d, 0x4c, 0x8b, 0xec, 0xd0, 0xa5, 0xad, 0x66,
-	0xc0, 0x8b, 0x6e, 0x3b, 0x28, 0x52, 0x77, 0x8f, 0xdb, 0x6d, 0x4a, 0x38, 0x41, 0x28, 0x9a, 0xb7,
-	0x93, 0x79, 0xfb, 0xe0, 0x3d, 0x63, 0xe9, 0x12, 0x39, 0xa9, 0x7f, 0x81, 0x3d, 0xce, 0xa2, 0x08,
-	0xc6, 0xe2, 0x25, 0x34, 0xff, 0xba, 0x8d, 0x13, 0x76, 0xf9, 0x02, 0xeb, 0x11, 0x8a, 0x09, 0x2b,
-	0x62, 0xee, 0xf9, 0x32, 0x21, 0xf9, 0xa7, 0x5d, 0xbf, 0x90, 0x9c, 0xf1, 0x66, 0x83, 0x34, 0x88,
-	0x1c, 0x16, 0xc5, 0x28, 0xfe, 0x7a, 0x6f, 0x88, 0xa1, 0x24, 0xea, 0x9d, 0xbd, 0x62, 0x7b, 0xbf,
-	0xd3, 0x08, 0xc2, 0xf8, 0x5f, 0x24, 0xb4, 0x5e, 0xa8, 0x00, 0x8e, 0xbb, 0xc7, 0x1f, 0xe1, 0x56,
-	0x1d, 0x53, 0x74, 0x07, 0xc6, 0x84, 0xd7, 0x6e, 0xe0, 0xeb, 0x6a, 0x5e, 0x2d, 0x68, 0x65, 0xe8,
-	0x9e, 0xcc, 0xe7, 0x04, 0x50, 0x5d, 0x73, 0x72, 0x62, 0xaa, 0xea, 0x0b, 0x28, 0x24, 0x3e, 0x16,
-	0x50, 0x26, 0xaf, 0x16, 0xc6, 0x23, 0xa8, 0x46, 0x7c, 0x2c, 0x20, 0x31, 0x55, 0xf5, 0x11, 0x02,
-	0xcd, 0xf5, 0x7d, 0xaa, 0x67, 0x05, 0xe1, 0xc8, 0x31, 0x2a, 0x43, 0x8e, 0x71, 0x97, 0x77, 0x98,
-	0xae, 0xe5, 0xd5, 0xc2, 0x44, 0xe9, 0xae, 0xdd, 0xbf, 0xd3, 0xf6, 0x79, 0x36, 0x5b, 0x92, 0x2d,
-	0x6b, 0xc7, 0x27, 0xf3, 0x8a, 0x13, 0x2b, 0xad, 0xdb, 0x30, 0xf1, 0x09, 0x09, 0x42, 0x07, 0x7f,
-	0xd9, 0xc1, 0x8c, 0x9f, 0xd9, 0xa8, 0xe7, 0x36, 0xd6, 0x4f, 0x2a, 0x5c, 0x8b, 0x18, 0xd6, 0x26,
-	0x21, 0xc3, 0xa3, 0xad, 0xea, 0x43, 0x18, 0x6b, 0x49, 0x5b, 0xa6, 0x67, 0xf2, 0xd9, 0xc2, 0x44,
-	0xc9, 0x1c, 0x9e, 0x9d, 0x93, 0xe0, 0xe8, 0x5d, 0x98, 0xa2, 0xb8, 0x45, 0x0e, 0xb0, 0xbf, 0x9b,
-	0x44, 0xc8, 0xe6, 0xb3, 0x05, 0xad, 0x9c, 0x99, 0x56, 0x9c, 0xc9, 0x78, 0x2a, 0x12, 0x31, 0xab,
-	0x0c, 0xd7, 0x1e, 0x62, 0xf7, 0x00, 0x27, 0x0b, 0x28, 0x81, 0x26, 0x76, 0x4c, 0x26, 0x76, 0xb9,
-	0xa7, 0x64, 0xad, 0x29, 0xb8, 0x1e, 0xc7, 0x88, 0x16, 0x68, 0x3d, 0x84, 0xb9, 0xc7, 0x94, 0x78,
-	0x98, 0xb1, 0x88, 0x65, 0xcc, 0x6d, 0x9c, 0x39, 0x2c, 0x88, 0x85, 0xc9, 0x2f, 0xb1, 0xc9, 0x94,
-	0x1d, 0x95, 0x95, 0x9d, 0x80, 0xc9, 0xfc, 0x7d, 0xed, 0xd9, 0x0f, 0x96, 0x62, 0xdd, 0x02, 0x23,
-	0x2d, 0x5a, 0xec, 0xb5, 0x01, 0xfa, 0x16, 0xa7, 0xd8, 0x6d, 0xfd, 0x1f, 0x56, 0x37, 0x61, 0x2e,
-	0x25, 0x58, 0xec, 0xf4, 0x31, 0xcc, 0x38, 0x98, 0x91, 0xfd, 0x03, 0xbc, 0xe2, 0xfb, 0x54, 0xa4,
-	0x13, 0xdb, 0x8c, 0x72, 0x9e, 0xd6, 0x12, 0xcc, 0xf6, 0xaa, 0xe3, 0x72, 0x48, 0xab, 0x99, 0x7d,
-	0xb8, 0x51, 0x0d, 0x39, 0xa6, 0xa1, 0xbb, 0x2f, 0xe2, 0x24, 0x4e, 0xb3, 0x90, 0x39, 0x33, 0xc9,
-	0x75, 0x4f, 0xe6, 0x33, 0xd5, 0x35, 0x27, 0x13, 0xf8, 0xe8, 0x01, 0xe4, 0x5c, 0x8f, 0x07, 0x24,
-	0x8c, 0x6b, 0x65, 0x3e, 0xed, 0xdc, 0xb6, 0x38, 0xa1, 0x78, 0x45, 0x62, 0x49, 0x11, 0x47, 0x22,
-	0xeb, 0x77, 0x0d, 0x26, 0x2e, 0xcc, 0xa2, 0x8f, 0xce, 0xc2, 0x09, 0xab, 0xc9, 0xd2, 0x9d, 0x4b,
-	0xc2, 0x6d, 0x04, 0xa1, 0x9f, 0x04, 0x43, 0x76, 0x5c, 0x41, 0x19, 0xb9, 0xe3, 0x7a, 0x9a, 0x54,
-	0xdc, 0xcd, 0x75, 0x25, 0xaa, 0x1e, 0x74, 0x0f, 0xc6, 0x18, 0xa6, 0x07, 0x81, 0x87, 0xe5, 0xe5,
-	0x9c, 0x28, 0xdd, 0x4c, 0x75, 0x8b, 0x90, 0x75, 0xc5, 0x49, 0x68, 0x61, 0xc4, 0x5d, 0xd6, 0x8c,
-	0x2f, 0x6f, 0xaa, 0xd1, 0xb6, 0xcb, 0x9a, 0xc2, 0x48, 0x70, 0xc2, 0x28, 0xc4, 0xfc, 0x90, 0xd0,
-	0xa6, 0x7e, 0x65, 0xb0, 0x51, 0x2d, 0x42, 0x84, 0x51, 0x4c, 0x0b, 0xa1, 0xb7, 0xdf, 0x61, 0x1c,
-	0x53, 0x3d, 0x37, 0x58, 0xb8, 0x1a, 0x21, 0x42, 0x18, 0xd3, 0xe8, 0x03, 0xc8, 0x31, 0xec, 0x51,
-	0xcc, 0xf5, 0x31, 0xa9, 0x33, 0xd2, 0x57, 0x26, 0x88, 0x75, 0xd1, 0x52, 0xe4, 0x08, 0xdd, 0x87,
-	0xab, 0x14, 0x33, 0xd2, 0xa1, 0x1e, 0xd6, 0xaf, 0x4a, 0xdd, 0xad, 0xd4, 0x6b, 0x18, 0x33, 0xeb,
-	0x8a, 0x73, 0xc6, 0xa3, 0x07, 0x30, 0x8e, 0xbf, 0xe2, 0x38, 0x64, 0xe2, 0xf0, 0xc6, 0xa5, 0xf8,
-	0xed, 0x34, 0x71, 0x25, 0x81, 0xd6, 0x15, 0xe7, 0x5c, 0x21, 0x12, 0xf6, 0x48, 0xb8, 0x17, 0x34,
-	0x74, 0x18, 0x9c, 0xf0, 0xaa, 0x24, 0x44, 0xc2, 0x11, 0x5b, 0xbe, 0x0a, 0x39, 0xee, 0xd2, 0x06,
-	0xe6, 0x8b, 0xff, 0xa8, 0x30, 0xd5, 0x53, 0x17, 0xe8, 0x1d, 0x18, 0xdb, 0xa9, 0x6d, 0xd4, 0x36,
-	0x3f, 0xab, 0x4d, 0x2b, 0x86, 0x71, 0xf4, 0x3c, 0x3f, 0xdb, 0x43, 0xec, 0x84, 0xcd, 0x90, 0x1c,
-	0x86, 0xa8, 0x04, 0x37, 0xb6, 0xb6, 0x37, 0x9d, 0xca, 0xee, 0xca, 0xea, 0x76, 0x75, 0xb3, 0xb6,
-	0xbb, 0xea, 0x54, 0x56, 0xb6, 0x2b, 0xd3, 0xaa, 0x31, 0x77, 0xf4, 0x3c, 0x3f, 0xd3, 0x23, 0x5a,
-	0xa5, 0xd8, 0xe5, 0xb8, 0x4f, 0xb3, 0xf3, 0x78, 0x4d, 0x68, 0x32, 0xa9, 0x9a, 0x9d, 0xb6, 0x9f,
-	0xa6, 0x71, 0x2a, 0x8f, 0x36, 0x3f, 0xad, 0x4c, 0x67, 0x53, 0x35, 0x8e, 0x6c, 0x97, 0xc6, 0x5b,
-	0xdf, 0xfe, 0x62, 0x2a, 0xbf, 0xfd, 0x6a, 0xf6, 0xae, 0xae, 0xf4, 0x73, 0x16, 0x34, 0x71, 0x43,
-	0xd1, 0x91, 0x0a, 0xa8, 0xbf, 0x4d, 0xa1, 0xe5, 0xb4, 0x1d, 0x1c, 0xd8, 0x1c, 0x0d, 0x7b, 0x54,
-	0x3c, 0xee, 0x49, 0x33, 0x7f, 0xbc, 0xf8, 0xfb, 0xc7, 0xcc, 0x14, 0x5c, 0x97, 0xfc, 0x72, 0xcb,
-	0x0d, 0xdd, 0x06, 0xa6, 0xe8, 0x3b, 0x15, 0xde, 0xe8, 0x6b, 0x64, 0x68, 0x29, 0xfd, 0x1a, 0xa7,
-	0x37, 0x4f, 0x63, 0x79, 0x44, 0x7a, 0x68, 0x26, 0x05, 0x15, 0x7d, 0x03, 0x93, 0xff, 0x6d, 0x7c,
-	0x68, 0x61, 0x50, 0x39, 0xf7, 0xb5, 0x56, 0x63, 0x71, 0x14, 0x74, 0x68, 0x06, 0xa5, 0x3f, 0x55,
-	0x98, 0x3c, 0x7f, 0xb2, 0xd8, 0xd3, 0xa0, 0x8d, 0x3e, 0x07, 0x4d, 0x3c, 0xc8, 0x28, 0xb5, 0x4d,
-	0x5e, 0x78, 0xce, 0x8d, 0xfc, 0x60, 0x60, 0xf8, 0x01, 0x78, 0x70, 0x45, 0x3e, 0x89, 0x28, 0x35,
-	0xc2, 0xc5, 0x17, 0xd7, 0xb8, 0x3d, 0x84, 0x18, 0x6a, 0x52, 0xbe, 0x7b, 0xfc, 0xda, 0x54, 0x5e,
-	0xbd, 0x36, 0x95, 0x67, 0x5d, 0x53, 0x3d, 0xee, 0x9a, 0xea, 0xcb, 0xae, 0xa9, 0xfe, 0xd5, 0x35,
-	0xd5, 0xef, 0x4f, 0x4d, 0xe5, 0xe5, 0xa9, 0xa9, 0xbc, 0x3a, 0x35, 0x95, 0x27, 0xd9, 0x27, 0x5a,
-	0x3d, 0x27, 0x7f, 0x5d, 0xbd, 0xff, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc9, 0x42, 0x09, 0xd0,
-	0x76, 0x0a, 0x00, 0x00,
+	0x14, 0xc7, 0x25, 0x5b, 0x75, 0x92, 0xe7, 0x36, 0x09, 0x5b, 0x12, 0x14, 0xb5, 0x28, 0xae, 0xdb,
+	0x19, 0x9c, 0x90, 0xc8, 0x83, 0xcb, 0x4c, 0x99, 0x42, 0x0f, 0x71, 0xe2, 0x99, 0x98, 0xb4, 0x4e,
+	0x47, 0x49, 0x4a, 0x6f, 0x41, 0x96, 0x36, 0xae, 0xb0, 0xad, 0x35, 0xbb, 0x6b, 0x07, 0x2e, 0x4c,
+	0x8f, 0x90, 0x13, 0x37, 0x18, 0x66, 0x3a, 0x1c, 0xe0, 0xdc, 0x0f, 0xc0, 0x27, 0xc8, 0x70, 0xea,
+	0x8d, 0x9e, 0x32, 0xd4, 0xb9, 0xc3, 0x17, 0xe0, 0xc0, 0xec, 0x4a, 0x4a, 0x42, 0x22, 0x3b, 0x3e,
+	0x70, 0xd2, 0x8e, 0xf7, 0xf7, 0x7f, 0xff, 0xb7, 0xbb, 0x6f, 0xdf, 0x1a, 0x16, 0x1a, 0x3e, 0x7f,
+	0xd6, 0xad, 0x5b, 0x2e, 0x69, 0x17, 0x3d, 0xe2, 0x36, 0x31, 0x2d, 0xb2, 0x7d, 0x87, 0xb6, 0x9b,
+	0x3e, 0x2f, 0x3a, 0x1d, 0xbf, 0x48, 0x9d, 0x3d, 0x6e, 0x75, 0x28, 0xe1, 0x04, 0xa1, 0x70, 0xde,
+	0x8a, 0xe7, 0xad, 0xde, 0x07, 0xc6, 0xd2, 0x25, 0x72, 0x52, 0xff, 0x02, 0xbb, 0x9c, 0x85, 0x11,
+	0x8c, 0xc5, 0x4b, 0x68, 0xfe, 0x75, 0x07, 0xc7, 0xec, 0x42, 0x83, 0x58, 0x98, 0xbb, 0x9e, 0xe5,
+	0x93, 0xa2, 0xf8, 0xca, 0x4c, 0x8a, 0xbd, 0xbb, 0xf2, 0xdb, 0xa9, 0x9f, 0x49, 0xcc, 0x78, 0xbb,
+	0x41, 0x1a, 0x44, 0x0e, 0x8b, 0x62, 0x14, 0xfd, 0x7a, 0x6f, 0x88, 0x99, 0x24, 0xea, 0xdd, 0xbd,
+	0x62, 0xa7, 0xd5, 0x6d, 0xf8, 0x41, 0xf4, 0x09, 0x85, 0xf9, 0x97, 0x2a, 0x80, 0xed, 0xec, 0xf1,
+	0x47, 0xb8, 0x5d, 0xc7, 0x14, 0xdd, 0x86, 0x31, 0xe1, 0xb5, 0xeb, 0x7b, 0xba, 0x9a, 0x53, 0x0b,
+	0x5a, 0x19, 0xfa, 0x47, 0xf3, 0x19, 0x01, 0x54, 0xd7, 0xec, 0x8c, 0x98, 0xaa, 0x7a, 0x02, 0x0a,
+	0x88, 0x87, 0x05, 0x94, 0xca, 0xa9, 0x85, 0x89, 0x10, 0xaa, 0x11, 0x0f, 0x0b, 0x48, 0x4c, 0x55,
+	0x3d, 0x84, 0x40, 0x73, 0x3c, 0x8f, 0xea, 0x69, 0x41, 0xd8, 0x72, 0x8c, 0xca, 0x90, 0x61, 0xdc,
+	0xe1, 0x5d, 0xa6, 0x6b, 0x39, 0xb5, 0x90, 0x2d, 0xdd, 0xb1, 0x2e, 0xee, 0xb2, 0x75, 0x9a, 0xcd,
+	0x96, 0x64, 0xcb, 0xda, 0xe1, 0xd1, 0xbc, 0x62, 0x47, 0xca, 0xfc, 0x2d, 0xc8, 0x7e, 0x4a, 0xfc,
+	0xc0, 0xc6, 0x5f, 0x76, 0x31, 0xe3, 0x27, 0x36, 0xea, 0xa9, 0x4d, 0xfe, 0x27, 0x15, 0xae, 0x86,
+	0x0c, 0xeb, 0x90, 0x80, 0xe1, 0xd1, 0x56, 0xf5, 0x11, 0x8c, 0xb5, 0xa5, 0x2d, 0xd3, 0x53, 0xb9,
+	0x74, 0x21, 0x5b, 0x32, 0x87, 0x67, 0x67, 0xc7, 0x38, 0x7a, 0x1f, 0xa6, 0x28, 0x6e, 0x93, 0x1e,
+	0xf6, 0x76, 0xe3, 0x08, 0xe9, 0x5c, 0xba, 0xa0, 0x95, 0x53, 0xd3, 0x8a, 0x3d, 0x19, 0x4d, 0x85,
+	0x22, 0x96, 0x2f, 0xc3, 0xd5, 0x87, 0xd8, 0xe9, 0xe1, 0x78, 0x01, 0x25, 0xd0, 0xc4, 0x8e, 0xc9,
+	0xc4, 0x2e, 0xf7, 0x94, 0x6c, 0x7e, 0x0a, 0xae, 0x45, 0x31, 0xc2, 0x05, 0xe6, 0x1f, 0xc2, 0xdc,
+	0x63, 0x4a, 0x5c, 0xcc, 0x58, 0xc8, 0x32, 0xe6, 0x34, 0x4e, 0x1c, 0x16, 0xc4, 0xc2, 0xe4, 0x2f,
+	0x91, 0xc9, 0x94, 0x15, 0x96, 0x95, 0x15, 0x83, 0xf1, 0xfc, 0x7d, 0xed, 0xf9, 0x0f, 0x79, 0x25,
+	0x7f, 0x13, 0x8c, 0xa4, 0x68, 0x91, 0xd7, 0x06, 0xe8, 0x5b, 0x9c, 0x62, 0xa7, 0xfd, 0x7f, 0x58,
+	0xdd, 0x80, 0xb9, 0x84, 0x60, 0x91, 0xd3, 0x27, 0x30, 0x63, 0x63, 0x46, 0x5a, 0x3d, 0xbc, 0xe2,
+	0x79, 0x54, 0xa4, 0x13, 0xd9, 0x8c, 0x72, 0x9e, 0xf9, 0x25, 0x98, 0x3d, 0xaf, 0x8e, 0xca, 0x21,
+	0xa9, 0x66, 0x5a, 0x70, 0xbd, 0x1a, 0x70, 0x4c, 0x03, 0xa7, 0x25, 0xe2, 0xc4, 0x4e, 0xb3, 0x90,
+	0x3a, 0x31, 0xc9, 0xf4, 0x8f, 0xe6, 0x53, 0xd5, 0x35, 0x3b, 0xe5, 0x7b, 0xe8, 0x01, 0x64, 0x1c,
+	0x97, 0xfb, 0x24, 0x88, 0x6a, 0x65, 0x3e, 0xe9, 0xdc, 0xb6, 0x38, 0xa1, 0x78, 0x45, 0x62, 0x71,
+	0x11, 0x87, 0xa2, 0xfc, 0x3f, 0x1a, 0x64, 0xcf, 0xcc, 0xa2, 0x8f, 0x4f, 0xc2, 0x09, 0xab, 0xc9,
+	0xd2, 0xed, 0x4b, 0xc2, 0x6d, 0xf8, 0x81, 0x17, 0x07, 0x43, 0x56, 0x54, 0x41, 0x29, 0xb9, 0xe3,
+	0x7a, 0x92, 0x54, 0xdc, 0xcd, 0x75, 0x25, 0xac, 0x1e, 0x74, 0x0f, 0xc6, 0x18, 0xa6, 0x3d, 0xdf,
+	0xc5, 0xf2, 0x72, 0x66, 0x4b, 0x37, 0x12, 0xdd, 0x42, 0x64, 0x5d, 0xb1, 0x63, 0x5a, 0x18, 0x71,
+	0x87, 0x35, 0xa3, 0xcb, 0x9b, 0x68, 0xb4, 0xed, 0xb0, 0xa6, 0x30, 0x12, 0x9c, 0x30, 0x0a, 0x30,
+	0xdf, 0x27, 0xb4, 0xa9, 0x5f, 0x19, 0x6c, 0x54, 0x0b, 0x11, 0x61, 0x14, 0xd1, 0x42, 0xe8, 0xb6,
+	0xba, 0x8c, 0x63, 0xaa, 0x67, 0x06, 0x0b, 0x57, 0x43, 0x44, 0x08, 0x23, 0x1a, 0x7d, 0x08, 0x19,
+	0x86, 0x5d, 0x8a, 0xb9, 0x3e, 0x26, 0x75, 0x46, 0xf2, 0xca, 0x04, 0xb1, 0x2e, 0x5a, 0x8a, 0x1c,
+	0xa1, 0xfb, 0x30, 0x4e, 0x31, 0x23, 0x5d, 0xea, 0x62, 0x7d, 0x5c, 0xea, 0x6e, 0x26, 0x5e, 0xc3,
+	0x88, 0x59, 0x57, 0xec, 0x13, 0x1e, 0x3d, 0x80, 0x09, 0xfc, 0x15, 0xc7, 0x01, 0x13, 0x87, 0x37,
+	0x21, 0xc5, 0xef, 0x26, 0x89, 0x2b, 0x31, 0xb4, 0xae, 0xd8, 0xa7, 0x0a, 0x91, 0xb0, 0x4b, 0x82,
+	0x3d, 0xbf, 0xa1, 0xc3, 0xe0, 0x84, 0x57, 0x25, 0x21, 0x12, 0x0e, 0x59, 0xa1, 0xea, 0x91, 0x56,
+	0xb7, 0x8d, 0xf5, 0xec, 0x60, 0xd5, 0x13, 0x49, 0x08, 0x55, 0xc8, 0x96, 0xc7, 0x21, 0xc3, 0x1d,
+	0xda, 0xc0, 0x7c, 0xf1, 0x6f, 0x15, 0xa6, 0xce, 0x55, 0x13, 0x7a, 0x0f, 0xc6, 0x76, 0x6a, 0x1b,
+	0xb5, 0xcd, 0xcf, 0x6a, 0xd3, 0x8a, 0x61, 0x1c, 0xbc, 0xc8, 0xcd, 0x9e, 0x23, 0x76, 0x82, 0x66,
+	0x40, 0xf6, 0x03, 0x54, 0x82, 0xeb, 0x5b, 0xdb, 0x9b, 0x76, 0x65, 0x77, 0x65, 0x75, 0xbb, 0xba,
+	0x59, 0xdb, 0x5d, 0xb5, 0x2b, 0x2b, 0xdb, 0x95, 0x69, 0xd5, 0x98, 0x3b, 0x78, 0x91, 0x9b, 0x39,
+	0x27, 0x5a, 0xa5, 0xd8, 0xe1, 0xf8, 0x82, 0x66, 0xe7, 0xf1, 0x9a, 0xd0, 0xa4, 0x12, 0x35, 0x3b,
+	0x1d, 0x2f, 0x49, 0x63, 0x57, 0x1e, 0x6d, 0x3e, 0xa9, 0x4c, 0xa7, 0x13, 0x35, 0xb6, 0x6c, 0xb2,
+	0xc6, 0x3b, 0xdf, 0xfe, 0x62, 0x2a, 0xbf, 0xfd, 0x6a, 0x9e, 0x5f, 0x5d, 0xe9, 0xe7, 0x34, 0x68,
+	0xe2, 0x5e, 0xa3, 0x03, 0x15, 0xd0, 0xc5, 0xe6, 0x86, 0x96, 0x93, 0x76, 0x70, 0x60, 0x4b, 0x35,
+	0xac, 0x51, 0xf1, 0xa8, 0x93, 0xcd, 0xfc, 0xfe, 0xf2, 0xaf, 0x1f, 0x53, 0x53, 0x70, 0x4d, 0xf2,
+	0xcb, 0x6d, 0x27, 0x70, 0x1a, 0x98, 0xa2, 0xef, 0x54, 0x78, 0xeb, 0x42, 0xfb, 0x43, 0x4b, 0xc9,
+	0x97, 0x3f, 0xb9, 0xe5, 0x1a, 0xcb, 0x23, 0xd2, 0x43, 0x33, 0x29, 0xa8, 0xe8, 0x1b, 0x98, 0xfc,
+	0x6f, 0xbb, 0x44, 0x0b, 0x83, 0x2e, 0xc1, 0x85, 0x86, 0x6c, 0x2c, 0x8e, 0x82, 0x0e, 0xcd, 0xa0,
+	0xf4, 0x87, 0x0a, 0x93, 0xa7, 0x0f, 0x1d, 0x7b, 0xe6, 0x77, 0xd0, 0xe7, 0xa0, 0x89, 0x67, 0x1c,
+	0x25, 0x36, 0xd7, 0x33, 0x7f, 0x02, 0x8c, 0xdc, 0x60, 0x60, 0xf8, 0x01, 0xb8, 0x70, 0x45, 0x3e,
+	0xa4, 0x28, 0x31, 0xc2, 0xd9, 0x77, 0xda, 0xb8, 0x35, 0x84, 0x18, 0x6a, 0x52, 0xbe, 0x73, 0xf8,
+	0xc6, 0x54, 0x5e, 0xbf, 0x31, 0x95, 0xe7, 0x7d, 0x53, 0x3d, 0xec, 0x9b, 0xea, 0xab, 0xbe, 0xa9,
+	0xfe, 0xd9, 0x37, 0xd5, 0xef, 0x8f, 0x4d, 0xe5, 0xd5, 0xb1, 0xa9, 0xbc, 0x3e, 0x36, 0x95, 0xa7,
+	0xe9, 0xa7, 0x5a, 0x3d, 0x23, 0xff, 0x93, 0xdd, 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0x4a, 0x56,
+	0x23, 0xf6, 0xa8, 0x0a, 0x00, 0x00,
 }
 
 type authenticatedWrapperRaftServer struct {
@@ -1079,6 +1093,12 @@ func (m *StoreAction) CopyFrom(src interface{}) {
 			}
 			github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig())
 			m.Target = &v
+		case *StoreAction_Volume:
+			v := StoreAction_Volume{
+				Volume: &Volume{},
+			}
+			github_com_docker_swarmkit_api_deepcopy.Copy(v.Volume, o.GetVolume())
+			m.Target = &v
 		}
 	}
 
@@ -2030,6 +2050,27 @@ func (m *StoreAction_Config) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	}
 	return len(dAtA) - i, nil
 }
+func (m *StoreAction_Volume) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StoreAction_Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.Volume != nil {
+		{
+			size, err := m.Volume.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaft(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
+	}
+	return len(dAtA) - i, nil
+}
 func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
 	offset -= sovRaft(v)
 	base := offset
@@ -2673,6 +2714,18 @@ func (m *StoreAction_Config) Size() (n int) {
 	}
 	return n
 }
+func (m *StoreAction_Volume) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Volume != nil {
+		l = m.Volume.Size()
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	return n
+}
 
 func sovRaft(x uint64) (n int) {
 	return (math_bits.Len64(x|1) + 6) / 7
@@ -2914,6 +2967,16 @@ func (this *StoreAction_Config) String() string {
 	}, "")
 	return s
 }
+func (this *StoreAction_Volume) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StoreAction_Volume{`,
+		`Volume:` + strings.Replace(fmt.Sprintf("%v", this.Volume), "Volume", "Volume", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func valueToStringRaft(v interface{}) string {
 	rv := reflect.ValueOf(v)
 	if rv.IsNil() {
@@ -4374,6 +4437,41 @@ func (m *StoreAction) Unmarshal(dAtA []byte) error {
 			}
 			m.Target = &StoreAction_Config{v}
 			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &Volume{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Target = &StoreAction_Volume{v}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipRaft(dAtA[iNdEx:])

+ 2 - 1
vendor/github.com/docker/swarmkit/api/raft.proto

@@ -4,7 +4,7 @@ package docker.swarmkit.v1;
 
 import "github.com/docker/swarmkit/api/objects.proto";
 import "github.com/docker/swarmkit/api/types.proto";
-import "github.com/coreos/etcd/raft/raftpb/raft.proto";
+import "go.etcd.io/etcd/raft/v3/raftpb/raft.proto";
 import weak "gogoproto/gogo.proto";
 import weak "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
 
@@ -146,5 +146,6 @@ message StoreAction {
 		Resource resource = 8;
 		Extension extension = 9;
 		Config config = 10;
+		Volume volume = 11;
 	}
 }

+ 103 - 33
vendor/github.com/docker/swarmkit/api/snapshot.pb.go

@@ -59,6 +59,7 @@ type StoreSnapshot struct {
 	Resources  []*Resource  `protobuf:"bytes,7,rep,name=resources,proto3" json:"resources,omitempty"`
 	Extensions []*Extension `protobuf:"bytes,8,rep,name=extensions,proto3" json:"extensions,omitempty"`
 	Configs    []*Config    `protobuf:"bytes,9,rep,name=configs,proto3" json:"configs,omitempty"`
+	Volumes    []*Volume    `protobuf:"bytes,10,rep,name=volumes,proto3" json:"volumes,omitempty"`
 }
 
 func (m *StoreSnapshot) Reset()      { *m = StoreSnapshot{} }
@@ -181,39 +182,40 @@ func init() {
 }
 
 var fileDescriptor_c5cad0b62cecd9af = []byte{
-	// 507 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xcf, 0x6f, 0xd3, 0x30,
-	0x14, 0xc7, 0x93, 0xfe, 0x4a, 0xe7, 0x09, 0x18, 0x16, 0x07, 0xab, 0x0c, 0x53, 0xca, 0x0e, 0x45,
-	0x82, 0x14, 0x0a, 0x12, 0x08, 0x69, 0x1c, 0x3a, 0x71, 0xe0, 0xc0, 0x0e, 0x2e, 0x9a, 0xb8, 0xa6,
-	0xa9, 0xdb, 0x86, 0x90, 0xb8, 0xf2, 0x73, 0x3b, 0x8e, 0xf0, 0x1f, 0xf0, 0x67, 0xf5, 0xb8, 0xe3,
-	0x4e, 0x88, 0xb5, 0x07, 0xfe, 0x0d, 0x64, 0x3b, 0x09, 0x95, 0x48, 0xb7, 0x5b, 0x64, 0x7d, 0x3e,
-	0xef, 0x7d, 0xed, 0xbc, 0x87, 0x9e, 0x4d, 0x23, 0x35, 0x5b, 0x8c, 0xfc, 0x50, 0x24, 0xbd, 0xb1,
-	0x08, 0x63, 0x2e, 0x7b, 0x70, 0x1e, 0xc8, 0x24, 0x8e, 0x54, 0x2f, 0x98, 0x47, 0x3d, 0x48, 0x83,
-	0x39, 0xcc, 0x84, 0xf2, 0xe7, 0x52, 0x28, 0x81, 0xb1, 0x65, 0xfc, 0x9c, 0xf1, 0x97, 0x2f, 0x5a,
-	0x4f, 0x6f, 0x28, 0x21, 0x46, 0x5f, 0x78, 0xa8, 0xc0, 0x56, 0x68, 0x3d, 0xb9, 0x81, 0x96, 0xc1,
-	0x24, 0x6b, 0xd6, 0xba, 0x37, 0x15, 0x53, 0x61, 0x3e, 0x7b, 0xfa, 0xcb, 0x9e, 0x76, 0x7e, 0xd4,
-	0xd0, 0xad, 0xa1, 0x12, 0x92, 0x0f, 0xb3, 0x68, 0xd8, 0x47, 0xf5, 0x54, 0x8c, 0x39, 0x10, 0xb7,
-	0x5d, 0xed, 0xee, 0xf7, 0x89, 0xff, 0x7f, 0x48, 0xff, 0x54, 0x8c, 0x39, 0xb3, 0x18, 0x7e, 0x8d,
-	0x9a, 0xc0, 0xe5, 0x32, 0x0a, 0x39, 0x90, 0x8a, 0x51, 0xee, 0x97, 0x29, 0x43, 0xcb, 0xb0, 0x02,
-	0xd6, 0x62, 0xca, 0xd5, 0xb9, 0x90, 0x31, 0x90, 0xea, 0x6e, 0xf1, 0xd4, 0x32, 0xac, 0x80, 0x75,
-	0x42, 0x15, 0x40, 0x0c, 0xa4, 0xb6, 0x3b, 0xe1, 0xa7, 0x00, 0x62, 0x66, 0x31, 0xdd, 0x28, 0xfc,
-	0xba, 0x00, 0xc5, 0x25, 0x90, 0xfa, 0xee, 0x46, 0x27, 0x96, 0x61, 0x05, 0x8c, 0x5f, 0x21, 0x0f,
-	0x78, 0x28, 0xb9, 0x02, 0xd2, 0x30, 0x5e, 0xab, 0xfc, 0x66, 0x1a, 0x61, 0x39, 0x8a, 0xdf, 0xa2,
-	0x3d, 0xc9, 0x41, 0x2c, 0xa4, 0x7e, 0x11, 0xcf, 0x78, 0x87, 0x65, 0x1e, 0xcb, 0x20, 0xf6, 0x0f,
-	0xc7, 0xc7, 0x08, 0xf1, 0x6f, 0x8a, 0xa7, 0x10, 0x89, 0x14, 0x48, 0xd3, 0xc8, 0x0f, 0xca, 0xe4,
-	0xf7, 0x39, 0xc5, 0xb6, 0x04, 0x1d, 0x38, 0x14, 0xe9, 0x24, 0x9a, 0x02, 0xd9, 0xdb, 0x1d, 0xf8,
-	0xc4, 0x20, 0x2c, 0x47, 0x3b, 0x11, 0xba, 0x93, 0xdd, 0xbd, 0x18, 0x82, 0x37, 0xc8, 0x4b, 0x78,
-	0x32, 0xd2, 0x2f, 0x66, 0xc7, 0x80, 0x96, 0xde, 0x20, 0x98, 0xa8, 0x8f, 0x06, 0x63, 0x39, 0x8e,
-	0x0f, 0x91, 0x27, 0x79, 0x22, 0x96, 0x7c, 0x6c, 0xa6, 0xa1, 0x36, 0xa8, 0x1c, 0x38, 0x2c, 0x3f,
-	0xea, 0xfc, 0x71, 0x51, 0xb3, 0x68, 0xf2, 0x0e, 0x79, 0x4b, 0x2e, 0x75, 0x72, 0xe2, 0xb6, 0xdd,
-	0xee, 0xed, 0xfe, 0x51, 0xe9, 0xf3, 0xe6, 0x3b, 0x73, 0x66, 0x59, 0x96, 0x4b, 0xf8, 0x03, 0x42,
-	0x59, 0xd7, 0x59, 0x34, 0x27, 0x95, 0xb6, 0xdb, 0xdd, 0xef, 0x3f, 0xbe, 0xe6, 0xcf, 0xe6, 0x95,
-	0x06, 0xb5, 0xd5, 0xaf, 0x87, 0x0e, 0xdb, 0x92, 0xf1, 0x31, 0xaa, 0x83, 0xde, 0x02, 0x52, 0x35,
-	0x55, 0x1e, 0x95, 0x06, 0xd9, 0x5e, 0x93, 0xac, 0x86, 0xb5, 0x3a, 0x77, 0x91, 0x97, 0xa5, 0xc3,
-	0x0d, 0x54, 0x39, 0x7b, 0x7e, 0xe0, 0x0c, 0x8e, 0x56, 0x57, 0xd4, 0xb9, 0xbc, 0xa2, 0xce, 0xf7,
-	0x35, 0x75, 0x57, 0x6b, 0xea, 0x5e, 0xac, 0xa9, 0xfb, 0x7b, 0x4d, 0xdd, 0x9f, 0x1b, 0xea, 0x5c,
-	0x6c, 0xa8, 0x73, 0xb9, 0xa1, 0xce, 0xe7, 0xca, 0xa8, 0x61, 0xf6, 0xf0, 0xe5, 0xdf, 0x00, 0x00,
-	0x00, 0xff, 0xff, 0x97, 0x4e, 0xfd, 0x2a, 0x3b, 0x04, 0x00, 0x00,
+	// 523 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0x41, 0x8f, 0x12, 0x31,
+	0x14, 0xc7, 0x67, 0x58, 0x60, 0xd8, 0x6e, 0xd4, 0xb5, 0xf1, 0xd0, 0xe0, 0x5a, 0x11, 0xf7, 0x80,
+	0x89, 0x0e, 0x8a, 0x26, 0x1a, 0x93, 0xf5, 0xc0, 0xc6, 0x83, 0x07, 0xf7, 0x50, 0x0c, 0xf1, 0x3a,
+	0x0c, 0x05, 0x46, 0x76, 0xa6, 0xa4, 0xaf, 0xb0, 0x1e, 0xfd, 0x08, 0x7e, 0x1b, 0xbf, 0x02, 0xc7,
+	0x3d, 0xee, 0xc9, 0xb8, 0x70, 0xf0, 0x6b, 0x98, 0xb6, 0x53, 0x24, 0x71, 0x70, 0x6f, 0x93, 0xe6,
+	0xf7, 0x7b, 0xef, 0xdf, 0xce, 0x7b, 0xe8, 0xd9, 0x38, 0x51, 0x93, 0xf9, 0x20, 0x8c, 0x45, 0xda,
+	0x1e, 0x8a, 0x78, 0xca, 0x65, 0x1b, 0x2e, 0x22, 0x99, 0x4e, 0x13, 0xd5, 0x8e, 0x66, 0x49, 0x1b,
+	0xb2, 0x68, 0x06, 0x13, 0xa1, 0xc2, 0x99, 0x14, 0x4a, 0x60, 0x6c, 0x99, 0xd0, 0x31, 0xe1, 0xe2,
+	0x45, 0xfd, 0xe9, 0x0d, 0x25, 0xc4, 0xe0, 0x0b, 0x8f, 0x15, 0xd8, 0x0a, 0xf5, 0x27, 0x37, 0xd0,
+	0x32, 0x1a, 0xe5, 0xcd, 0xea, 0xf7, 0xc6, 0x62, 0x2c, 0xcc, 0x67, 0x5b, 0x7f, 0xd9, 0xd3, 0xe6,
+	0x8f, 0x32, 0xba, 0xd5, 0x53, 0x42, 0xf2, 0x5e, 0x1e, 0x0d, 0x87, 0xa8, 0x92, 0x89, 0x21, 0x07,
+	0xe2, 0x37, 0xf6, 0x5a, 0x07, 0x1d, 0x12, 0xfe, 0x1b, 0x32, 0x3c, 0x13, 0x43, 0xce, 0x2c, 0x86,
+	0x5f, 0xa3, 0x1a, 0x70, 0xb9, 0x48, 0x62, 0x0e, 0xa4, 0x64, 0x94, 0xfb, 0x45, 0x4a, 0xcf, 0x32,
+	0x6c, 0x03, 0x6b, 0x31, 0xe3, 0xea, 0x42, 0xc8, 0x29, 0x90, 0xbd, 0xdd, 0xe2, 0x99, 0x65, 0xd8,
+	0x06, 0xd6, 0x09, 0x55, 0x04, 0x53, 0x20, 0xe5, 0xdd, 0x09, 0x3f, 0x45, 0x30, 0x65, 0x16, 0xd3,
+	0x8d, 0xe2, 0xf3, 0x39, 0x28, 0x2e, 0x81, 0x54, 0x76, 0x37, 0x3a, 0xb5, 0x0c, 0xdb, 0xc0, 0xf8,
+	0x15, 0x0a, 0x80, 0xc7, 0x92, 0x2b, 0x20, 0x55, 0xe3, 0xd5, 0x8b, 0x6f, 0xa6, 0x11, 0xe6, 0x50,
+	0xfc, 0x16, 0xed, 0x4b, 0x0e, 0x62, 0x2e, 0xf5, 0x8b, 0x04, 0xc6, 0x3b, 0x2a, 0xf2, 0x58, 0x0e,
+	0xb1, 0xbf, 0x38, 0x3e, 0x41, 0x88, 0x7f, 0x55, 0x3c, 0x83, 0x44, 0x64, 0x40, 0x6a, 0x46, 0x7e,
+	0x50, 0x24, 0xbf, 0x77, 0x14, 0xdb, 0x12, 0x74, 0xe0, 0x58, 0x64, 0xa3, 0x64, 0x0c, 0x64, 0x7f,
+	0x77, 0xe0, 0x53, 0x83, 0x30, 0x87, 0x6a, 0x6b, 0x21, 0xce, 0xe7, 0x29, 0x07, 0x82, 0x76, 0x5b,
+	0x7d, 0x83, 0x30, 0x87, 0x36, 0x13, 0x74, 0x27, 0x7f, 0xb1, 0xcd, 0xe8, 0xbc, 0x41, 0x41, 0xca,
+	0xd3, 0x81, 0x7e, 0x67, 0x3b, 0x3c, 0xb4, 0xf0, 0xde, 0xd1, 0x48, 0x7d, 0x34, 0x18, 0x73, 0x38,
+	0x3e, 0x42, 0x81, 0xe4, 0xa9, 0x58, 0xf0, 0xa1, 0x99, 0xa1, 0x72, 0xb7, 0x74, 0xe8, 0x31, 0x77,
+	0xd4, 0xfc, 0xed, 0xa3, 0xda, 0xa6, 0xc9, 0x3b, 0x14, 0x2c, 0xb8, 0xd4, 0xf7, 0x25, 0x7e, 0xc3,
+	0x6f, 0xdd, 0xee, 0x1c, 0x17, 0xfe, 0x14, 0xb7, 0x69, 0x7d, 0xcb, 0x32, 0x27, 0xe1, 0x0f, 0x08,
+	0xe5, 0x5d, 0x27, 0xc9, 0x8c, 0x94, 0x1a, 0x7e, 0xeb, 0xa0, 0xf3, 0xf8, 0x3f, 0xf3, 0xe0, 0x2a,
+	0x75, 0xcb, 0xcb, 0x9f, 0x0f, 0x3d, 0xb6, 0x25, 0xe3, 0x13, 0x54, 0x01, 0xbd, 0x3b, 0x64, 0xcf,
+	0x54, 0x79, 0x54, 0x18, 0x64, 0x7b, 0xb9, 0xf2, 0x1a, 0xd6, 0x6a, 0xde, 0x45, 0x41, 0x9e, 0x0e,
+	0x57, 0x51, 0xa9, 0xff, 0xfc, 0xd0, 0xeb, 0x1e, 0x2f, 0xaf, 0xa9, 0x77, 0x75, 0x4d, 0xbd, 0x6f,
+	0x2b, 0xea, 0x2f, 0x57, 0xd4, 0xbf, 0x5c, 0x51, 0xff, 0xd7, 0x8a, 0xfa, 0xdf, 0xd7, 0xd4, 0xbb,
+	0x5c, 0x53, 0xef, 0x6a, 0x4d, 0xbd, 0xcf, 0xa5, 0x41, 0xd5, 0x6c, 0xef, 0xcb, 0x3f, 0x01, 0x00,
+	0x00, 0xff, 0xff, 0x27, 0xb3, 0xad, 0x75, 0x71, 0x04, 0x00, 0x00,
 }
 
 func (m *StoreSnapshot) Copy() *StoreSnapshot {
@@ -301,6 +303,14 @@ func (m *StoreSnapshot) CopyFrom(src interface{}) {
 		}
 	}
 
+	if o.Volumes != nil {
+		m.Volumes = make([]*Volume, len(o.Volumes))
+		for i := range m.Volumes {
+			m.Volumes[i] = &Volume{}
+			github_com_docker_swarmkit_api_deepcopy.Copy(m.Volumes[i], o.Volumes[i])
+		}
+	}
+
 }
 
 func (m *ClusterSnapshot) Copy() *ClusterSnapshot {
@@ -368,6 +378,20 @@ func (m *StoreSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if len(m.Volumes) > 0 {
+		for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintSnapshot(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x52
+		}
+	}
 	if len(m.Configs) > 0 {
 		for iNdEx := len(m.Configs) - 1; iNdEx >= 0; iNdEx-- {
 			{
@@ -660,6 +684,12 @@ func (m *StoreSnapshot) Size() (n int) {
 			n += 1 + l + sovSnapshot(uint64(l))
 		}
 	}
+	if len(m.Volumes) > 0 {
+		for _, e := range m.Volumes {
+			l = e.Size()
+			n += 1 + l + sovSnapshot(uint64(l))
+		}
+	}
 	return n
 }
 
@@ -754,6 +784,11 @@ func (this *StoreSnapshot) String() string {
 		repeatedStringForConfigs += strings.Replace(fmt.Sprintf("%v", f), "Config", "Config", 1) + ","
 	}
 	repeatedStringForConfigs += "}"
+	repeatedStringForVolumes := "[]*Volume{"
+	for _, f := range this.Volumes {
+		repeatedStringForVolumes += strings.Replace(fmt.Sprintf("%v", f), "Volume", "Volume", 1) + ","
+	}
+	repeatedStringForVolumes += "}"
 	s := strings.Join([]string{`&StoreSnapshot{`,
 		`Nodes:` + repeatedStringForNodes + `,`,
 		`Services:` + repeatedStringForServices + `,`,
@@ -764,6 +799,7 @@ func (this *StoreSnapshot) String() string {
 		`Resources:` + repeatedStringForResources + `,`,
 		`Extensions:` + repeatedStringForExtensions + `,`,
 		`Configs:` + repeatedStringForConfigs + `,`,
+		`Volumes:` + repeatedStringForVolumes + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1139,6 +1175,40 @@ func (m *StoreSnapshot) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshot
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshot
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSnapshot
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Volumes = append(m.Volumes, &Volume{})
+			if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipSnapshot(dAtA[iNdEx:])

+ 1 - 0
vendor/github.com/docker/swarmkit/api/snapshot.proto

@@ -23,6 +23,7 @@ message StoreSnapshot {
 	repeated Resource resources = 7;
 	repeated Extension extensions = 8;
 	repeated Config configs = 9;
+	repeated Volume volumes = 10;
 }
 
 // ClusterSnapshot stores cluster membership information in snapshots.

+ 794 - 149
vendor/github.com/docker/swarmkit/api/specs.pb.go

@@ -155,6 +155,40 @@ func (EndpointSpec_ResolutionMode) EnumDescriptor() ([]byte, []int) {
 	return fileDescriptor_6589acc608f7d4fd, []int{11, 0}
 }
 
+type VolumeSpec_VolumeAvailability int32
+
+const (
+	// Active allows a volume to be used and scheduled to. This is the
+	// default state.
+	VolumeAvailabilityActive VolumeSpec_VolumeAvailability = 0
+	// Pause prevents volumes from having new workloads scheduled to use
+	// them, even if they're already published on a Node.
+	VolumeAvailabilityPause VolumeSpec_VolumeAvailability = 1
+	// Drain causes existing workloads using this volume to be rescheduled,
+	// causing the volume to be unpublished and removed from nodes.
+	VolumeAvailabilityDrain VolumeSpec_VolumeAvailability = 2
+)
+
+var VolumeSpec_VolumeAvailability_name = map[int32]string{
+	0: "ACTIVE",
+	1: "PAUSE",
+	2: "DRAIN",
+}
+
+var VolumeSpec_VolumeAvailability_value = map[string]int32{
+	"ACTIVE": 0,
+	"PAUSE":  1,
+	"DRAIN":  2,
+}
+
+func (x VolumeSpec_VolumeAvailability) String() string {
+	return proto.EnumName(VolumeSpec_VolumeAvailability_name, int32(x))
+}
+
+func (VolumeSpec_VolumeAvailability) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_6589acc608f7d4fd, []int{16, 0}
+}
+
 type NodeSpec struct {
 	Annotations Annotations `protobuf:"bytes,1,opt,name=annotations,proto3" json:"annotations"`
 	// DesiredRole defines the role the node should have.
@@ -1283,11 +1317,89 @@ func (m *ConfigSpec) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_ConfigSpec proto.InternalMessageInfo
 
+type VolumeSpec struct {
+	// Annotations includes the name and labels of a volume. The name used in the
+	// spec's Annotations will be passed to the Plugin as the "Name" in the
+	// CreateVolume request.
+	Annotations Annotations `protobuf:"bytes,1,opt,name=annotations,proto3" json:"annotations"`
+	// Group defines the volume group this particular volume belongs to. When
+	// requesting volumes for a workload, the group name can be used instead of
+	// the volume's name, which tells swarmkit to pick one from the many volumes
+	// belonging to that group.
+	Group string `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"`
+	// Driver represents the CSI Plugin object and its configuration parameters.
+	// The "options" field of the Driver object is passed in the CSI
+	// CreateVolumeRequest as the "parameters" field. The Driver must be
+	// specified; there is no default CSI Plugin.
+	Driver *Driver `protobuf:"bytes,3,opt,name=driver,proto3" json:"driver,omitempty"`
+	// AccessMode is similar to, and used to determine, the volume access mode as
+	// defined in the CSI spec, as well as the volume type (block vs mount). In
+	// this way, it is more similar to the VolumeCapability message in the CSI
+	// spec.
+	AccessMode *VolumeAccessMode `protobuf:"bytes,4,opt,name=access_mode,json=accessMode,proto3" json:"access_mode,omitempty"`
+	// Secrets represents a set of key/value pairs to pass to the CSI plugin. The
+	// keys of the secrets can be anything, but the values refer to swarmkit
+	// Secret objects. See the "Secrets Requirements" section of the CSI Plugin
+	// Spec for more information.
+	Secrets []*VolumeSecret `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty"`
+	// AccessibilityRequirements specifies where a volume must be accessible
+	// from.
+	//
+	// This field must be empty if the plugin does not support
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the
+	// plugin does not support it, volume will not be created.
+	//
+	// If AccessibilityRequirements is empty, but the plugin does support
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire
+	// cluster is a valid target for the volume.
+	AccessibilityRequirements *TopologyRequirement `protobuf:"bytes,6,opt,name=AccessibilityRequirements,proto3" json:"AccessibilityRequirements,omitempty"`
+	// CapacityRange is the capacity this volume should be created with. If nil,
+	// the plugin will decide the capacity.
+	CapacityRange *CapacityRange `protobuf:"bytes,7,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"`
+	// Availability is the Volume's desired availability. Analogous to Node
+	// Availability, this allows the user to take volumes offline in order to
+	// update or delete them.
+	Availability VolumeSpec_VolumeAvailability `protobuf:"varint,8,opt,name=availability,proto3,enum=docker.swarmkit.v1.VolumeSpec_VolumeAvailability" json:"availability,omitempty"`
+}
+
+func (m *VolumeSpec) Reset()      { *m = VolumeSpec{} }
+func (*VolumeSpec) ProtoMessage() {}
+func (*VolumeSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6589acc608f7d4fd, []int{16}
+}
+func (m *VolumeSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *VolumeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_VolumeSpec.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *VolumeSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeSpec.Merge(m, src)
+}
+func (m *VolumeSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *VolumeSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeSpec proto.InternalMessageInfo
+
 func init() {
 	proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Membership", NodeSpec_Membership_name, NodeSpec_Membership_value)
 	proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Availability", NodeSpec_Availability_name, NodeSpec_Availability_value)
 	proto.RegisterEnum("docker.swarmkit.v1.ContainerSpec_Isolation", ContainerSpec_Isolation_name, ContainerSpec_Isolation_value)
 	proto.RegisterEnum("docker.swarmkit.v1.EndpointSpec_ResolutionMode", EndpointSpec_ResolutionMode_name, EndpointSpec_ResolutionMode_value)
+	proto.RegisterEnum("docker.swarmkit.v1.VolumeSpec_VolumeAvailability", VolumeSpec_VolumeAvailability_name, VolumeSpec_VolumeAvailability_value)
 	proto.RegisterType((*NodeSpec)(nil), "docker.swarmkit.v1.NodeSpec")
 	proto.RegisterType((*ServiceSpec)(nil), "docker.swarmkit.v1.ServiceSpec")
 	proto.RegisterType((*ReplicatedService)(nil), "docker.swarmkit.v1.ReplicatedService")
@@ -1309,6 +1421,7 @@ func init() {
 	proto.RegisterType((*ClusterSpec)(nil), "docker.swarmkit.v1.ClusterSpec")
 	proto.RegisterType((*SecretSpec)(nil), "docker.swarmkit.v1.SecretSpec")
 	proto.RegisterType((*ConfigSpec)(nil), "docker.swarmkit.v1.ConfigSpec")
+	proto.RegisterType((*VolumeSpec)(nil), "docker.swarmkit.v1.VolumeSpec")
 }
 
 func init() {
@@ -1316,155 +1429,166 @@ func init() {
 }
 
 var fileDescriptor_6589acc608f7d4fd = []byte{
-	// 2363 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4d, 0x73, 0x1b, 0xc7,
-	0xd1, 0x06, 0x48, 0x10, 0x1f, 0xbd, 0x00, 0x05, 0x8e, 0x65, 0x7b, 0x09, 0x49, 0x20, 0x0c, 0xcb,
-	0x36, 0x6d, 0xd7, 0x0b, 0xd6, 0xcb, 0xb8, 0x1c, 0x7f, 0xc4, 0x49, 0x00, 0x02, 0x96, 0x60, 0x49,
-	0x14, 0x6a, 0x40, 0x29, 0x51, 0x55, 0xaa, 0x50, 0x83, 0xdd, 0x21, 0xb0, 0xe1, 0x62, 0x67, 0x33,
-	0x3b, 0xa0, 0x8d, 0x5b, 0x8e, 0x2e, 0xe5, 0x92, 0x3f, 0xc0, 0x53, 0x2a, 0xa7, 0x5c, 0x92, 0x7f,
-	0x90, 0xa3, 0x8f, 0x3e, 0x3a, 0x17, 0x56, 0x4c, 0xff, 0x84, 0xdc, 0x72, 0x49, 0x6a, 0x66, 0x67,
-	0x17, 0x0b, 0x0a, 0x10, 0x95, 0x8a, 0x0e, 0xb9, 0xcd, 0xf4, 0x3e, 0x4f, 0x4f, 0xcf, 0x4c, 0x77,
-	0x4f, 0xf7, 0xc2, 0x7b, 0x23, 0x47, 0x8c, 0xa7, 0xc3, 0x86, 0xc5, 0x26, 0x7b, 0x36, 0xb3, 0x4e,
-	0x28, 0xdf, 0x0b, 0xbe, 0x24, 0x7c, 0x72, 0xe2, 0x88, 0x3d, 0xe2, 0x3b, 0x7b, 0x81, 0x4f, 0xad,
-	0xa0, 0xe1, 0x73, 0x26, 0x18, 0x42, 0x21, 0xa0, 0x11, 0x01, 0x1a, 0xa7, 0xff, 0x5f, 0xb9, 0x8a,
-	0x2f, 0x66, 0x3e, 0xd5, 0xfc, 0xca, 0xf5, 0x11, 0x1b, 0x31, 0x35, 0xdc, 0x93, 0x23, 0x2d, 0xad,
-	0x8e, 0x18, 0x1b, 0xb9, 0x74, 0x4f, 0xcd, 0x86, 0xd3, 0xe3, 0x3d, 0x7b, 0xca, 0x89, 0x70, 0x98,
-	0xa7, 0xbf, 0x6f, 0x5f, 0xfe, 0x4e, 0xbc, 0xd9, 0x2a, 0xea, 0x97, 0x9c, 0xf8, 0x3e, 0xe5, 0x7a,
-	0xc1, 0xfa, 0x59, 0x06, 0xf2, 0x87, 0xcc, 0xa6, 0x7d, 0x9f, 0x5a, 0xe8, 0x0e, 0x18, 0xc4, 0xf3,
-	0x98, 0x50, 0xba, 0x03, 0x33, 0x5d, 0x4b, 0xef, 0x1a, 0xfb, 0x3b, 0x8d, 0x67, 0xf7, 0xd4, 0x68,
-	0xce, 0x61, 0xad, 0xcc, 0x37, 0xe7, 0x3b, 0x29, 0x9c, 0x64, 0xa2, 0x9f, 0x41, 0xd1, 0xa6, 0x81,
-	0xc3, 0xa9, 0x3d, 0xe0, 0xcc, 0xa5, 0xe6, 0x5a, 0x2d, 0xbd, 0xbb, 0xb9, 0x7f, 0x73, 0x99, 0x26,
-	0xb9, 0x38, 0x66, 0x2e, 0xc5, 0x86, 0x66, 0xc8, 0x09, 0xba, 0x03, 0x30, 0xa1, 0x93, 0x21, 0xe5,
-	0xc1, 0xd8, 0xf1, 0xcd, 0x75, 0x45, 0x7f, 0x67, 0x15, 0x5d, 0xda, 0xde, 0x78, 0x10, 0xc3, 0x71,
-	0x82, 0x8a, 0x1e, 0x40, 0x91, 0x9c, 0x12, 0xc7, 0x25, 0x43, 0xc7, 0x75, 0xc4, 0xcc, 0xcc, 0x28,
-	0x55, 0xef, 0x3e, 0x57, 0x55, 0x33, 0x41, 0xc0, 0x0b, 0xf4, 0xba, 0x0d, 0x30, 0x5f, 0x08, 0xbd,
-	0x0d, 0xb9, 0x5e, 0xe7, 0xb0, 0xdd, 0x3d, 0xbc, 0x53, 0x4e, 0x55, 0xb6, 0x9f, 0x9e, 0xd5, 0x5e,
-	0x95, 0x3a, 0xe6, 0x80, 0x1e, 0xf5, 0x6c, 0xc7, 0x1b, 0xa1, 0x5d, 0xc8, 0x37, 0x0f, 0x0e, 0x3a,
-	0xbd, 0xa3, 0x4e, 0xbb, 0x9c, 0xae, 0x54, 0x9e, 0x9e, 0xd5, 0x5e, 0x5b, 0x04, 0x36, 0x2d, 0x8b,
-	0xfa, 0x82, 0xda, 0x95, 0xcc, 0xd7, 0x7f, 0xa8, 0xa6, 0xea, 0x5f, 0xa7, 0xa1, 0x98, 0x34, 0x02,
-	0xbd, 0x0d, 0xd9, 0xe6, 0xc1, 0x51, 0xf7, 0x71, 0xa7, 0x9c, 0x9a, 0xd3, 0x93, 0x88, 0xa6, 0x25,
-	0x9c, 0x53, 0x8a, 0x6e, 0xc3, 0x46, 0xaf, 0xf9, 0xa8, 0xdf, 0x29, 0xa7, 0xe7, 0xe6, 0x24, 0x61,
-	0x3d, 0x32, 0x0d, 0x14, 0xaa, 0x8d, 0x9b, 0xdd, 0xc3, 0xf2, 0xda, 0x72, 0x54, 0x9b, 0x13, 0xc7,
-	0xd3, 0xa6, 0xfc, 0x69, 0x03, 0x8c, 0x3e, 0xe5, 0xa7, 0x8e, 0xf5, 0x92, 0x5d, 0xe4, 0x43, 0xc8,
-	0x08, 0x12, 0x9c, 0x28, 0xd7, 0x30, 0x96, 0xbb, 0xc6, 0x11, 0x09, 0x4e, 0xe4, 0xa2, 0x9a, 0xae,
-	0xf0, 0xd2, 0x33, 0x38, 0xf5, 0x5d, 0xc7, 0x22, 0x82, 0xda, 0xca, 0x33, 0x8c, 0xfd, 0xb7, 0x96,
-	0xb1, 0x71, 0x8c, 0xd2, 0xf6, 0xdf, 0x4d, 0xe1, 0x04, 0x15, 0x7d, 0x0a, 0xd9, 0x91, 0xcb, 0x86,
-	0xc4, 0x55, 0x3e, 0x61, 0xec, 0xbf, 0xb1, 0x4c, 0xc9, 0x1d, 0x85, 0x98, 0x2b, 0xd0, 0x14, 0xf4,
-	0x05, 0x6c, 0xce, 0x55, 0x0d, 0x7e, 0xcd, 0x86, 0x26, 0xac, 0x56, 0x32, 0xb7, 0xe4, 0x0b, 0x36,
-	0xbc, 0x9b, 0xc2, 0x25, 0x9e, 0x14, 0xa0, 0x9f, 0x02, 0x84, 0x5a, 0x95, 0x1e, 0x43, 0xe9, 0xb9,
-	0xb5, 0xda, 0x98, 0x50, 0x47, 0x61, 0x14, 0x4d, 0xd0, 0x47, 0x90, 0x9d, 0xfa, 0x36, 0x11, 0xd4,
-	0xcc, 0x2a, 0x6e, 0x6d, 0x19, 0xf7, 0x91, 0x42, 0x1c, 0x30, 0xef, 0xd8, 0x19, 0x61, 0x8d, 0x47,
-	0x3f, 0x81, 0x3c, 0x67, 0xae, 0x3b, 0x24, 0xd6, 0x89, 0x59, 0x78, 0x41, 0x6e, 0xcc, 0x40, 0xf7,
-	0x20, 0xef, 0x51, 0xf1, 0x25, 0xe3, 0x27, 0x81, 0x99, 0xab, 0xad, 0xef, 0x1a, 0xfb, 0xef, 0x2f,
-	0x0d, 0xab, 0x10, 0xd3, 0x14, 0x82, 0x58, 0xe3, 0x09, 0xf5, 0x44, 0xa8, 0xa8, 0xb5, 0x66, 0xa6,
-	0x71, 0xac, 0x40, 0x9a, 0x42, 0x3d, 0xdb, 0x67, 0x8e, 0x27, 0xcc, 0xfc, 0x6a, 0x53, 0x3a, 0x1a,
-	0x23, 0xdd, 0x02, 0xc7, 0x8c, 0x56, 0x16, 0x32, 0x13, 0x66, 0xd3, 0xfa, 0x1e, 0x6c, 0x3d, 0x73,
-	0xed, 0xa8, 0x02, 0x79, 0x7d, 0xe0, 0xa1, 0xbf, 0x66, 0x70, 0x3c, 0xaf, 0x5f, 0x83, 0xd2, 0xc2,
-	0x15, 0xd7, 0x2d, 0x28, 0x2d, 0x5c, 0x17, 0x7a, 0x0b, 0x36, 0x27, 0xe4, 0xab, 0x81, 0xc5, 0x3c,
-	0x6b, 0xca, 0x39, 0xf5, 0x84, 0xd6, 0x51, 0x9a, 0x90, 0xaf, 0x0e, 0x62, 0x21, 0x7a, 0x1f, 0xb6,
-	0x04, 0x13, 0xc4, 0x1d, 0x58, 0x6c, 0xe2, 0xbb, 0x34, 0x8c, 0x8e, 0x35, 0x85, 0x2c, 0xab, 0x0f,
-	0x07, 0x73, 0x79, 0xdd, 0x80, 0x42, 0x7c, 0x97, 0xf5, 0x3f, 0x6f, 0x40, 0x3e, 0xf2, 0x74, 0x74,
-	0x0f, 0x80, 0xc4, 0x07, 0xa5, 0x0f, 0xe2, 0xdd, 0x17, 0x3a, 0x55, 0x49, 0x97, 0x1e, 0x3e, 0xa7,
-	0xa3, 0x26, 0x14, 0x2c, 0xe6, 0x09, 0xe2, 0x78, 0x94, 0xeb, 0x48, 0x5d, 0xea, 0x9f, 0x07, 0x11,
-	0x48, 0xeb, 0x98, 0xb3, 0x50, 0x0b, 0x72, 0x23, 0xea, 0x51, 0xee, 0x58, 0xda, 0xc1, 0xdf, 0x5e,
-	0xea, 0x98, 0x21, 0x04, 0x4f, 0x3d, 0xe1, 0x4c, 0xa8, 0xd6, 0x12, 0x11, 0xd1, 0xe7, 0x50, 0xe0,
-	0x34, 0x60, 0x53, 0x6e, 0xd1, 0x40, 0x87, 0xfb, 0xee, 0xf2, 0x30, 0x09, 0x41, 0x98, 0xfe, 0x66,
-	0xea, 0x70, 0x2a, 0xb7, 0x10, 0xe0, 0x39, 0x15, 0x7d, 0x0a, 0x39, 0x4e, 0x03, 0x41, 0xb8, 0x78,
-	0x5e, 0xc4, 0xe2, 0x10, 0xd2, 0x63, 0xae, 0x63, 0xcd, 0x70, 0xc4, 0x40, 0x9f, 0x42, 0xc1, 0x77,
-	0x89, 0xa5, 0xb4, 0x9a, 0x1b, 0xab, 0x63, 0xac, 0x17, 0x81, 0xf0, 0x1c, 0x8f, 0x3e, 0x06, 0x70,
-	0xd9, 0x68, 0x60, 0x73, 0xe7, 0x94, 0x72, 0x1d, 0x65, 0x95, 0x65, 0xec, 0xb6, 0x42, 0xe0, 0x82,
-	0xcb, 0x46, 0xe1, 0x10, 0xdd, 0xf9, 0xaf, 0x82, 0x24, 0x11, 0x20, 0x6f, 0x40, 0xf1, 0x98, 0x71,
-	0x8b, 0x0e, 0x74, 0xac, 0x17, 0x94, 0x6f, 0x19, 0x4a, 0x16, 0x06, 0x28, 0xfa, 0x15, 0xbc, 0x12,
-	0x9d, 0xd6, 0x80, 0xd3, 0x63, 0xca, 0xa9, 0x27, 0x8f, 0xdc, 0x50, 0xcb, 0xbe, 0xf5, 0xfc, 0x23,
-	0xd7, 0x68, 0x9d, 0x6a, 0x11, 0xbf, 0xfc, 0x21, 0x68, 0x15, 0x20, 0xc7, 0xc3, 0x0b, 0xae, 0xff,
-	0x2e, 0x2d, 0xe3, 0xec, 0x12, 0x02, 0xed, 0x81, 0x11, 0x2f, 0xef, 0xd8, 0xca, 0xe1, 0x0a, 0xad,
-	0xcd, 0x8b, 0xf3, 0x1d, 0x88, 0xb0, 0xdd, 0xb6, 0xcc, 0xc0, 0x7a, 0x6c, 0xa3, 0x0e, 0x94, 0x62,
-	0x82, 0x2c, 0x82, 0x74, 0x99, 0x50, 0x7b, 0x9e, 0xa5, 0x47, 0x33, 0x9f, 0xe2, 0x22, 0x4f, 0xcc,
-	0xea, 0xbf, 0x04, 0xf4, 0xac, 0x03, 0x22, 0x04, 0x99, 0x13, 0xc7, 0xd3, 0x66, 0x60, 0x35, 0x46,
-	0x0d, 0xc8, 0xf9, 0x64, 0xe6, 0x32, 0x62, 0x6b, 0x3f, 0xbc, 0xde, 0x08, 0xcb, 0xa3, 0x46, 0x54,
-	0x1e, 0x35, 0x9a, 0xde, 0x0c, 0x47, 0xa0, 0xfa, 0x3d, 0x78, 0x75, 0x69, 0x9c, 0xa1, 0x7d, 0x28,
-	0xc6, 0x31, 0x32, 0xdf, 0xeb, 0xb5, 0x8b, 0xf3, 0x1d, 0x23, 0x0e, 0xa6, 0x6e, 0x1b, 0x1b, 0x31,
-	0xa8, 0x6b, 0xd7, 0xff, 0xba, 0x09, 0xa5, 0x85, 0x48, 0x43, 0xd7, 0x61, 0xc3, 0x99, 0x90, 0x11,
-	0xd5, 0x36, 0x86, 0x13, 0xd4, 0x81, 0xac, 0x4b, 0x86, 0xd4, 0x95, 0xb1, 0x22, 0x2f, 0xee, 0xff,
-	0xae, 0x0c, 0xd9, 0xc6, 0x7d, 0x85, 0xef, 0x78, 0x82, 0xcf, 0xb0, 0x26, 0x23, 0x13, 0x72, 0x16,
-	0x9b, 0x4c, 0x88, 0x27, 0x1f, 0xc9, 0xf5, 0xdd, 0x02, 0x8e, 0xa6, 0xf2, 0x64, 0x08, 0x1f, 0x05,
-	0x66, 0x46, 0x89, 0xd5, 0x58, 0xe6, 0xc8, 0x31, 0x0b, 0x84, 0x47, 0x26, 0xd4, 0xdc, 0x54, 0xd6,
-	0xc4, 0x73, 0x54, 0x86, 0x75, 0xea, 0x9d, 0x9a, 0x1b, 0x0a, 0x2e, 0x87, 0x52, 0x62, 0x3b, 0x61,
-	0x20, 0x14, 0xb0, 0x1c, 0x4a, 0x9d, 0xd3, 0x80, 0x72, 0x33, 0x17, 0x9e, 0xb6, 0x1c, 0xa3, 0xd7,
-	0x20, 0x3b, 0xe2, 0x6c, 0xea, 0x87, 0x1e, 0x58, 0xc0, 0x7a, 0x26, 0xdf, 0x3b, 0x9f, 0x3b, 0xa7,
-	0x8e, 0x4b, 0x47, 0x34, 0x30, 0x5f, 0x53, 0x17, 0x51, 0x5d, 0x1a, 0x8b, 0x31, 0x0a, 0x27, 0x18,
-	0xa8, 0x01, 0x19, 0xc7, 0x73, 0x84, 0xf9, 0xba, 0x8e, 0xc3, 0xcb, 0x57, 0xd8, 0x62, 0xcc, 0x7d,
-	0x4c, 0xdc, 0x29, 0xc5, 0x0a, 0x87, 0xb6, 0x61, 0x5d, 0x88, 0x99, 0x59, 0xaa, 0xa5, 0x77, 0xf3,
-	0xad, 0xdc, 0xc5, 0xf9, 0xce, 0xfa, 0xd1, 0xd1, 0x13, 0x2c, 0x65, 0xe8, 0x16, 0x00, 0xf3, 0xa9,
-	0x37, 0x08, 0x84, 0xed, 0x78, 0x26, 0x92, 0x08, 0x5c, 0x90, 0x92, 0xbe, 0x14, 0xa0, 0x1b, 0x32,
-	0x73, 0x11, 0x7b, 0xc0, 0x3c, 0x77, 0x66, 0xbe, 0xa2, 0xbe, 0xe6, 0xa5, 0xe0, 0xa1, 0xe7, 0xce,
-	0xd0, 0x0e, 0x18, 0x81, 0x60, 0xfe, 0x20, 0x70, 0x46, 0x1e, 0x71, 0xcd, 0xeb, 0x6a, 0xe7, 0x20,
-	0x45, 0x7d, 0x25, 0x41, 0x3f, 0x86, 0xec, 0x84, 0x4d, 0x3d, 0x11, 0x98, 0x79, 0x75, 0x91, 0xdb,
-	0xcb, 0xf6, 0xf8, 0x40, 0x22, 0x74, 0xd4, 0x69, 0x38, 0xea, 0xc0, 0x96, 0xd2, 0x3c, 0xe2, 0xc4,
-	0xa2, 0x03, 0x9f, 0x72, 0x87, 0xd9, 0xfa, 0x7d, 0xde, 0x7e, 0x66, 0xb7, 0x6d, 0xdd, 0x0a, 0xe0,
-	0x6b, 0x92, 0x73, 0x47, 0x52, 0x7a, 0x8a, 0x81, 0x7a, 0x50, 0xf4, 0xa7, 0xae, 0x3b, 0x60, 0x7e,
-	0xf8, 0x1a, 0x85, 0x09, 0xfc, 0x05, 0xdc, 0xa9, 0x37, 0x75, 0xdd, 0x87, 0x21, 0x09, 0x1b, 0xfe,
-	0x7c, 0x82, 0x3e, 0x83, 0x5c, 0x40, 0x2d, 0x4e, 0x45, 0x60, 0x16, 0xd5, 0x96, 0xde, 0x5c, 0xa6,
-	0xac, 0xaf, 0x20, 0x71, 0x5e, 0xc0, 0x11, 0x47, 0xd2, 0x2d, 0x95, 0xd6, 0x02, 0xf3, 0xd5, 0xd5,
-	0x74, 0x9d, 0xf9, 0xe6, 0x74, 0xcd, 0x91, 0xe1, 0x22, 0x7d, 0x32, 0x30, 0xb7, 0x94, 0x3b, 0x85,
-	0x13, 0xf4, 0x04, 0xc0, 0xf6, 0x82, 0x41, 0x08, 0x32, 0xaf, 0xa9, 0x3d, 0xbe, 0x7f, 0xf5, 0x1e,
-	0xdb, 0x87, 0x7d, 0x5d, 0x87, 0x94, 0x2e, 0xce, 0x77, 0x0a, 0xf1, 0x14, 0x17, 0x6c, 0x2f, 0x08,
-	0x87, 0xa8, 0x05, 0xc6, 0x98, 0x12, 0x57, 0x8c, 0xad, 0x31, 0xb5, 0x4e, 0xcc, 0xf2, 0xea, 0xb2,
-	0xe4, 0xae, 0x82, 0x69, 0x0d, 0x49, 0x12, 0xea, 0x42, 0xc1, 0x09, 0x98, 0xab, 0xae, 0xc8, 0x34,
-	0x55, 0x7e, 0x7b, 0x01, 0xeb, 0xba, 0x11, 0x05, 0xcf, 0xd9, 0xe8, 0x26, 0x14, 0x7c, 0xc7, 0x0e,
-	0xee, 0x3b, 0x13, 0x47, 0x98, 0xdb, 0xb5, 0xf4, 0xee, 0x3a, 0x9e, 0x0b, 0xd0, 0x5d, 0xc8, 0x05,
-	0xb3, 0xc0, 0x12, 0x6e, 0x60, 0x56, 0xd4, 0xe1, 0x36, 0xae, 0x5e, 0xa6, 0x1f, 0x12, 0xc2, 0xc4,
-	0x11, 0xd1, 0x65, 0xc5, 0x63, 0x11, 0x5f, 0xf7, 0x02, 0x03, 0x62, 0xdb, 0xe6, 0x0d, 0x75, 0xe0,
-	0xa5, 0xb9, 0xb4, 0x69, 0xdb, 0xe8, 0x1d, 0xb8, 0x96, 0x80, 0xd9, 0x9c, 0xf9, 0xe6, 0x4d, 0x85,
-	0x4b, 0xb0, 0xdb, 0x9c, 0xf9, 0xb2, 0x86, 0x98, 0xba, 0xd2, 0xc6, 0xc0, 0xbc, 0xa5, 0x2c, 0xdb,
-	0xbd, 0xda, 0xb2, 0x47, 0x8a, 0x80, 0x23, 0x62, 0xe5, 0x63, 0x30, 0x12, 0x49, 0x4e, 0x26, 0xa0,
-	0x13, 0x3a, 0xd3, 0x79, 0x53, 0x0e, 0xa5, 0x73, 0x9c, 0xca, 0x98, 0x57, 0x89, 0xbd, 0x80, 0xc3,
-	0xc9, 0x27, 0x6b, 0x1f, 0xa5, 0x2b, 0xfb, 0x60, 0x24, 0x1c, 0x1a, 0xbd, 0x29, 0x1f, 0x9d, 0x91,
-	0x13, 0x08, 0x3e, 0x1b, 0x90, 0xa9, 0x18, 0x9b, 0x3f, 0x57, 0x84, 0x62, 0x24, 0x6c, 0x4e, 0xc5,
-	0xb8, 0x32, 0x80, 0xb9, 0x47, 0xa0, 0x1a, 0x18, 0x32, 0x0f, 0x06, 0x94, 0x9f, 0x52, 0x2e, 0x4b,
-	0x48, 0xb9, 0xc9, 0xa4, 0x48, 0x66, 0xba, 0x80, 0x12, 0x6e, 0x8d, 0x55, 0xca, 0x2e, 0x60, 0x3d,
-	0x93, 0x39, 0x38, 0x0a, 0x3e, 0x9d, 0x83, 0xf5, 0xb4, 0xf2, 0x09, 0x14, 0x93, 0x87, 0xff, 0x1f,
-	0x6d, 0xa8, 0x0d, 0xd9, 0xf0, 0x78, 0x64, 0xd6, 0x55, 0x19, 0x5b, 0xbf, 0x71, 0x2a, 0x5b, 0x23,
-	0xc8, 0x04, 0xec, 0x58, 0x28, 0xda, 0x3a, 0x56, 0x63, 0x29, 0x1b, 0x13, 0x1e, 0x76, 0x4b, 0xeb,
-	0x58, 0x8d, 0xeb, 0x7f, 0x49, 0x43, 0x21, 0x76, 0x33, 0xf4, 0x01, 0x6c, 0x75, 0xfb, 0x0f, 0xef,
-	0x37, 0x8f, 0xba, 0x0f, 0x0f, 0x07, 0xed, 0xce, 0xe7, 0xcd, 0x47, 0xf7, 0x8f, 0xca, 0xa9, 0xca,
-	0xad, 0xa7, 0x67, 0xb5, 0xed, 0xf9, 0x8b, 0x16, 0xc1, 0xdb, 0xf4, 0x98, 0x4c, 0x5d, 0xb1, 0xc8,
-	0xea, 0xe1, 0x87, 0x07, 0x9d, 0x7e, 0xbf, 0x9c, 0x5e, 0xc5, 0xea, 0x71, 0x66, 0xd1, 0x20, 0x40,
-	0xfb, 0x50, 0x9e, 0xb3, 0xee, 0x3e, 0xe9, 0x75, 0xf0, 0xe3, 0xf2, 0x5a, 0xe5, 0xe6, 0xd3, 0xb3,
-	0x9a, 0xf9, 0x2c, 0xe9, 0xee, 0xcc, 0xa7, 0xfc, 0xb1, 0x6e, 0x46, 0xff, 0x91, 0x86, 0x62, 0xb2,
-	0x03, 0x40, 0x07, 0x61, 0xdd, 0xaf, 0x0e, 0x60, 0x73, 0x7f, 0xef, 0xaa, 0x8e, 0x41, 0x55, 0x11,
-	0xee, 0x54, 0xea, 0x7d, 0xc0, 0x6c, 0x8a, 0x15, 0x19, 0x7d, 0x00, 0x1b, 0x3e, 0xe3, 0x22, 0x7a,
-	0x6f, 0x97, 0x3f, 0x45, 0x8c, 0x47, 0x25, 0x59, 0x08, 0xae, 0x8f, 0x61, 0x73, 0x51, 0x1b, 0xba,
-	0x0d, 0xeb, 0x8f, 0xbb, 0xbd, 0x72, 0xaa, 0x72, 0xe3, 0xe9, 0x59, 0xed, 0xf5, 0xc5, 0x8f, 0x8f,
-	0x1d, 0x2e, 0xa6, 0xc4, 0xed, 0xf6, 0xd0, 0x7b, 0xb0, 0xd1, 0x3e, 0xec, 0x63, 0x5c, 0x4e, 0x57,
-	0x76, 0x9e, 0x9e, 0xd5, 0x6e, 0x2c, 0xe2, 0xe4, 0x27, 0x36, 0xf5, 0x6c, 0xcc, 0x86, 0x71, 0x0b,
-	0xfe, 0xcf, 0x35, 0x30, 0x74, 0x19, 0xf2, 0xb2, 0xff, 0xd2, 0x94, 0xc2, 0x92, 0x36, 0xca, 0x9e,
-	0x6b, 0x57, 0x56, 0xb6, 0xc5, 0x90, 0xa0, 0x23, 0xe3, 0x0d, 0x28, 0x3a, 0xfe, 0xe9, 0x87, 0x03,
-	0xea, 0x91, 0xa1, 0xab, 0xbb, 0xf1, 0x3c, 0x36, 0xa4, 0xac, 0x13, 0x8a, 0x64, 0x61, 0xe1, 0x78,
-	0x82, 0x72, 0x4f, 0xf7, 0xd9, 0x79, 0x1c, 0xcf, 0xd1, 0x67, 0x90, 0x71, 0x7c, 0x32, 0xd1, 0xe5,
-	0xf8, 0xd2, 0x1d, 0x74, 0x7b, 0xcd, 0x07, 0x3a, 0x72, 0x5b, 0xf9, 0x8b, 0xf3, 0x9d, 0x8c, 0x14,
-	0x60, 0x45, 0x43, 0xd5, 0xa8, 0x57, 0x92, 0x2b, 0xa9, 0x62, 0x24, 0x8f, 0x13, 0x12, 0x19, 0x7d,
-	0x8e, 0x37, 0xe2, 0x34, 0x08, 0x54, 0x59, 0x92, 0xc7, 0xd1, 0x14, 0x55, 0x20, 0xa7, 0xeb, 0x6a,
-	0xd5, 0x62, 0x15, 0x64, 0xb7, 0xa2, 0x05, 0xad, 0x12, 0x18, 0xe1, 0x69, 0x0c, 0x8e, 0x39, 0x9b,
-	0xd4, 0xff, 0x95, 0x01, 0xe3, 0xc0, 0x9d, 0x06, 0x42, 0xd7, 0x6c, 0x2f, 0xed, 0xf0, 0x9f, 0xc0,
-	0x16, 0x51, 0x7f, 0x7d, 0x88, 0x27, 0x1f, 0x79, 0xd5, 0xae, 0xe8, 0x0b, 0xb8, 0xbd, 0x54, 0x5d,
-	0x0c, 0x0e, 0x5b, 0x9b, 0x56, 0x56, 0xea, 0x34, 0xd3, 0xb8, 0x4c, 0x2e, 0x7d, 0x41, 0x7d, 0x28,
-	0x31, 0x6e, 0x8d, 0x69, 0x20, 0xc2, 0xd2, 0x40, 0xff, 0x25, 0x59, 0xfa, 0xff, 0xec, 0x61, 0x12,
-	0xa8, 0x5f, 0xc4, 0xd0, 0xda, 0x45, 0x1d, 0xe8, 0x23, 0xc8, 0x70, 0x72, 0x1c, 0xb5, 0x5e, 0x4b,
-	0x83, 0x04, 0x93, 0x63, 0xb1, 0xa0, 0x42, 0x31, 0xd0, 0x17, 0x00, 0xb6, 0x13, 0xf8, 0x44, 0x58,
-	0x63, 0xca, 0xf5, 0x65, 0x2f, 0xdd, 0x62, 0x3b, 0x46, 0x2d, 0x68, 0x49, 0xb0, 0xd1, 0x3d, 0x28,
-	0x58, 0x24, 0x72, 0xd7, 0xec, 0xea, 0x5f, 0x47, 0x07, 0x4d, 0xad, 0xa2, 0x2c, 0x55, 0x5c, 0x9c,
-	0xef, 0xe4, 0x23, 0x09, 0xce, 0x5b, 0x44, 0xbb, 0xef, 0x3d, 0x28, 0x09, 0x12, 0x9c, 0x0c, 0xec,
-	0x30, 0x9d, 0x85, 0x6e, 0xb2, 0xe2, 0x85, 0x97, 0x1d, 0xba, 0x4e, 0x7b, 0xd1, 0x75, 0x16, 0x45,
-	0x42, 0x86, 0x7e, 0x01, 0x5b, 0xd4, 0xb3, 0xf8, 0x4c, 0x39, 0x6b, 0x64, 0x61, 0x7e, 0xf5, 0x66,
-	0x3b, 0x31, 0x78, 0x61, 0xb3, 0x65, 0x7a, 0x49, 0x5e, 0xff, 0x5b, 0x1a, 0x20, 0x2c, 0xa9, 0x5e,
-	0xae, 0x03, 0x22, 0xc8, 0xd8, 0x44, 0x10, 0xe5, 0x73, 0x45, 0xac, 0xc6, 0xe8, 0x13, 0x00, 0x41,
-	0x27, 0xbe, 0x4c, 0xbd, 0xde, 0x48, 0xbb, 0xcd, 0xf3, 0xd2, 0x41, 0x02, 0x8d, 0xf6, 0x21, 0xab,
-	0x1b, 0xe4, 0xcc, 0x95, 0x3c, 0x8d, 0xac, 0xff, 0x31, 0x0d, 0x10, 0x6e, 0xf3, 0x7f, 0x7a, 0x6f,
-	0xad, 0xdb, 0xdf, 0x7c, 0x5f, 0x4d, 0x7d, 0xf7, 0x7d, 0x35, 0xf5, 0xdb, 0x8b, 0x6a, 0xfa, 0x9b,
-	0x8b, 0x6a, 0xfa, 0xdb, 0x8b, 0x6a, 0xfa, 0xef, 0x17, 0xd5, 0xf4, 0xef, 0x7f, 0xa8, 0xa6, 0xbe,
-	0xfd, 0xa1, 0x9a, 0xfa, 0xee, 0x87, 0x6a, 0x6a, 0x98, 0x55, 0x45, 0xf9, 0x8f, 0xfe, 0x1d, 0x00,
-	0x00, 0xff, 0xff, 0x74, 0x9e, 0x83, 0x44, 0x31, 0x18, 0x00, 0x00,
+	// 2537 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x41, 0x73, 0x1b, 0xb7,
+	0x15, 0x26, 0x25, 0x8a, 0x22, 0xdf, 0x92, 0x32, 0x8d, 0x38, 0xc9, 0x8a, 0xb6, 0x29, 0x86, 0x71,
+	0x1c, 0x25, 0x99, 0x52, 0x13, 0x35, 0x93, 0x26, 0x4e, 0xd3, 0x96, 0x14, 0x19, 0x8b, 0xb1, 0x2d,
+	0x73, 0x40, 0x59, 0xad, 0x67, 0x3a, 0xc3, 0x81, 0x76, 0x21, 0x72, 0xab, 0xe5, 0x62, 0x8b, 0x05,
+	0x95, 0xf0, 0xd6, 0x63, 0xc6, 0xbd, 0xf4, 0xd0, 0xab, 0x4e, 0x9d, 0x9e, 0x7a, 0x69, 0xff, 0x41,
+	0x7b, 0xcb, 0x31, 0xc7, 0xf4, 0xa2, 0x69, 0x94, 0x9f, 0xd0, 0x5b, 0x2f, 0xed, 0x00, 0x8b, 0x5d,
+	0x2e, 0x25, 0xd2, 0x72, 0xa7, 0x3e, 0xf4, 0x06, 0x3c, 0x7e, 0xdf, 0x5b, 0xe0, 0xe1, 0xbd, 0x87,
+	0xf7, 0x40, 0x78, 0x77, 0xe0, 0x88, 0xe1, 0xf8, 0xb0, 0x6e, 0xb1, 0xd1, 0x96, 0xcd, 0xac, 0x63,
+	0xca, 0xb7, 0x82, 0x2f, 0x08, 0x1f, 0x1d, 0x3b, 0x62, 0x8b, 0xf8, 0xce, 0x56, 0xe0, 0x53, 0x2b,
+	0xa8, 0xfb, 0x9c, 0x09, 0x86, 0x50, 0x08, 0xa8, 0x47, 0x80, 0xfa, 0xc9, 0xfb, 0xe5, 0xab, 0xf8,
+	0x62, 0xe2, 0x53, 0xcd, 0x2f, 0xdf, 0x18, 0xb0, 0x01, 0x53, 0xc3, 0x2d, 0x39, 0xd2, 0xd2, 0xca,
+	0x80, 0xb1, 0x81, 0x4b, 0xb7, 0xd4, 0xec, 0x70, 0x7c, 0xb4, 0x65, 0x8f, 0x39, 0x11, 0x0e, 0xf3,
+	0xf4, 0xef, 0xeb, 0x17, 0x7f, 0x27, 0xde, 0x64, 0x11, 0xf5, 0x0b, 0x4e, 0x7c, 0x9f, 0x72, 0xfd,
+	0xc1, 0xda, 0x69, 0x06, 0x72, 0x7b, 0xcc, 0xa6, 0x3d, 0x9f, 0x5a, 0xe8, 0x3e, 0x18, 0xc4, 0xf3,
+	0x98, 0x50, 0xba, 0x03, 0x33, 0x5d, 0x4d, 0x6f, 0x1a, 0xdb, 0x1b, 0xf5, 0xcb, 0x7b, 0xaa, 0x37,
+	0xa6, 0xb0, 0x66, 0xe6, 0xeb, 0xb3, 0x8d, 0x14, 0x4e, 0x32, 0xd1, 0x4f, 0xa1, 0x60, 0xd3, 0xc0,
+	0xe1, 0xd4, 0xee, 0x73, 0xe6, 0x52, 0x73, 0xa9, 0x9a, 0xde, 0x5c, 0xdb, 0xbe, 0x35, 0x4f, 0x93,
+	0xfc, 0x38, 0x66, 0x2e, 0xc5, 0x86, 0x66, 0xc8, 0x09, 0xba, 0x0f, 0x30, 0xa2, 0xa3, 0x43, 0xca,
+	0x83, 0xa1, 0xe3, 0x9b, 0xcb, 0x8a, 0xfe, 0xf6, 0x22, 0xba, 0x5c, 0x7b, 0xfd, 0x51, 0x0c, 0xc7,
+	0x09, 0x2a, 0x7a, 0x04, 0x05, 0x72, 0x42, 0x1c, 0x97, 0x1c, 0x3a, 0xae, 0x23, 0x26, 0x66, 0x46,
+	0xa9, 0x7a, 0xe7, 0xb9, 0xaa, 0x1a, 0x09, 0x02, 0x9e, 0xa1, 0xd7, 0x6c, 0x80, 0xe9, 0x87, 0xd0,
+	0x5d, 0x58, 0xed, 0xb6, 0xf7, 0x5a, 0x9d, 0xbd, 0xfb, 0xa5, 0x54, 0x79, 0xfd, 0xd9, 0x69, 0xf5,
+	0x55, 0xa9, 0x63, 0x0a, 0xe8, 0x52, 0xcf, 0x76, 0xbc, 0x01, 0xda, 0x84, 0x5c, 0x63, 0x67, 0xa7,
+	0xdd, 0xdd, 0x6f, 0xb7, 0x4a, 0xe9, 0x72, 0xf9, 0xd9, 0x69, 0xf5, 0xb5, 0x59, 0x60, 0xc3, 0xb2,
+	0xa8, 0x2f, 0xa8, 0x5d, 0xce, 0x7c, 0xf5, 0x87, 0x4a, 0xaa, 0xf6, 0x55, 0x1a, 0x0a, 0xc9, 0x45,
+	0xa0, 0xbb, 0x90, 0x6d, 0xec, 0xec, 0x77, 0x0e, 0xda, 0xa5, 0xd4, 0x94, 0x9e, 0x44, 0x34, 0x2c,
+	0xe1, 0x9c, 0x50, 0x74, 0x07, 0x56, 0xba, 0x8d, 0x27, 0xbd, 0x76, 0x29, 0x3d, 0x5d, 0x4e, 0x12,
+	0xd6, 0x25, 0xe3, 0x40, 0xa1, 0x5a, 0xb8, 0xd1, 0xd9, 0x2b, 0x2d, 0xcd, 0x47, 0xb5, 0x38, 0x71,
+	0x3c, 0xbd, 0x94, 0x3f, 0xad, 0x80, 0xd1, 0xa3, 0xfc, 0xc4, 0xb1, 0x5e, 0xb2, 0x8b, 0x7c, 0x08,
+	0x19, 0x41, 0x82, 0x63, 0xe5, 0x1a, 0xc6, 0x7c, 0xd7, 0xd8, 0x27, 0xc1, 0xb1, 0xfc, 0xa8, 0xa6,
+	0x2b, 0xbc, 0xf4, 0x0c, 0x4e, 0x7d, 0xd7, 0xb1, 0x88, 0xa0, 0xb6, 0xf2, 0x0c, 0x63, 0xfb, 0xad,
+	0x79, 0x6c, 0x1c, 0xa3, 0xf4, 0xfa, 0x77, 0x53, 0x38, 0x41, 0x45, 0x9f, 0x40, 0x76, 0xe0, 0xb2,
+	0x43, 0xe2, 0x2a, 0x9f, 0x30, 0xb6, 0xdf, 0x98, 0xa7, 0xe4, 0xbe, 0x42, 0x4c, 0x15, 0x68, 0x0a,
+	0xfa, 0x1c, 0xd6, 0xa6, 0xaa, 0xfa, 0xbf, 0x62, 0x87, 0x26, 0x2c, 0x56, 0x32, 0x5d, 0xc9, 0xe7,
+	0xec, 0x70, 0x37, 0x85, 0x8b, 0x3c, 0x29, 0x40, 0x3f, 0x01, 0x08, 0xb5, 0x2a, 0x3d, 0x86, 0xd2,
+	0x73, 0x7b, 0xf1, 0x62, 0x42, 0x1d, 0xf9, 0x41, 0x34, 0x41, 0x1f, 0x41, 0x76, 0xec, 0xdb, 0x44,
+	0x50, 0x33, 0xab, 0xb8, 0xd5, 0x79, 0xdc, 0x27, 0x0a, 0xb1, 0xc3, 0xbc, 0x23, 0x67, 0x80, 0x35,
+	0x1e, 0xfd, 0x18, 0x72, 0x9c, 0xb9, 0xee, 0x21, 0xb1, 0x8e, 0xcd, 0xfc, 0x0b, 0x72, 0x63, 0x06,
+	0x7a, 0x00, 0x39, 0x8f, 0x8a, 0x2f, 0x18, 0x3f, 0x0e, 0xcc, 0xd5, 0xea, 0xf2, 0xa6, 0xb1, 0xfd,
+	0xde, 0xdc, 0xb0, 0x0a, 0x31, 0x0d, 0x21, 0x88, 0x35, 0x1c, 0x51, 0x4f, 0x84, 0x8a, 0x9a, 0x4b,
+	0x66, 0x1a, 0xc7, 0x0a, 0xe4, 0x52, 0xa8, 0x67, 0xfb, 0xcc, 0xf1, 0x84, 0x99, 0x5b, 0xbc, 0x94,
+	0xb6, 0xc6, 0x48, 0xb7, 0xc0, 0x31, 0xa3, 0x99, 0x85, 0xcc, 0x88, 0xd9, 0xb4, 0xb6, 0x05, 0xd7,
+	0x2f, 0x1d, 0x3b, 0x2a, 0x43, 0x4e, 0x1b, 0x3c, 0xf4, 0xd7, 0x0c, 0x8e, 0xe7, 0xb5, 0x6b, 0x50,
+	0x9c, 0x39, 0xe2, 0x9a, 0x05, 0xc5, 0x99, 0xe3, 0x42, 0x6f, 0xc1, 0xda, 0x88, 0x7c, 0xd9, 0xb7,
+	0x98, 0x67, 0x8d, 0x39, 0xa7, 0x9e, 0xd0, 0x3a, 0x8a, 0x23, 0xf2, 0xe5, 0x4e, 0x2c, 0x44, 0xef,
+	0xc1, 0x75, 0xc1, 0x04, 0x71, 0xfb, 0x16, 0x1b, 0xf9, 0x2e, 0x0d, 0xa3, 0x63, 0x49, 0x21, 0x4b,
+	0xea, 0x87, 0x9d, 0xa9, 0xbc, 0x66, 0x40, 0x3e, 0x3e, 0xcb, 0xda, 0x9f, 0x57, 0x20, 0x17, 0x79,
+	0x3a, 0x7a, 0x00, 0x40, 0x62, 0x43, 0x69, 0x43, 0xbc, 0xf3, 0x42, 0x56, 0x95, 0x74, 0xe9, 0xe1,
+	0x53, 0x3a, 0x6a, 0x40, 0xde, 0x62, 0x9e, 0x20, 0x8e, 0x47, 0xb9, 0x8e, 0xd4, 0xb9, 0xfe, 0xb9,
+	0x13, 0x81, 0xb4, 0x8e, 0x29, 0x0b, 0x35, 0x61, 0x75, 0x40, 0x3d, 0xca, 0x1d, 0x4b, 0x3b, 0xf8,
+	0xdd, 0xb9, 0x8e, 0x19, 0x42, 0xf0, 0xd8, 0x13, 0xce, 0x88, 0x6a, 0x2d, 0x11, 0x11, 0x7d, 0x06,
+	0x79, 0x4e, 0x03, 0x36, 0xe6, 0x16, 0x0d, 0x74, 0xb8, 0x6f, 0xce, 0x0f, 0x93, 0x10, 0x84, 0xe9,
+	0xaf, 0xc7, 0x0e, 0xa7, 0x72, 0x0b, 0x01, 0x9e, 0x52, 0xd1, 0x27, 0xb0, 0xca, 0x69, 0x20, 0x08,
+	0x17, 0xcf, 0x8b, 0x58, 0x1c, 0x42, 0xba, 0xcc, 0x75, 0xac, 0x09, 0x8e, 0x18, 0xe8, 0x13, 0xc8,
+	0xfb, 0x2e, 0xb1, 0x94, 0x56, 0x73, 0x65, 0x71, 0x8c, 0x75, 0x23, 0x10, 0x9e, 0xe2, 0xd1, 0xc7,
+	0x00, 0x2e, 0x1b, 0xf4, 0x6d, 0xee, 0x9c, 0x50, 0xae, 0xa3, 0xac, 0x3c, 0x8f, 0xdd, 0x52, 0x08,
+	0x9c, 0x77, 0xd9, 0x20, 0x1c, 0xa2, 0xfb, 0xff, 0x53, 0x90, 0x24, 0x02, 0xe4, 0x0d, 0x28, 0x1c,
+	0x31, 0x6e, 0xd1, 0xbe, 0x8e, 0xf5, 0xbc, 0xf2, 0x2d, 0x43, 0xc9, 0xc2, 0x00, 0x45, 0xbf, 0x84,
+	0x57, 0x22, 0x6b, 0xf5, 0x39, 0x3d, 0xa2, 0x9c, 0x7a, 0xd2, 0xe4, 0x86, 0xfa, 0xec, 0x5b, 0xcf,
+	0x37, 0xb9, 0x46, 0xeb, 0x54, 0x8b, 0xf8, 0xc5, 0x1f, 0x82, 0x66, 0x1e, 0x56, 0x79, 0x78, 0xc0,
+	0xb5, 0xdf, 0xa6, 0x65, 0x9c, 0x5d, 0x40, 0xa0, 0x2d, 0x30, 0xe2, 0xcf, 0x3b, 0xb6, 0x72, 0xb8,
+	0x7c, 0x73, 0xed, 0xfc, 0x6c, 0x03, 0x22, 0x6c, 0xa7, 0x25, 0x33, 0xb0, 0x1e, 0xdb, 0xa8, 0x0d,
+	0xc5, 0x98, 0x20, 0x8b, 0x20, 0x5d, 0x26, 0x54, 0x9f, 0xb7, 0xd2, 0xfd, 0x89, 0x4f, 0x71, 0x81,
+	0x27, 0x66, 0xb5, 0x5f, 0x00, 0xba, 0xec, 0x80, 0x08, 0x41, 0xe6, 0xd8, 0xf1, 0xf4, 0x32, 0xb0,
+	0x1a, 0xa3, 0x3a, 0xac, 0xfa, 0x64, 0xe2, 0x32, 0x62, 0x6b, 0x3f, 0xbc, 0x51, 0x0f, 0xcb, 0xa3,
+	0x7a, 0x54, 0x1e, 0xd5, 0x1b, 0xde, 0x04, 0x47, 0xa0, 0xda, 0x03, 0x78, 0x75, 0x6e, 0x9c, 0xa1,
+	0x6d, 0x28, 0xc4, 0x31, 0x32, 0xdd, 0xeb, 0xb5, 0xf3, 0xb3, 0x0d, 0x23, 0x0e, 0xa6, 0x4e, 0x0b,
+	0x1b, 0x31, 0xa8, 0x63, 0xd7, 0xfe, 0xba, 0x06, 0xc5, 0x99, 0x48, 0x43, 0x37, 0x60, 0xc5, 0x19,
+	0x91, 0x01, 0xd5, 0x6b, 0x0c, 0x27, 0xa8, 0x0d, 0x59, 0x97, 0x1c, 0x52, 0x57, 0xc6, 0x8a, 0x3c,
+	0xb8, 0x1f, 0x5c, 0x19, 0xb2, 0xf5, 0x87, 0x0a, 0xdf, 0xf6, 0x04, 0x9f, 0x60, 0x4d, 0x46, 0x26,
+	0xac, 0x5a, 0x6c, 0x34, 0x22, 0x9e, 0xbc, 0x24, 0x97, 0x37, 0xf3, 0x38, 0x9a, 0x4a, 0xcb, 0x10,
+	0x3e, 0x08, 0xcc, 0x8c, 0x12, 0xab, 0xb1, 0xcc, 0x91, 0x43, 0x16, 0x08, 0x8f, 0x8c, 0xa8, 0xb9,
+	0xa6, 0x56, 0x13, 0xcf, 0x51, 0x09, 0x96, 0xa9, 0x77, 0x62, 0xae, 0x28, 0xb8, 0x1c, 0x4a, 0x89,
+	0xed, 0x84, 0x81, 0x90, 0xc7, 0x72, 0x28, 0x75, 0x8e, 0x03, 0xca, 0xcd, 0xd5, 0xd0, 0xda, 0x72,
+	0x8c, 0x5e, 0x83, 0xec, 0x80, 0xb3, 0xb1, 0x1f, 0x7a, 0x60, 0x1e, 0xeb, 0x99, 0xbc, 0xef, 0x7c,
+	0xee, 0x9c, 0x38, 0x2e, 0x1d, 0xd0, 0xc0, 0x7c, 0x4d, 0x1d, 0x44, 0x65, 0x6e, 0x2c, 0xc6, 0x28,
+	0x9c, 0x60, 0xa0, 0x3a, 0x64, 0x1c, 0xcf, 0x11, 0xe6, 0xeb, 0x3a, 0x0e, 0x2f, 0x1e, 0x61, 0x93,
+	0x31, 0xf7, 0x80, 0xb8, 0x63, 0x8a, 0x15, 0x0e, 0xad, 0xc3, 0xb2, 0x10, 0x13, 0xb3, 0x58, 0x4d,
+	0x6f, 0xe6, 0x9a, 0xab, 0xe7, 0x67, 0x1b, 0xcb, 0xfb, 0xfb, 0x4f, 0xb1, 0x94, 0xa1, 0xdb, 0x00,
+	0xcc, 0xa7, 0x5e, 0x3f, 0x10, 0xb6, 0xe3, 0x99, 0x48, 0x22, 0x70, 0x5e, 0x4a, 0x7a, 0x52, 0x80,
+	0x6e, 0xca, 0xcc, 0x45, 0xec, 0x3e, 0xf3, 0xdc, 0x89, 0xf9, 0x8a, 0xfa, 0x35, 0x27, 0x05, 0x8f,
+	0x3d, 0x77, 0x82, 0x36, 0xc0, 0x08, 0x04, 0xf3, 0xfb, 0x81, 0x33, 0xf0, 0x88, 0x6b, 0xde, 0x50,
+	0x3b, 0x07, 0x29, 0xea, 0x29, 0x09, 0xfa, 0x11, 0x64, 0x47, 0x6c, 0xec, 0x89, 0xc0, 0xcc, 0xa9,
+	0x83, 0x5c, 0x9f, 0xb7, 0xc7, 0x47, 0x12, 0xa1, 0xa3, 0x4e, 0xc3, 0x51, 0x1b, 0xae, 0x2b, 0xcd,
+	0x03, 0x4e, 0x2c, 0xda, 0xf7, 0x29, 0x77, 0x98, 0xad, 0xef, 0xe7, 0xf5, 0x4b, 0xbb, 0x6d, 0xe9,
+	0x56, 0x00, 0x5f, 0x93, 0x9c, 0xfb, 0x92, 0xd2, 0x55, 0x0c, 0xd4, 0x85, 0x82, 0x3f, 0x76, 0xdd,
+	0x3e, 0xf3, 0xc3, 0xdb, 0x28, 0x4c, 0xe0, 0x2f, 0xe0, 0x4e, 0xdd, 0xb1, 0xeb, 0x3e, 0x0e, 0x49,
+	0xd8, 0xf0, 0xa7, 0x13, 0xf4, 0x29, 0xac, 0x06, 0xd4, 0xe2, 0x54, 0x04, 0x66, 0x41, 0x6d, 0xe9,
+	0xcd, 0x79, 0xca, 0x7a, 0x0a, 0x12, 0xe7, 0x05, 0x1c, 0x71, 0x24, 0xdd, 0x52, 0x69, 0x2d, 0x30,
+	0x5f, 0x5d, 0x4c, 0xd7, 0x99, 0x6f, 0x4a, 0xd7, 0x1c, 0x19, 0x2e, 0xd2, 0x27, 0x03, 0xf3, 0xba,
+	0x72, 0xa7, 0x70, 0x82, 0x9e, 0x02, 0xd8, 0x5e, 0xd0, 0x0f, 0x41, 0xe6, 0x35, 0xb5, 0xc7, 0xf7,
+	0xae, 0xde, 0x63, 0x6b, 0xaf, 0xa7, 0xeb, 0x90, 0xe2, 0xf9, 0xd9, 0x46, 0x3e, 0x9e, 0xe2, 0xbc,
+	0xed, 0x05, 0xe1, 0x10, 0x35, 0xc1, 0x18, 0x52, 0xe2, 0x8a, 0xa1, 0x35, 0xa4, 0xd6, 0xb1, 0x59,
+	0x5a, 0x5c, 0x96, 0xec, 0x2a, 0x98, 0xd6, 0x90, 0x24, 0xa1, 0x0e, 0xe4, 0x9d, 0x80, 0xb9, 0xea,
+	0x88, 0x4c, 0x53, 0xe5, 0xb7, 0x17, 0x58, 0x5d, 0x27, 0xa2, 0xe0, 0x29, 0x1b, 0xdd, 0x82, 0xbc,
+	0xef, 0xd8, 0xc1, 0x43, 0x67, 0xe4, 0x08, 0x73, 0xbd, 0x9a, 0xde, 0x5c, 0xc6, 0x53, 0x01, 0xda,
+	0x85, 0xd5, 0x60, 0x12, 0x58, 0xc2, 0x0d, 0xcc, 0xb2, 0x32, 0x6e, 0xfd, 0xea, 0xcf, 0xf4, 0x42,
+	0x42, 0x98, 0x38, 0x22, 0xba, 0xac, 0x78, 0x2c, 0xe2, 0xeb, 0x5e, 0xa0, 0x4f, 0x6c, 0xdb, 0xbc,
+	0xa9, 0x0c, 0x5e, 0x9c, 0x4a, 0x1b, 0xb6, 0x8d, 0xde, 0x86, 0x6b, 0x09, 0x98, 0xcd, 0x99, 0x6f,
+	0xde, 0x52, 0xb8, 0x04, 0xbb, 0xc5, 0x99, 0x2f, 0x6b, 0x88, 0xb1, 0x2b, 0xd7, 0x18, 0x98, 0xb7,
+	0xd5, 0xca, 0x36, 0xaf, 0x5e, 0xd9, 0x13, 0x45, 0xc0, 0x11, 0xb1, 0xfc, 0x31, 0x18, 0x89, 0x24,
+	0x27, 0x13, 0xd0, 0x31, 0x9d, 0xe8, 0xbc, 0x29, 0x87, 0xd2, 0x39, 0x4e, 0x64, 0xcc, 0xab, 0xc4,
+	0x9e, 0xc7, 0xe1, 0xe4, 0xde, 0xd2, 0x47, 0xe9, 0xf2, 0x36, 0x18, 0x09, 0x87, 0x46, 0x6f, 0xca,
+	0x4b, 0x67, 0xe0, 0x04, 0x82, 0x4f, 0xfa, 0x64, 0x2c, 0x86, 0xe6, 0xcf, 0x14, 0xa1, 0x10, 0x09,
+	0x1b, 0x63, 0x31, 0x2c, 0xf7, 0x61, 0xea, 0x11, 0xa8, 0x0a, 0x86, 0xcc, 0x83, 0x01, 0xe5, 0x27,
+	0x94, 0xcb, 0x12, 0x52, 0x6e, 0x32, 0x29, 0x92, 0x99, 0x2e, 0xa0, 0x84, 0x5b, 0x43, 0x95, 0xb2,
+	0xf3, 0x58, 0xcf, 0x64, 0x0e, 0x8e, 0x82, 0x4f, 0xe7, 0x60, 0x3d, 0x2d, 0xdf, 0x83, 0x42, 0xd2,
+	0xf8, 0xff, 0xd5, 0x86, 0x5a, 0x90, 0x0d, 0xcd, 0x23, 0xb3, 0xae, 0xca, 0xd8, 0xfa, 0x8e, 0x53,
+	0xd9, 0x1a, 0x41, 0x26, 0x60, 0x47, 0x42, 0xd1, 0x96, 0xb1, 0x1a, 0x4b, 0xd9, 0x90, 0xf0, 0xb0,
+	0x5b, 0x5a, 0xc6, 0x6a, 0x5c, 0xfb, 0x4b, 0x1a, 0xf2, 0xb1, 0x9b, 0xa1, 0x0f, 0xe0, 0x7a, 0xa7,
+	0xf7, 0xf8, 0x61, 0x63, 0xbf, 0xf3, 0x78, 0xaf, 0xdf, 0x6a, 0x7f, 0xd6, 0x78, 0xf2, 0x70, 0xbf,
+	0x94, 0x2a, 0xdf, 0x7e, 0x76, 0x5a, 0x5d, 0x9f, 0xde, 0x68, 0x11, 0xbc, 0x45, 0x8f, 0xc8, 0xd8,
+	0x15, 0xb3, 0xac, 0x2e, 0x7e, 0xbc, 0xd3, 0xee, 0xf5, 0x4a, 0xe9, 0x45, 0xac, 0x2e, 0x67, 0x16,
+	0x0d, 0x02, 0xb4, 0x0d, 0xa5, 0x29, 0x6b, 0xf7, 0x69, 0xb7, 0x8d, 0x0f, 0x4a, 0x4b, 0xe5, 0x5b,
+	0xcf, 0x4e, 0xab, 0xe6, 0x65, 0xd2, 0xee, 0xc4, 0xa7, 0xfc, 0x40, 0x37, 0xa3, 0xff, 0x4c, 0x43,
+	0x21, 0xd9, 0x01, 0xa0, 0x9d, 0xb0, 0xee, 0x57, 0x06, 0x58, 0xdb, 0xde, 0xba, 0xaa, 0x63, 0x50,
+	0x55, 0x84, 0x3b, 0x96, 0x7a, 0x1f, 0x31, 0x9b, 0x62, 0x45, 0x46, 0x1f, 0xc0, 0x8a, 0xcf, 0xb8,
+	0x88, 0xee, 0xdb, 0xf9, 0x57, 0x11, 0xe3, 0x51, 0x49, 0x16, 0x82, 0x6b, 0x43, 0x58, 0x9b, 0xd5,
+	0x86, 0xee, 0xc0, 0xf2, 0x41, 0xa7, 0x5b, 0x4a, 0x95, 0x6f, 0x3e, 0x3b, 0xad, 0xbe, 0x3e, 0xfb,
+	0xe3, 0x81, 0xc3, 0xc5, 0x98, 0xb8, 0x9d, 0x2e, 0x7a, 0x17, 0x56, 0x5a, 0x7b, 0x3d, 0x8c, 0x4b,
+	0xe9, 0xf2, 0xc6, 0xb3, 0xd3, 0xea, 0xcd, 0x59, 0x9c, 0xfc, 0x89, 0x8d, 0x3d, 0x1b, 0xb3, 0xc3,
+	0xb8, 0x05, 0xff, 0xd7, 0x12, 0x18, 0xba, 0x0c, 0x79, 0xd9, 0xaf, 0x34, 0xc5, 0xb0, 0xa4, 0x8d,
+	0xb2, 0xe7, 0xd2, 0x95, 0x95, 0x6d, 0x21, 0x24, 0xe8, 0xc8, 0x78, 0x03, 0x0a, 0x8e, 0x7f, 0xf2,
+	0x61, 0x9f, 0x7a, 0xe4, 0xd0, 0xd5, 0xdd, 0x78, 0x0e, 0x1b, 0x52, 0xd6, 0x0e, 0x45, 0xb2, 0xb0,
+	0x70, 0x3c, 0x41, 0xb9, 0xa7, 0xfb, 0xec, 0x1c, 0x8e, 0xe7, 0xe8, 0x53, 0xc8, 0x38, 0x3e, 0x19,
+	0xe9, 0x72, 0x7c, 0xee, 0x0e, 0x3a, 0xdd, 0xc6, 0x23, 0x1d, 0xb9, 0xcd, 0xdc, 0xf9, 0xd9, 0x46,
+	0x46, 0x0a, 0xb0, 0xa2, 0xa1, 0x4a, 0xd4, 0x2b, 0xc9, 0x2f, 0xa9, 0x62, 0x24, 0x87, 0x13, 0x12,
+	0x19, 0x7d, 0x8e, 0x37, 0xe0, 0x34, 0x08, 0x54, 0x59, 0x92, 0xc3, 0xd1, 0x14, 0x95, 0x61, 0x55,
+	0xd7, 0xd5, 0xaa, 0xc5, 0xca, 0xcb, 0x6e, 0x45, 0x0b, 0x9a, 0x45, 0x30, 0x42, 0x6b, 0xf4, 0x8f,
+	0x38, 0x1b, 0xd5, 0xfe, 0x9d, 0x01, 0x63, 0xc7, 0x1d, 0x07, 0x42, 0xd7, 0x6c, 0x2f, 0xcd, 0xf8,
+	0x4f, 0xe1, 0x3a, 0x51, 0xaf, 0x3e, 0xc4, 0x93, 0x97, 0xbc, 0x6a, 0x57, 0xf4, 0x01, 0xdc, 0x99,
+	0xab, 0x2e, 0x06, 0x87, 0xad, 0x4d, 0x33, 0x2b, 0x75, 0x9a, 0x69, 0x5c, 0x22, 0x17, 0x7e, 0x41,
+	0x3d, 0x28, 0x32, 0x6e, 0x0d, 0x69, 0x20, 0xc2, 0xd2, 0x40, 0xbf, 0x92, 0xcc, 0x7d, 0x3f, 0x7b,
+	0x9c, 0x04, 0xea, 0x1b, 0x31, 0x5c, 0xed, 0xac, 0x0e, 0xf4, 0x11, 0x64, 0x38, 0x39, 0x8a, 0x5a,
+	0xaf, 0xb9, 0x41, 0x82, 0xc9, 0x91, 0x98, 0x51, 0xa1, 0x18, 0xe8, 0x73, 0x00, 0xdb, 0x09, 0x7c,
+	0x22, 0xac, 0x21, 0xe5, 0xfa, 0xb0, 0xe7, 0x6e, 0xb1, 0x15, 0xa3, 0x66, 0xb4, 0x24, 0xd8, 0xe8,
+	0x01, 0xe4, 0x2d, 0x12, 0xb9, 0x6b, 0x76, 0xf1, 0xd3, 0xd1, 0x4e, 0x43, 0xab, 0x28, 0x49, 0x15,
+	0xe7, 0x67, 0x1b, 0xb9, 0x48, 0x82, 0x73, 0x16, 0xd1, 0xee, 0xfb, 0x00, 0x8a, 0x82, 0x04, 0xc7,
+	0x7d, 0x3b, 0x4c, 0x67, 0xa1, 0x9b, 0x2c, 0xb8, 0xe1, 0x65, 0x87, 0xae, 0xd3, 0x5e, 0x74, 0x9c,
+	0x05, 0x91, 0x90, 0xa1, 0x9f, 0xc3, 0x75, 0xea, 0x59, 0x7c, 0xa2, 0x9c, 0x35, 0x5a, 0x61, 0x6e,
+	0xf1, 0x66, 0xdb, 0x31, 0x78, 0x66, 0xb3, 0x25, 0x7a, 0x41, 0x5e, 0xfb, 0x7b, 0x1a, 0x20, 0x2c,
+	0xa9, 0x5e, 0xae, 0x03, 0x22, 0xc8, 0xd8, 0x44, 0x10, 0xe5, 0x73, 0x05, 0xac, 0xc6, 0xe8, 0x1e,
+	0x80, 0xa0, 0x23, 0x5f, 0xa6, 0x5e, 0x6f, 0xa0, 0xdd, 0xe6, 0x79, 0xe9, 0x20, 0x81, 0x46, 0xdb,
+	0x90, 0xd5, 0x0d, 0x72, 0xe6, 0x4a, 0x9e, 0x46, 0xd6, 0xfe, 0x98, 0x06, 0x08, 0xb7, 0xf9, 0x7f,
+	0xbd, 0xb7, 0xda, 0xdf, 0x56, 0x00, 0x0e, 0x98, 0x3b, 0x1e, 0xbd, 0xe4, 0x47, 0xd0, 0x1b, 0xb0,
+	0xa2, 0x9a, 0xa2, 0xe8, 0x92, 0x57, 0x93, 0x84, 0x25, 0x97, 0x5f, 0xd4, 0x92, 0xa8, 0x0d, 0x86,
+	0xcc, 0x03, 0x41, 0xd0, 0x57, 0x17, 0x62, 0x66, 0xb1, 0xe3, 0x85, 0xfb, 0x68, 0x28, 0xb0, 0xba,
+	0x05, 0x81, 0xc4, 0x63, 0x74, 0x6f, 0x5a, 0xe1, 0xaf, 0xa8, 0xdb, 0xb0, 0xba, 0x58, 0x85, 0xae,
+	0xf3, 0xe3, 0xf2, 0x9e, 0xc2, 0x7a, 0xa8, 0xd5, 0xd1, 0x4f, 0xe7, 0x89, 0x77, 0x1c, 0x1d, 0xab,
+	0x73, 0x53, 0xd0, 0x3e, 0xf3, 0x99, 0xcb, 0x06, 0x49, 0x3c, 0x5e, 0xac, 0x09, 0xed, 0x86, 0xe5,
+	0xa9, 0x25, 0xab, 0x4e, 0x4e, 0xbc, 0x01, 0xd5, 0x61, 0x3b, 0xff, 0x69, 0x4b, 0x23, 0xb1, 0x04,
+	0x86, 0x15, 0x6c, 0x3c, 0x45, 0x4f, 0x2e, 0xfc, 0x37, 0x90, 0x53, 0x55, 0xc4, 0xfb, 0xcf, 0xd9,
+	0xb1, 0xac, 0x21, 0xb4, 0xfd, 0x16, 0xff, 0x47, 0xf0, 0xfb, 0x34, 0xa0, 0xcb, 0x20, 0xb4, 0x99,
+	0x78, 0xc3, 0x57, 0xc5, 0xce, 0x65, 0x8c, 0x7e, 0xc5, 0xbf, 0x3b, 0x7d, 0xc5, 0x57, 0xa5, 0xc4,
+	0x65, 0x60, 0xf8, 0x8e, 0x7f, 0x77, 0xfa, 0x8e, 0xbf, 0x00, 0x97, 0x78, 0xc9, 0x6f, 0xde, 0xf9,
+	0xfa, 0xbb, 0x4a, 0xea, 0xdb, 0xef, 0x2a, 0xa9, 0xdf, 0x9c, 0x57, 0xd2, 0x5f, 0x9f, 0x57, 0xd2,
+	0xdf, 0x9c, 0x57, 0xd2, 0xff, 0x38, 0xaf, 0xa4, 0x7f, 0xf7, 0x7d, 0x25, 0xf5, 0xcd, 0xf7, 0x95,
+	0xd4, 0xb7, 0xdf, 0x57, 0x52, 0x87, 0x59, 0xd5, 0x58, 0xfe, 0xf0, 0x3f, 0x01, 0x00, 0x00, 0xff,
+	0xff, 0xd2, 0x36, 0x0b, 0x87, 0xf5, 0x1a, 0x00, 0x00,
 }
 
 func (m *NodeSpec) Copy() *NodeSpec {
@@ -2028,6 +2152,46 @@ func (m *ConfigSpec) CopyFrom(src interface{}) {
 	}
 }
 
+func (m *VolumeSpec) Copy() *VolumeSpec {
+	if m == nil {
+		return nil
+	}
+	o := &VolumeSpec{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *VolumeSpec) CopyFrom(src interface{}) {
+
+	o := src.(*VolumeSpec)
+	*m = *o
+	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations)
+	if o.Driver != nil {
+		m.Driver = &Driver{}
+		github_com_docker_swarmkit_api_deepcopy.Copy(m.Driver, o.Driver)
+	}
+	if o.AccessMode != nil {
+		m.AccessMode = &VolumeAccessMode{}
+		github_com_docker_swarmkit_api_deepcopy.Copy(m.AccessMode, o.AccessMode)
+	}
+	if o.Secrets != nil {
+		m.Secrets = make([]*VolumeSecret, len(o.Secrets))
+		for i := range m.Secrets {
+			m.Secrets[i] = &VolumeSecret{}
+			github_com_docker_swarmkit_api_deepcopy.Copy(m.Secrets[i], o.Secrets[i])
+		}
+	}
+
+	if o.AccessibilityRequirements != nil {
+		m.AccessibilityRequirements = &TopologyRequirement{}
+		github_com_docker_swarmkit_api_deepcopy.Copy(m.AccessibilityRequirements, o.AccessibilityRequirements)
+	}
+	if o.CapacityRange != nil {
+		m.CapacityRange = &CapacityRange{}
+		github_com_docker_swarmkit_api_deepcopy.Copy(m.CapacityRange, o.CapacityRange)
+	}
+}
+
 func (m *NodeSpec) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -3510,6 +3674,113 @@ func (m *ConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
+func (m *VolumeSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *VolumeSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VolumeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Availability != 0 {
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Availability))
+		i--
+		dAtA[i] = 0x40
+	}
+	if m.CapacityRange != nil {
+		{
+			size, err := m.CapacityRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintSpecs(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if m.AccessibilityRequirements != nil {
+		{
+			size, err := m.AccessibilityRequirements.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintSpecs(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if len(m.Secrets) > 0 {
+		for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintSpecs(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if m.AccessMode != nil {
+		{
+			size, err := m.AccessMode.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintSpecs(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.Driver != nil {
+		{
+			size, err := m.Driver.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintSpecs(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Group) > 0 {
+		i -= len(m.Group)
+		copy(dAtA[i:], m.Group)
+		i = encodeVarintSpecs(dAtA, i, uint64(len(m.Group)))
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.Annotations.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintSpecs(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
 func encodeVarintSpecs(dAtA []byte, offset int, v uint64) int {
 	offset -= sovSpecs(v)
 	base := offset
@@ -4131,6 +4402,46 @@ func (m *ConfigSpec) Size() (n int) {
 	return n
 }
 
+func (m *VolumeSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Annotations.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	l = len(m.Group)
+	if l > 0 {
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.Driver != nil {
+		l = m.Driver.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.AccessMode != nil {
+		l = m.AccessMode.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if len(m.Secrets) > 0 {
+		for _, e := range m.Secrets {
+			l = e.Size()
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	if m.AccessibilityRequirements != nil {
+		l = m.AccessibilityRequirements.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.CapacityRange != nil {
+		l = m.CapacityRange.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.Availability != 0 {
+		n += 1 + sovSpecs(uint64(m.Availability))
+	}
+	return n
+}
+
 func sovSpecs(x uint64) (n int) {
 	return (math_bits.Len64(x|1) + 6) / 7
 }
@@ -4536,6 +4847,28 @@ func (this *ConfigSpec) String() string {
 	}, "")
 	return s
 }
+func (this *VolumeSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForSecrets := "[]*VolumeSecret{"
+	for _, f := range this.Secrets {
+		repeatedStringForSecrets += strings.Replace(fmt.Sprintf("%v", f), "VolumeSecret", "VolumeSecret", 1) + ","
+	}
+	repeatedStringForSecrets += "}"
+	s := strings.Join([]string{`&VolumeSpec{`,
+		`Annotations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Annotations), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`,
+		`Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+		`Driver:` + strings.Replace(fmt.Sprintf("%v", this.Driver), "Driver", "Driver", 1) + `,`,
+		`AccessMode:` + strings.Replace(fmt.Sprintf("%v", this.AccessMode), "VolumeAccessMode", "VolumeAccessMode", 1) + `,`,
+		`Secrets:` + repeatedStringForSecrets + `,`,
+		`AccessibilityRequirements:` + strings.Replace(fmt.Sprintf("%v", this.AccessibilityRequirements), "TopologyRequirement", "TopologyRequirement", 1) + `,`,
+		`CapacityRange:` + strings.Replace(fmt.Sprintf("%v", this.CapacityRange), "CapacityRange", "CapacityRange", 1) + `,`,
+		`Availability:` + fmt.Sprintf("%v", this.Availability) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func valueToStringSpecs(v interface{}) string {
 	rv := reflect.ValueOf(v)
 	if rv.IsNil() {
@@ -8538,6 +8871,318 @@ func (m *ConfigSpec) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
+func (m *VolumeSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSpecs
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: VolumeSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: VolumeSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Group = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Driver == nil {
+				m.Driver = &Driver{}
+			}
+			if err := m.Driver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AccessMode", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AccessMode == nil {
+				m.AccessMode = &VolumeAccessMode{}
+			}
+			if err := m.AccessMode.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Secrets = append(m.Secrets, &VolumeSecret{})
+			if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AccessibilityRequirements", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AccessibilityRequirements == nil {
+				m.AccessibilityRequirements = &TopologyRequirement{}
+			}
+			if err := m.AccessibilityRequirements.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CapacityRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CapacityRange == nil {
+				m.CapacityRange = &CapacityRange{}
+			}
+			if err := m.CapacityRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Availability", wireType)
+			}
+			m.Availability = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Availability |= VolumeSpec_VolumeAvailability(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSpecs(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func skipSpecs(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0

+ 68 - 0
vendor/github.com/docker/swarmkit/api/specs.proto

@@ -506,3 +506,71 @@ message ConfigSpec {
 	// - golang: Go templating
 	Driver templating = 3;
 }
+
+message VolumeSpec {
+	// Annotations includes the name and labels of a volume. The name used in the
+	// spec's Annotations will be passed to the Plugin as the "Name" in the
+	// CreateVolume request.
+	Annotations annotations = 1 [(gogoproto.nullable) = false];
+
+	// Group defines the volume group this particular volume belongs to. When
+	// requesting volumes for a workload, the group name can be used instead of
+	// the volume's name, which tells swarmkit to pick one from the many volumes
+	// belonging to that group.
+	string group = 2;
+
+	// Driver represents the CSI Plugin object and its configuration parameters.
+	// The "options" field of the Driver object is passed in the CSI
+	// CreateVolumeRequest as the "parameters" field. The Driver must be
+	// specified; there is no default CSI Plugin.
+	Driver driver = 3;
+
+	// AccessMode is similar to, and used to determine, the volume access mode as
+	// defined in the CSI spec, as well as the volume type (block vs mount). In
+	// this way, it is more similar to the VolumeCapability message in the CSI
+	// spec.
+	VolumeAccessMode access_mode = 4;
+
+	// Secrets represents a set of key/value pairs to pass to the CSI plugin. The
+	// keys of the secrets can be anything, but the values refer to swarmkit
+	// Secret objects. See the "Secrets Requirements" section of the CSI Plugin
+	// Spec for more information.
+	repeated VolumeSecret secrets = 5;
+
+	// AccessibilityRequirements specifies where a volume must be accessible
+	// from.
+	//
+	// This field must be empty if the plugin does not support
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the
+	// plugin does not support it, volume will not be created.
+	//
+	// If AccessibilityRequirements is empty, but the plugin does support
+	// VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire
+	// cluster is a valid target for the volume.
+	TopologyRequirement AccessibilityRequirements = 6;
+
+	// CapacityRange is the capacity this volume should be created with. If nil,
+	// the plugin will decide the capacity.
+	CapacityRange capacity_range = 7;
+
+	enum VolumeAvailability {
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		// Active allows a volume to be used and scheduled to. This is the
+		// default state.
+		ACTIVE = 0 [(gogoproto.enumvalue_customname) = "VolumeAvailabilityActive"];
+
+		// Pause prevents volumes from having new workloads scheduled to use
+		// them, even if they're already published on a Node.
+		PAUSE = 1 [(gogoproto.enumvalue_customname) = "VolumeAvailabilityPause"];
+
+		// Drain causes existing workloads using this volume to be rescheduled,
+		// causing the volume to be unpublished and removed from nodes.
+		DRAIN = 2 [(gogoproto.enumvalue_customname) = "VolumeAvailabilityDrain"];
+	}
+
+	// Availability is the Volume's desired availability. Analogous to Node
+	// Availability, this allows the user to take volumes offline in order to
+	// update or delete them.
+	VolumeAvailability availability = 8;
+}

File diff suppressed because it is too large
+ 2715 - 1941
vendor/github.com/docker/swarmkit/api/types.pb.go


+ 507 - 0
vendor/github.com/docker/swarmkit/api/types.proto

@@ -62,6 +62,7 @@ enum ResourceType {
 	TASK = 0;
 	SECRET = 1;
 	CONFIG = 2;
+	VOLUME = 3;
 }
 
 message Resources {
@@ -140,6 +141,8 @@ message NodeDescription {
 
 	// FIPS indicates whether the node has FIPS-enabled
 	bool fips = 6 [(gogoproto.customname) = "FIPS"];
+
+	repeated NodeCSIInfo csi_info = 7 [(gogoproto.customname) = "CSIInfo"];
 }
 
 message NodeTLSInfo {
@@ -151,6 +154,27 @@ message NodeTLSInfo {
 	bytes cert_issuer_public_key = 3;
 }
 
+// NodeCSIInfo represents information about a Node returned by calling the
+// NodeGetInfo RPC on the CSI plugin present on the node. There is a separate
+// NodeCSIInfo object for each CSI plugin present.
+message NodeCSIInfo {
+
+	// PluginName is the name of the CSI plugin.
+	string plugin_name = 1;
+
+	// NodeID is the ID of the node as reported by the CSI plugin. This will be
+	// different from the swarmkit node ID.
+	string node_id = 2;
+
+	// MaxVolumesPerNode is the maximum number of volumes that may be published
+	// to this node.
+	int64 max_volumes_per_node = 3;
+
+	// AccessibleTopology indicates the location of this node in the CSI plugin's
+	// topology
+	Topology accessible_topology = 4;
+}
+
 message RaftMemberStatus {
 	bool leader = 1;
 
@@ -215,6 +239,7 @@ message Mount {
 		VOLUME = 1 [(gogoproto.enumvalue_customname) = "MountTypeVolume"];  // Remote storage volumes
 		TMPFS = 2 [(gogoproto.enumvalue_customname) = "MountTypeTmpfs"]; // Mount a tmpfs
 		NPIPE = 3 [(gogoproto.enumvalue_customname) = "MountTypeNamedPipe"]; // Windows named pipes
+		CSI = 4 [(gogoproto.enumvalue_customname) = "MountTypeCSI"]; // CSI volume
 	}
 
 	// Type defines the nature of the mount.
@@ -222,6 +247,10 @@ message Mount {
 
 	// Source specifies the name of the mount. Depending on mount type, this
 	// may be a volume name or a host path, or even ignored.
+	//
+	// For CSI type mounts, the source is either the name of the volume or the
+	// name of the volume group. To specify a volume group, the source should be
+	// prefixed with "group:", as in "group:groupname"
 	string source = 2;
 
 	// Target path in container
@@ -1131,3 +1160,481 @@ message JobStatus {
 	// newly added nodes executing long-forgotten jobs.
 	google.protobuf.Timestamp last_execution = 2;
 }
+
+// VolumeAccessMode is the access mode of the volume, and is used to determine
+// the CSI AccessMode value, as well as the volume access type (block vs
+// mount). In this way, it is more similar to the CSI VolumeCapability message.
+//
+// This defines how and where a volume can be accessed by more than
+// one Task, but does not imply anything about the accessible topology of the
+// volume.
+//
+// For analogy, a flash drive can be used on many computers, but only one of
+// them at a time, and so would have a scope of "Single". But, it can be used
+// by any number of programs simultaneously, so would have a sharing of "All".
+message VolumeAccessMode {
+	// Scope enumerates the possible volume access scopes.
+	enum Scope {
+		option (gogoproto.goproto_enum_prefix) = false;
+		// VolumeScopeSingleNode indicates that only one node at a time may have
+		// access to the volume.
+		SINGLE_NODE = 0 [(gogoproto.enumvalue_customname) = "VolumeScopeSingleNode"];
+		// VolumeScopeMultiNode indicates that multiple nodes may access the volume
+		// at the same time.
+		MULTI_NODE = 1 [(gogoproto.enumvalue_customname) = "VolumeScopeMultiNode"];
+	}
+
+	// Sharing enumerates the possible volume sharing modes.
+	enum Sharing {
+		option (gogoproto.goproto_enum_prefix) = false;
+		// VolumeSharingNone indicates that the volume may only be used by a single
+		// Task at any given time.
+		NONE = 0 [(gogoproto.enumvalue_customname) = "VolumeSharingNone"];
+		// VolumeSharingReadOnly indicates that the volume may be accessed by
+		// multiple Tasks, but all Tasks only have have read access.
+		READ_ONLY = 1 [(gogoproto.enumvalue_customname) = "VolumeSharingReadOnly"];
+		// VolumeSharingOneWriter indicates that the Volume may be accessed by
+		// multiple Tasks, but only the one Task may have write permission for the
+		// Volume.
+		ONE_WRITER = 2 [(gogoproto.enumvalue_customname) = "VolumeSharingOneWriter"];
+		// VolumeSharingAll indicates that any number of Tasks may have read and
+		// write access to the volume.
+		ALL = 3 [(gogoproto.enumvalue_customname) = "VolumeSharingAll"];
+	}
+
+	// BlockVolume indicates the volume will be accessed with the block device
+	// API.
+	message BlockVolume {
+		// intentionally empty
+	}
+
+	// MountVolume indicates the volume will be access with the filesystem API.
+	message MountVolume {
+		// FsType is the filesystem type. This field is optional, and an empty
+		// string is equal to an unspecified value.
+		string fs_type = 1;
+
+		// MountFlags indicates mount options to be used for the volume. This
+		// field is optional, and may contain sensitive data.
+		repeated string mount_flags = 2;
+	}
+
+	// Scope defines on how many nodes this volume can be accessed
+	// simultaneously. If unset, will default to the zero-value of SINGLE_NODE.
+	Scope scope = 1;
+
+	// Sharing defines how many tasks can use this volume at the same time, and
+	// in what way. If unset, will default to the zero-value of NONE.
+	Sharing sharing = 2;
+
+	// AccessType defines the access type of the volume. Unlike Sharing and
+	// Scope, Swarmkit itself doesn't define either of these as a default, but
+	// but the upstream is free to do so. However, one of these MUST be set.
+	oneof access_type {
+		BlockVolume block = 3;
+		MountVolume mount = 4;
+	}
+}
+
+// VolumeSecret indicates a secret value that must be passed to CSI plugin
+// operations.
+message VolumeSecret {
+	// Key represents the key that will be passed as a controller secret to the
+	// CSI plugin.
+	string key = 1;
+
+	// Secret represents the swarmkit Secret object from which to read data to
+	// use as the value to pass to the CSI plugin. This can be either a secret
+	// name or ID.
+	//
+	// TODO(dperny): should this be a SecretReference instead?
+	string secret = 2;
+}
+
+// VolumePublishStatus contains information about the volume's publishing to a
+// specific node.
+//
+// Publishing or unpublishing a volume to a node is a two-step process.
+//
+// When a Volume is needed on a Node, a VolumePublishStatus with state
+// PendingPublish is added. This indicates that the volume should be published,
+// but the RPCs have not been executed.
+//
+// Then, afterward, ControllerPublishVolume is called for the Volume, and the
+// State is changed to Published, indicating that the call was a success.
+//
+// When a Volume is no longer needed, the process is similar, with the State
+// being changed to PendingUnpublish. When ControllerUnpublishVolume succeeds,
+// the PublishStatus for that Node is simply removed.
+//
+// Without this two-step process, the following could happen:
+//
+//   1. ControllerPublishVolume is called and the Volume is successfully
+//      published.
+//   2. A crash or leadership change disrupts the cluster before
+//      the Volume with the updated VolumePublishStatus can be added to the
+//      store.
+//   3. The Task that required the Volume to be published is deleted.
+//
+// In this case, the Volume would be published to the Node, but Swarm would be
+// unaware of this, and would additionally be unaware that the Volume _should_
+// be published to the Node.
+//
+// By first committing our intention to publish a Volume, we guarantee that the
+// Volume itself is sufficient to know which Nodes it may have been published
+// to.
+message VolumePublishStatus {
+	// State is the state of the volume in the publish/unpublish
+	// lifecycle, on a particular node.
+	enum State {
+		// PendingPublish indicates that the volume should be published on this
+		// node, but the call to ControllerPublishVolume has not been
+		// successfully completed yet and the result recorded by swarmkit.
+		PENDING_PUBLISH = 0;
+
+		// Published means the volume is published successfully to the node.
+		PUBLISHED = 1;
+
+		// PendingNodeUnpublish indicates that the Volume should be unpublished
+		// on the Node, and we're waiting for confirmation that it has done so.
+		// After the Node has confirmed that the Volume has been unpublished,
+		// the state will move to PendingUnpublish.
+		PENDING_NODE_UNPUBLISH = 2;
+
+		// PendingUnpublish means the volume is published to the node, and
+		// needs to not be, but the call to ControllerUnpublishVolume has not
+		// verifiably succeeded yet. There is no Unpublished state, because
+		// after the volume has been verifiably unpublished, the
+		// VolumePublishStatus for the node is removed.
+		PENDING_UNPUBLISH = 3;
+	}
+
+	// NodeID is the swarm (not CSI plugin) node ID that this volume is
+	// published to.
+	string node_id = 1;
+
+	// State is the publish state of the volume.
+	State state = 2;
+
+	// PublishContext is the same PublishContext returned by a call to
+	// ControllerPublishVolume.
+	map<string, string> publish_context = 3;
+
+	// Message is a human-readable message explaining the state of the volume.
+	// It exists to convey the current situation with the volume to the user,
+	// allowing, for example, the user to see error messages why a volume might
+	// not be published yet.
+	string message = 5;
+}
+
+// VolumeInfo contains information about the volume originating from the CSI
+// plugin.
+message VolumeInfo {
+	// CapacityBytes is the capacity of this volume in bytes. A value of 0
+	// indicates that the capcity is unknown.
+	int64 capacity_bytes = 1;
+
+	// VolumeContext includes fields that are opaque to Swarmkit.
+	map<string, string> volume_context = 2;
+
+	// VolumeID is the ID of the volume as reported by the CSI plugin.
+	// Information about the volume is not cached in swarmkit's object store;
+	// instead, it is retrieved on-demand as needed. If the VolumeID field is an
+	// empty string, and the plugin advertises CREATE_DELETE_VOLUME capability,
+	// then Swarmkit has not yet called CreateVolume.
+	string volume_id = 3;
+
+	// AccessibleTopology is the topology this volume is actually accessible
+	// from.
+	repeated Topology accessible_topology = 4;
+}
+
+// CapacityRange describes the minimum and maximum capacity a volume should be
+// created with.
+message CapacityRange {
+	// RequiredBytes specifies that a volume must be at least this big. The value
+	// of 0 indicates an unspecified minimum. Must not be negative.
+	int64 required_bytes = 1;
+
+	// LimitBytes specifies that a volume must not be bigger than this. The value
+	// of 0 indicates an unspecified maximum. Must not be negative.
+	int64 limit_bytes = 2;
+}
+
+// VolumeAssignment contains the information needed by a Node to use a CSI
+// volume. This includes the information need to Stage and Publish the volume
+// on the node, but never the full Volume object.
+message VolumeAssignment {
+	// ID is the swarmkit ID for the volume. This is used by swarmkit components
+	// to identify the volume.
+	string id = 1;
+
+	// VolumeID is the CSI volume ID as returned from CreateVolume. This is used
+	// by the CSI driver to identify the volume.
+	string volume_id = 2;
+
+	// Driver is the CSI Driver that this volume is managed by.
+	Driver driver = 3;
+
+	// VolumeContext is a map returned from the CSI Controller service when a
+	// Volume is created. It is optional for the driver to provide, but if it is
+	// provided, it must be passed to subsequent calls.
+	map<string,string> volume_context = 4;
+
+	// PublishContext is a map returned from the Controller service when
+	// ControllerPublishVolume is called. Again, it is optional, but if provided,
+	// must be passed.
+	map<string,string> publish_context = 5;
+
+	// AccessMode specifies the access mode of the volume.
+	VolumeAccessMode access_mode = 6;
+
+	// Secrets is the set of secrets required by the CSI plugin. These refer to
+	// swarmkit Secrets that will be distributed separately to the node.
+	repeated VolumeSecret secrets = 7;
+}
+
+// VolumeAttachment is the information associating a Volume with a Task.
+message VolumeAttachment {
+	// ID is the swarmkit ID of the volume assigned to this task, not the CSI
+	// volume ID.
+	string id = 1;
+
+	// Source indicates the Mount source that this volume is assigned for.
+	string source = 2;
+
+	// Target indicates the Mount target that this volume is assigned for.
+	string target = 3;
+}
+
+
+// These types are copied from the CSI spec. They are copied because there is
+// difficulty in compatibility between the CSI protos and the swarmkit protos,
+// and straight importing them is difficult.
+
+// TopologyRequirement expresses the user's requirements for a volume's
+// accessible topology.
+message TopologyRequirement {
+	// Specifies the list of topologies the provisioned volume MUST be
+	// accessible from.
+	// This field is OPTIONAL. If TopologyRequirement is specified either
+	// requisite or preferred or both MUST be specified.
+	//
+	// If requisite is specified, the provisioned volume MUST be
+	// accessible from at least one of the requisite topologies.
+	//
+	// Given
+	//   x = number of topologies provisioned volume is accessible from
+	//   n = number of requisite topologies
+	// The CO MUST ensure n >= 1. The SP MUST ensure x >= 1
+	// If x==n, then the SP MUST make the provisioned volume available to
+	// all topologies from the list of requisite topologies. If it is
+	// unable to do so, the SP MUST fail the CreateVolume call.
+	// For example, if a volume should be accessible from a single zone,
+	// and requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2".
+	// Similarly, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and both "zone" "Z2" and "zone" "Z3".
+	//
+	// If x<n, then the SP SHALL choose x unique topologies from the list
+	// of requisite topologies. If it is unable to do so, the SP MUST fail
+	// the CreateVolume call.
+	// For example, if a volume should be accessible from a single zone,
+	// and requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the SP may choose to make the provisioned volume available in
+	// either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1".
+	// Similarly, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"}
+	// then the provisioned volume MUST be accessible from any combination
+	// of two unique topologies: e.g. "R1/Z2" and "R1/Z3", or "R1/Z2" and
+	//  "R1/Z4", or "R1/Z3" and "R1/Z4".
+	//
+	// If x>n, then the SP MUST make the provisioned volume available from
+	// all topologies from the list of requisite topologies and MAY choose
+	// the remaining x-n unique topologies from the list of all possible
+	// topologies. If it is unable to do so, the SP MUST fail the
+	// CreateVolume call.
+	// For example, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2" and the SP may select the second zone
+	// independently, e.g. "R1/Z4".
+	repeated Topology requisite = 1;
+
+	// Specifies the list of topologies the CO would prefer the volume to
+	// be provisioned in.
+	//
+	// This field is OPTIONAL. If TopologyRequirement is specified either
+	// requisite or preferred or both MUST be specified.
+	//
+	// An SP MUST attempt to make the provisioned volume available using
+	// the preferred topologies in order from first to last.
+	//
+	// If requisite is specified, all topologies in preferred list MUST
+	// also be present in the list of requisite topologies.
+	//
+	// If the SP is unable to to make the provisioned volume available
+	// from any of the preferred topologies, the SP MAY choose a topology
+	// from the list of requisite topologies.
+	// If the list of requisite topologies is not specified, then the SP
+	// MAY choose from the list of all possible topologies.
+	// If the list of requisite topologies is specified and the SP is
+	// unable to make the provisioned volume available from any of the
+	// requisite topologies it MUST fail the CreateVolume call.
+	//
+	// Example 1:
+	// Given a volume should be accessible from a single zone, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z3"}
+	// then the SP SHOULD first attempt to make the provisioned volume
+	// available from "zone" "Z3" in the "region" "R1" and fall back to
+	// "zone" "Z2" in the "region" "R1" if that is not possible.
+	//
+	// Example 2:
+	// Given a volume should be accessible from a single zone, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z5"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z2"}
+	// then the SP SHOULD first attempt to make the provisioned volume
+	// accessible from "zone" "Z4" in the "region" "R1" and fall back to
+	// "zone" "Z2" in the "region" "R1" if that is not possible. If that
+	// is not possible, the SP may choose between either the "zone"
+	// "Z3" or "Z5" in the "region" "R1".
+	//
+	// Example 3:
+	// Given a volume should be accessible from TWO zones (because an
+	// opaque parameter in CreateVolumeRequest, for example, specifies
+	// the volume is accessible from two zones, aka synchronously
+	// replicated), and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"},
+	//   {"region": "R1", "zone": "Z4"},
+	//   {"region": "R1", "zone": "Z5"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z5"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the SP SHOULD first attempt to make the provisioned volume
+	// accessible from the combination of the two "zones" "Z5" and "Z3" in
+	// the "region" "R1". If that's not possible, it should fall back to
+	// a combination of "Z5" and other possibilities from the list of
+	// requisite. If that's not possible, it should fall back to a
+	// combination of "Z3" and other possibilities from the list of
+	// requisite. If that's not possible, it should fall back to a
+	// combination of other possibilities from the list of requisite.
+	repeated Topology preferred = 2;
+}
+
+// Topology is a map of topological domains to topological segments.
+// A topological domain is a sub-division of a cluster, like "region",
+// "zone", "rack", etc.
+// A topological segment is a specific instance of a topological domain,
+// like "zone3", "rack3", etc.
+// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
+// Valid keys have two segments: an OPTIONAL prefix and name, separated
+// by a slash (/), for example: "com.company.example/zone".
+// The key name segment is REQUIRED. The prefix is OPTIONAL.
+// The key name MUST be 63 characters or less, begin and end with an
+// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
+// underscores (_), dots (.), or alphanumerics in between, for example
+// "zone".
+// The key prefix MUST be 63 characters or less, begin and end with a
+// lower-case alphanumeric character ([a-z0-9]), contain only
+// dashes (-), dots (.), or lower-case alphanumerics in between, and
+// follow domain name notation format
+// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
+// The key prefix SHOULD include the plugin's host company name and/or
+// the plugin name, to minimize the possibility of collisions with keys
+// from other plugins.
+// If a key prefix is specified, it MUST be identical across all
+// topology keys returned by the SP (across all RPCs).
+// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone"
+// MUST not both exist.
+// Each value (topological segment) MUST contain 1 or more strings.
+// Each string MUST be 63 characters or less and begin and end with an
+// alphanumeric character with '-', '_', '.', or alphanumerics in
+// between.
+message Topology {
+	map<string, string> segments = 1;
+}
+
+// VolumeCapability specifies a capability of a volume.
+message VolumeCapability {
+	// Indicate that the volume will be accessed via the block device API.
+	message BlockVolume {
+		// Intentionally empty, for now.
+	}
+
+	// Indicate that the volume will be accessed via the filesystem API.
+	message MountVolume {
+		// The filesystem type. This field is OPTIONAL.
+		// An empty string is equal to an unspecified field value.
+		string fs_type = 1;
+
+		// The mount options that can be used for the volume. This field is
+		// OPTIONAL. `mount_flags` MAY contain sensitive information.
+		// Therefore, the CO and the Plugin MUST NOT leak this information
+		// to untrusted entities. The total size of this repeated field
+		// SHALL NOT exceed 4 KiB.
+		repeated string mount_flags = 2;
+	}
+
+	// Specify how a volume can be accessed.
+	message AccessMode {
+		enum Mode {
+			UNKNOWN = 0;
+
+			// Can only be published once as read/write on a single node, at
+			// any given time.
+			SINGLE_NODE_WRITER = 1;
+
+			// Can only be published once as readonly on a single node, at
+			// any given time.
+			SINGLE_NODE_READER_ONLY = 2;
+
+			// Can be published as readonly at multiple nodes simultaneously.
+			MULTI_NODE_READER_ONLY = 3;
+
+			// Can be published at multiple nodes simultaneously. Only one of
+			// the node can be used as read/write. The rest will be readonly.
+			MULTI_NODE_SINGLE_WRITER = 4;
+
+			// Can be published as read/write at multiple nodes
+			// simultaneously.
+			MULTI_NODE_MULTI_WRITER = 5;
+		}
+
+		// This field is REQUIRED.
+		Mode mode = 1;
+	}
+
+	// Specifies what API the volume will be accessed using. One of the
+	// following fields MUST be specified.
+	oneof access_type {
+		BlockVolume block = 1;
+		MountVolume mount = 2;
+	}
+
+	// This is a REQUIRED field.
+	AccessMode access_mode = 3;
+}

+ 174 - 76
vendor/github.com/docker/swarmkit/api/watch.pb.go

@@ -80,6 +80,7 @@ type Object struct {
 	//	*Object_Resource
 	//	*Object_Extension
 	//	*Object_Config
+	//	*Object_Volume
 	Object isObject_Object `protobuf_oneof:"Object"`
 }
 
@@ -148,6 +149,9 @@ type Object_Extension struct {
 type Object_Config struct {
 	Config *Config `protobuf:"bytes,9,opt,name=config,proto3,oneof" json:"config,omitempty"`
 }
+type Object_Volume struct {
+	Volume *Volume `protobuf:"bytes,10,opt,name=volume,proto3,oneof" json:"volume,omitempty"`
+}
 
 func (*Object_Node) isObject_Object()      {}
 func (*Object_Service) isObject_Object()   {}
@@ -158,6 +162,7 @@ func (*Object_Secret) isObject_Object()    {}
 func (*Object_Resource) isObject_Object()  {}
 func (*Object_Extension) isObject_Object() {}
 func (*Object_Config) isObject_Object()    {}
+func (*Object_Volume) isObject_Object()    {}
 
 func (m *Object) GetObject() isObject_Object {
 	if m != nil {
@@ -229,6 +234,13 @@ func (m *Object) GetConfig() *Config {
 	return nil
 }
 
+func (m *Object) GetVolume() *Volume {
+	if x, ok := m.GetObject().(*Object_Volume); ok {
+		return x.Volume
+	}
+	return nil
+}
+
 // XXX_OneofWrappers is for the internal use of the proto package.
 func (*Object) XXX_OneofWrappers() []interface{} {
 	return []interface{}{
@@ -241,6 +253,7 @@ func (*Object) XXX_OneofWrappers() []interface{} {
 		(*Object_Resource)(nil),
 		(*Object_Extension)(nil),
 		(*Object_Config)(nil),
+		(*Object_Volume)(nil),
 	}
 }
 
@@ -789,82 +802,83 @@ func init() {
 }
 
 var fileDescriptor_da25266013800cd9 = []byte{
-	// 1199 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xbd, 0x73, 0x1b, 0xc5,
-	0x1b, 0xc7, 0xef, 0x14, 0xf9, 0x24, 0x3d, 0xb6, 0x13, 0xcf, 0xc6, 0x49, 0xee, 0xa7, 0x5f, 0x90,
-	0x85, 0x78, 0xcb, 0x24, 0x41, 0x06, 0x13, 0x92, 0x01, 0x02, 0x33, 0x96, 0x2c, 0x46, 0x22, 0xe3,
-	0x97, 0x59, 0xdb, 0x49, 0xa9, 0x39, 0xdf, 0x3d, 0x56, 0x0e, 0xdf, 0xdd, 0x8a, 0xbd, 0x93, 0x1d,
-	0x77, 0x14, 0x14, 0x4c, 0x2a, 0x1a, 0x66, 0x68, 0x52, 0x41, 0x4d, 0x43, 0x07, 0xff, 0x40, 0x86,
-	0x2a, 0x65, 0x68, 0x3c, 0x44, 0xe9, 0x28, 0xf8, 0x0b, 0x28, 0x98, 0x7d, 0x39, 0xdb, 0x51, 0x4e,
-	0x36, 0xa9, 0xb4, 0xb7, 0xf7, 0xf9, 0x3e, 0xfb, 0xec, 0xf3, 0x76, 0x82, 0xab, 0x3d, 0x3f, 0xb9,
-	0x3f, 0xd8, 0xaa, 0xbb, 0x2c, 0x9c, 0xf7, 0x98, 0xbb, 0x83, 0x7c, 0x3e, 0xde, 0x73, 0x78, 0xb8,
-	0xe3, 0x27, 0xf3, 0x4e, 0xdf, 0x9f, 0xdf, 0x73, 0x12, 0xf7, 0x7e, 0xbd, 0xcf, 0x59, 0xc2, 0x08,
-	0x51, 0x40, 0x3d, 0x05, 0xea, 0xbb, 0xef, 0x97, 0x4f, 0xd3, 0xc7, 0x7d, 0x74, 0x63, 0xa5, 0x2f,
-	0x5f, 0x3f, 0x85, 0x65, 0x5b, 0x5f, 0xa2, 0x9b, 0xa4, 0xf4, 0x69, 0x96, 0x93, 0xfd, 0x3e, 0xa6,
-	0xec, 0x6c, 0x8f, 0xf5, 0x98, 0x5c, 0xce, 0x8b, 0x95, 0xde, 0xbd, 0x75, 0x82, 0x05, 0x49, 0x6c,
-	0x0d, 0xb6, 0xe7, 0xfb, 0xc1, 0xa0, 0xe7, 0x47, 0xfa, 0x47, 0x09, 0x6b, 0xdf, 0xe4, 0xc1, 0x5a,
-	0x95, 0xce, 0x90, 0x3a, 0xe4, 0x23, 0xe6, 0xa1, 0x6d, 0x56, 0xcd, 0x2b, 0x93, 0x0b, 0x76, 0xfd,
-	0xe5, 0x10, 0xd4, 0x57, 0x98, 0x87, 0x6d, 0x83, 0x4a, 0x8e, 0xdc, 0x82, 0x42, 0x8c, 0x7c, 0xd7,
-	0x77, 0xd1, 0xce, 0x49, 0xc9, 0xff, 0xb3, 0x24, 0xeb, 0x0a, 0x69, 0x1b, 0x34, 0xa5, 0x85, 0x30,
-	0xc2, 0x64, 0x8f, 0xf1, 0x1d, 0xfb, 0xcc, 0x78, 0xe1, 0x8a, 0x42, 0x84, 0x50, 0xd3, 0xc2, 0xc3,
-	0xc4, 0x89, 0x77, 0xec, 0xfc, 0x78, 0x0f, 0x37, 0x9c, 0x58, 0x48, 0x24, 0x27, 0x0e, 0x72, 0x83,
-	0x41, 0x9c, 0x20, 0xb7, 0x27, 0xc6, 0x1f, 0xd4, 0x54, 0x88, 0x38, 0x48, 0xd3, 0xe4, 0x06, 0x58,
-	0x31, 0xba, 0x1c, 0x13, 0xdb, 0x92, 0xba, 0x72, 0xf6, 0xcd, 0x04, 0xd1, 0x36, 0xa8, 0x66, 0xc9,
-	0xc7, 0x50, 0xe4, 0x18, 0xb3, 0x01, 0x77, 0xd1, 0x2e, 0x48, 0xdd, 0xe5, 0x2c, 0x1d, 0xd5, 0x4c,
-	0xdb, 0xa0, 0x87, 0x3c, 0xf9, 0x14, 0x4a, 0xf8, 0x20, 0xc1, 0x28, 0xf6, 0x59, 0x64, 0x17, 0xa5,
-	0xf8, 0xb5, 0x2c, 0x71, 0x2b, 0x85, 0xda, 0x06, 0x3d, 0x52, 0x08, 0x87, 0x5d, 0x16, 0x6d, 0xfb,
-	0x3d, 0xbb, 0x34, 0xde, 0xe1, 0xa6, 0x24, 0x84, 0xc3, 0x8a, 0x6d, 0x14, 0xd3, 0xdc, 0xd7, 0xd6,
-	0x60, 0x6a, 0x1d, 0x03, 0x74, 0x93, 0xc6, 0xfe, 0x7a, 0xc0, 0x12, 0x72, 0x1d, 0x40, 0x67, 0xab,
-	0xeb, 0x7b, 0xb2, 0x22, 0x4a, 0x8d, 0xe9, 0xe1, 0xc1, 0x5c, 0x49, 0xa7, 0xb3, 0xb3, 0x44, 0x4b,
-	0x1a, 0xe8, 0x78, 0x84, 0x40, 0x3e, 0x0e, 0x58, 0x22, 0xcb, 0x20, 0x4f, 0xe5, 0xba, 0xb6, 0x06,
-	0x67, 0x53, 0x8b, 0xcd, 0x41, 0x9c, 0xb0, 0x50, 0x50, 0x3b, 0x7e, 0xa4, 0xad, 0x51, 0xb9, 0x26,
-	0xb3, 0x30, 0xe1, 0x47, 0x1e, 0x3e, 0x90, 0xd2, 0x12, 0x55, 0x0f, 0x62, 0x77, 0xd7, 0x09, 0x06,
-	0x28, 0xcb, 0xa3, 0x44, 0xd5, 0x43, 0xed, 0x2f, 0x0b, 0x8a, 0xa9, 0x49, 0x62, 0x43, 0xee, 0xd0,
-	0x31, 0x6b, 0x78, 0x30, 0x97, 0xeb, 0x2c, 0xb5, 0x0d, 0x9a, 0xf3, 0x3d, 0x72, 0x0d, 0x4a, 0xbe,
-	0xd7, 0xed, 0x73, 0xdc, 0xf6, 0xb5, 0xd9, 0xc6, 0xd4, 0xf0, 0x60, 0xae, 0xd8, 0x59, 0x5a, 0x93,
-	0x7b, 0x22, 0xec, 0xbe, 0xa7, 0xd6, 0x64, 0x16, 0xf2, 0x91, 0x13, 0xea, 0x83, 0x64, 0x65, 0x3b,
-	0x21, 0x92, 0xd7, 0x61, 0x52, 0xfc, 0xa6, 0x46, 0xf2, 0xfa, 0x25, 0x88, 0x4d, 0x2d, 0xbc, 0x0d,
-	0x96, 0x2b, 0xaf, 0xa5, 0x2b, 0xab, 0x96, 0x5d, 0x21, 0xc7, 0x03, 0x20, 0x03, 0xaf, 0x42, 0xd1,
-	0x81, 0x69, 0xb5, 0x4a, 0x8f, 0xb0, 0x5e, 0xc1, 0xc8, 0x94, 0x92, 0x6a, 0x47, 0xea, 0x2f, 0x64,
-	0xaa, 0x90, 0x91, 0x29, 0x51, 0x29, 0x47, 0xb9, 0x7a, 0x0b, 0x0a, 0xa2, 0x7b, 0x05, 0x5c, 0x94,
-	0x30, 0x0c, 0x0f, 0xe6, 0x2c, 0xd1, 0xd8, 0x92, 0xb4, 0xc4, 0xcb, 0x8e, 0x47, 0x6e, 0xea, 0x94,
-	0xaa, 0x72, 0xaa, 0x9e, 0xe4, 0x98, 0x28, 0x18, 0x11, 0x3a, 0xc1, 0x93, 0x25, 0x98, 0xf6, 0x30,
-	0xf6, 0x39, 0x7a, 0xdd, 0x38, 0x71, 0x12, 0xb4, 0xa1, 0x6a, 0x5e, 0x39, 0x9b, 0x5d, 0xcb, 0xa2,
-	0x57, 0xd7, 0x05, 0x24, 0x2e, 0xa5, 0x55, 0xf2, 0x99, 0x2c, 0x40, 0x9e, 0xb3, 0x00, 0xed, 0x49,
-	0x29, 0xbe, 0x3c, 0x6e, 0x14, 0x51, 0x16, 0xc8, 0x71, 0x24, 0x58, 0xd2, 0x01, 0x08, 0x31, 0xdc,
-	0x42, 0x1e, 0xdf, 0xf7, 0xfb, 0xf6, 0x94, 0x54, 0xbe, 0x33, 0x4e, 0xb9, 0xde, 0x47, 0xb7, 0xbe,
-	0x7c, 0x88, 0x8b, 0xe4, 0x1e, 0x89, 0xc9, 0x32, 0x5c, 0xe0, 0xb8, 0x8d, 0x1c, 0x23, 0x17, 0xbd,
-	0xae, 0x9e, 0x3e, 0x22, 0x62, 0xd3, 0x32, 0x62, 0x97, 0x86, 0x07, 0x73, 0xe7, 0xe9, 0x21, 0xa0,
-	0x07, 0x95, 0x0c, 0xdf, 0x79, 0xfe, 0xd2, 0xb6, 0x47, 0xbe, 0x80, 0xd9, 0x63, 0xe6, 0xd4, 0xb0,
-	0x10, 0xd6, 0xce, 0x4a, 0x6b, 0x17, 0x87, 0x07, 0x73, 0xe4, 0xc8, 0x9a, 0x9a, 0x2a, 0xd2, 0x18,
-	0xe1, 0xa3, 0xbb, 0xa3, 0xb6, 0x54, 0x1f, 0x0b, 0x5b, 0x33, 0x59, 0xb6, 0x54, 0xc3, 0x8f, 0xda,
-	0xd2, 0xbb, 0xa2, 0xf9, 0x54, 0x43, 0x9e, 0x4b, 0x8b, 0x5f, 0x3c, 0x35, 0xf2, 0x90, 0x6b, 0xec,
-	0xd7, 0xfe, 0xc8, 0xc1, 0xd4, 0x3d, 0xf1, 0x41, 0xa4, 0xf8, 0xd5, 0x00, 0xe3, 0x84, 0xb4, 0xa0,
-	0x80, 0x51, 0xc2, 0x7d, 0x8c, 0x6d, 0xb3, 0x7a, 0xe6, 0xca, 0xe4, 0xc2, 0xb5, 0xac, 0xd8, 0x1e,
-	0x97, 0xa8, 0x87, 0x56, 0x94, 0xf0, 0x7d, 0x9a, 0x6a, 0xc9, 0x6d, 0x98, 0xe4, 0x18, 0x0f, 0x42,
-	0xec, 0x6e, 0x73, 0x16, 0x9e, 0xf4, 0xe1, 0xb8, 0x8b, 0x5c, 0x8c, 0x36, 0x0a, 0x8a, 0xff, 0x9c,
-	0xb3, 0x90, 0x5c, 0x07, 0xe2, 0x47, 0x6e, 0x30, 0xf0, 0xb0, 0xcb, 0x02, 0xaf, 0xab, 0xbe, 0xa2,
-	0xb2, 0x79, 0x8b, 0x74, 0x46, 0xbf, 0x59, 0x0d, 0x3c, 0x35, 0xd4, 0xca, 0xdf, 0x9b, 0x00, 0x47,
-	0x3e, 0x64, 0xce, 0x9f, 0x4f, 0xc0, 0x72, 0xdc, 0x44, 0xcc, 0xdc, 0x9c, 0x2c, 0x98, 0x37, 0xc6,
-	0x5e, 0x6a, 0x51, 0x62, 0x77, 0xfc, 0xc8, 0xa3, 0x5a, 0x42, 0x6e, 0x42, 0x61, 0xdb, 0x0f, 0x12,
-	0xe4, 0xb1, 0x7d, 0x46, 0x86, 0xe4, 0xf2, 0x49, 0x6d, 0x42, 0x53, 0xb8, 0xf6, 0x5b, 0x1a, 0xdb,
-	0x65, 0x8c, 0x63, 0xa7, 0x87, 0xe4, 0x33, 0xb0, 0x70, 0x17, 0xa3, 0x24, 0x0d, 0xed, 0xdb, 0x63,
-	0xbd, 0xd0, 0x8a, 0x7a, 0x4b, 0xe0, 0x54, 0xab, 0xc8, 0x87, 0x50, 0xd8, 0x55, 0xd1, 0xfa, 0x2f,
-	0x01, 0x4d, 0xd9, 0xf2, 0x2f, 0x26, 0x4c, 0x48, 0x43, 0xc7, 0xc2, 0x60, 0xbe, 0x7a, 0x18, 0x16,
-	0xc0, 0xd2, 0x89, 0xc8, 0x8d, 0xff, 0xf6, 0xa8, 0x94, 0x50, 0x4d, 0x92, 0x8f, 0x00, 0x46, 0x12,
-	0x78, 0xb2, 0xae, 0xc4, 0xd2, 0xac, 0x5e, 0xfd, 0xc7, 0x84, 0x73, 0x23, 0xae, 0x90, 0x1b, 0x30,
-	0x7b, 0x6f, 0x71, 0xa3, 0xd9, 0xee, 0x2e, 0x36, 0x37, 0x3a, 0xab, 0x2b, 0xdd, 0xcd, 0x95, 0x3b,
-	0x2b, 0xab, 0xf7, 0x56, 0x66, 0x8c, 0x72, 0xf9, 0xe1, 0xa3, 0xea, 0xc5, 0x11, 0x7c, 0x33, 0xda,
-	0x89, 0xd8, 0x9e, 0x70, 0xfc, 0xfc, 0x0b, 0xaa, 0x26, 0x6d, 0x2d, 0x6e, 0xb4, 0x66, 0xcc, 0xf2,
-	0xff, 0x1e, 0x3e, 0xaa, 0x5e, 0x18, 0x11, 0x35, 0x39, 0xaa, 0xc9, 0xf4, 0xa2, 0x66, 0x73, 0x6d,
-	0x49, 0x68, 0x72, 0x99, 0x9a, 0xcd, 0xbe, 0x97, 0xa5, 0xa1, 0xad, 0xe5, 0xd5, 0xbb, 0xad, 0x99,
-	0x7c, 0xa6, 0x86, 0x62, 0xc8, 0x76, 0xb1, 0x7c, 0xe9, 0xdb, 0x1f, 0x2b, 0xc6, 0xaf, 0x3f, 0x55,
-	0x46, 0xaf, 0xba, 0x10, 0xc2, 0x84, 0xdc, 0x22, 0x5e, 0xba, 0xa8, 0x9e, 0xd6, 0x88, 0xe5, 0xea,
-	0x69, 0xf5, 0x54, 0xbb, 0xf0, 0xfb, 0xcf, 0x7f, 0xff, 0x90, 0x3b, 0x07, 0xd3, 0x92, 0x78, 0x37,
-	0x74, 0x22, 0xa7, 0x87, 0xfc, 0x3d, 0xb3, 0xf1, 0xe6, 0xe3, 0x67, 0x15, 0xe3, 0xe9, 0xb3, 0x8a,
-	0xf1, 0xf5, 0xb0, 0x62, 0x3e, 0x1e, 0x56, 0xcc, 0x27, 0xc3, 0x8a, 0xf9, 0xe7, 0xb0, 0x62, 0x7e,
-	0xf7, 0xbc, 0x62, 0x3c, 0x79, 0x5e, 0x31, 0x9e, 0x3e, 0xaf, 0x18, 0x5b, 0x96, 0xfc, 0x33, 0xf9,
-	0xc1, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x96, 0x4e, 0x58, 0x61, 0x63, 0x0b, 0x00, 0x00,
+	// 1210 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xcd, 0x73, 0xdb, 0xc4,
+	0x1b, 0xc7, 0x25, 0xd7, 0x51, 0xec, 0x27, 0x49, 0x9b, 0xd9, 0xa6, 0xad, 0x7e, 0xfe, 0x15, 0xc7,
+	0x98, 0xb7, 0x4e, 0x5b, 0x1c, 0x08, 0xa5, 0x1d, 0xa0, 0x30, 0x13, 0x3b, 0x66, 0x6c, 0x3a, 0x79,
+	0x99, 0x4d, 0xd2, 0x1e, 0x3d, 0x8a, 0xf4, 0xc4, 0x15, 0x91, 0xb4, 0x66, 0x25, 0x3b, 0xcd, 0x8d,
+	0x23, 0xd3, 0x13, 0x17, 0x66, 0xb8, 0xf4, 0x04, 0x67, 0x2e, 0xdc, 0xca, 0x3f, 0xd0, 0xe1, 0xd4,
+	0x63, 0xb9, 0x64, 0xa8, 0x7b, 0xe3, 0xc0, 0x5f, 0xc0, 0x81, 0xd9, 0x17, 0x25, 0xa9, 0x2b, 0x27,
+	0xf4, 0xe4, 0xd5, 0xea, 0xf3, 0x7d, 0xf6, 0xd9, 0xe7, 0x4d, 0x86, 0xab, 0x5d, 0x3f, 0xb9, 0xdf,
+	0xdf, 0xae, 0xb9, 0x2c, 0x5c, 0xf0, 0x98, 0xbb, 0x8b, 0x7c, 0x21, 0xde, 0x73, 0x78, 0xb8, 0xeb,
+	0x27, 0x0b, 0x4e, 0xcf, 0x5f, 0xd8, 0x73, 0x12, 0xf7, 0x7e, 0xad, 0xc7, 0x59, 0xc2, 0x08, 0x51,
+	0x40, 0x2d, 0x05, 0x6a, 0x83, 0x0f, 0x4b, 0xa7, 0xe9, 0xe3, 0x1e, 0xba, 0xb1, 0xd2, 0x97, 0xae,
+	0x9f, 0xc2, 0xb2, 0xed, 0xaf, 0xd1, 0x4d, 0x52, 0xfa, 0x34, 0xcb, 0xc9, 0x7e, 0x0f, 0x53, 0x76,
+	0xae, 0xcb, 0xba, 0x4c, 0x2e, 0x17, 0xc4, 0x4a, 0xef, 0xde, 0x3a, 0xc1, 0x82, 0x24, 0xb6, 0xfb,
+	0x3b, 0x0b, 0xbd, 0xa0, 0xdf, 0xf5, 0x23, 0xfd, 0xa3, 0x84, 0xd5, 0xc7, 0x79, 0xb0, 0xd6, 0xa4,
+	0x33, 0xa4, 0x06, 0xf9, 0x88, 0x79, 0x68, 0x9b, 0x15, 0xf3, 0xca, 0xd4, 0xa2, 0x5d, 0x7b, 0x35,
+	0x04, 0xb5, 0x55, 0xe6, 0x61, 0xcb, 0xa0, 0x92, 0x23, 0xb7, 0x60, 0x32, 0x46, 0x3e, 0xf0, 0x5d,
+	0xb4, 0x73, 0x52, 0xf2, 0xff, 0x2c, 0xc9, 0x86, 0x42, 0x5a, 0x06, 0x4d, 0x69, 0x21, 0x8c, 0x30,
+	0xd9, 0x63, 0x7c, 0xd7, 0x3e, 0x33, 0x5e, 0xb8, 0xaa, 0x10, 0x21, 0xd4, 0xb4, 0xf0, 0x30, 0x71,
+	0xe2, 0x5d, 0x3b, 0x3f, 0xde, 0xc3, 0x4d, 0x27, 0x16, 0x12, 0xc9, 0x89, 0x83, 0xdc, 0xa0, 0x1f,
+	0x27, 0xc8, 0xed, 0x89, 0xf1, 0x07, 0x35, 0x14, 0x22, 0x0e, 0xd2, 0x34, 0xb9, 0x01, 0x56, 0x8c,
+	0x2e, 0xc7, 0xc4, 0xb6, 0xa4, 0xae, 0x94, 0x7d, 0x33, 0x41, 0xb4, 0x0c, 0xaa, 0x59, 0xf2, 0x29,
+	0x14, 0x38, 0xc6, 0xac, 0xcf, 0x5d, 0xb4, 0x27, 0xa5, 0xee, 0x72, 0x96, 0x8e, 0x6a, 0xa6, 0x65,
+	0xd0, 0x43, 0x9e, 0x7c, 0x0e, 0x45, 0x7c, 0x90, 0x60, 0x14, 0xfb, 0x2c, 0xb2, 0x0b, 0x52, 0xfc,
+	0x46, 0x96, 0xb8, 0x99, 0x42, 0x2d, 0x83, 0x1e, 0x29, 0x84, 0xc3, 0x2e, 0x8b, 0x76, 0xfc, 0xae,
+	0x5d, 0x1c, 0xef, 0x70, 0x43, 0x12, 0xc2, 0x61, 0xc5, 0x0a, 0xd5, 0x80, 0x05, 0xfd, 0x10, 0x6d,
+	0x18, 0xaf, 0xba, 0x2b, 0x09, 0xa1, 0x52, 0x6c, 0xbd, 0x90, 0x56, 0x4c, 0x75, 0x1d, 0xa6, 0x37,
+	0x30, 0x40, 0x37, 0xa9, 0xef, 0x6f, 0x04, 0x2c, 0x21, 0xd7, 0x01, 0x74, 0x8e, 0x3b, 0xbe, 0x27,
+	0xeb, 0xa8, 0x58, 0x9f, 0x19, 0x1e, 0xcc, 0x17, 0x75, 0x11, 0xb4, 0x97, 0x69, 0x51, 0x03, 0x6d,
+	0x8f, 0x10, 0xc8, 0xc7, 0x01, 0x4b, 0x64, 0xf1, 0xe4, 0xa9, 0x5c, 0x57, 0xd7, 0xe1, 0x6c, 0x6a,
+	0xb1, 0xd1, 0x8f, 0x13, 0x16, 0x0a, 0x6a, 0xd7, 0x8f, 0xb4, 0x35, 0x2a, 0xd7, 0x64, 0x0e, 0x26,
+	0xfc, 0xc8, 0xc3, 0x07, 0x52, 0x5a, 0xa4, 0xea, 0x41, 0xec, 0x0e, 0x9c, 0xa0, 0x8f, 0xb2, 0xa8,
+	0x8a, 0x54, 0x3d, 0x54, 0xff, 0xb2, 0xa0, 0x90, 0x9a, 0x24, 0x36, 0xe4, 0x0e, 0x1d, 0xb3, 0x86,
+	0x07, 0xf3, 0xb9, 0xf6, 0x72, 0xcb, 0xa0, 0x39, 0xdf, 0x23, 0xd7, 0xa0, 0xe8, 0x7b, 0x9d, 0x1e,
+	0xc7, 0x1d, 0x5f, 0x9b, 0xad, 0x4f, 0x0f, 0x0f, 0xe6, 0x0b, 0xed, 0xe5, 0x75, 0xb9, 0x27, 0x92,
+	0xe5, 0x7b, 0x6a, 0x4d, 0xe6, 0x20, 0x1f, 0x39, 0xa1, 0x3e, 0x48, 0xf6, 0x83, 0x13, 0x22, 0x79,
+	0x13, 0xa6, 0xc4, 0x6f, 0x6a, 0x24, 0xaf, 0x5f, 0x82, 0xd8, 0xd4, 0xc2, 0xdb, 0x60, 0xb9, 0xf2,
+	0x5a, 0xba, 0x1e, 0xab, 0xd9, 0x75, 0x75, 0x3c, 0x00, 0x32, 0x5d, 0x2a, 0x14, 0x6d, 0x98, 0x51,
+	0xab, 0xf4, 0x08, 0xeb, 0x35, 0x8c, 0x4c, 0x2b, 0xa9, 0x76, 0xa4, 0xf6, 0x52, 0xa6, 0x26, 0x33,
+	0x32, 0x25, 0xea, 0xeb, 0x28, 0x57, 0xef, 0xc0, 0xa4, 0xe8, 0x79, 0x01, 0x17, 0x24, 0x0c, 0xc3,
+	0x83, 0x79, 0x4b, 0x8c, 0x03, 0x49, 0x5a, 0xe2, 0x65, 0xdb, 0x23, 0x37, 0x75, 0x4a, 0x55, 0x11,
+	0x56, 0x4e, 0x72, 0x4c, 0x14, 0x8c, 0x08, 0x9d, 0xe0, 0xc9, 0x32, 0xcc, 0x78, 0x18, 0xfb, 0x1c,
+	0xbd, 0x4e, 0x9c, 0x38, 0x89, 0xaa, 0xc7, 0xb3, 0xd9, 0x1d, 0x20, 0x3a, 0x7c, 0x43, 0x40, 0xe2,
+	0x52, 0x5a, 0x25, 0x9f, 0xc9, 0x22, 0xe4, 0x39, 0x0b, 0xd0, 0x9e, 0x92, 0xe2, 0xcb, 0xe3, 0x06,
+	0x18, 0x65, 0x81, 0x1c, 0x62, 0x82, 0x25, 0x6d, 0x80, 0x10, 0xc3, 0x6d, 0xe4, 0xf1, 0x7d, 0xbf,
+	0x67, 0x4f, 0x4b, 0xe5, 0x7b, 0xe3, 0x94, 0x1b, 0x3d, 0x74, 0x6b, 0x2b, 0x87, 0xb8, 0x48, 0xee,
+	0x91, 0x98, 0xac, 0xc0, 0x05, 0x8e, 0x3b, 0xc8, 0x31, 0x72, 0xd1, 0xeb, 0xe8, 0x99, 0x25, 0x22,
+	0x36, 0x23, 0x23, 0x76, 0x69, 0x78, 0x30, 0x7f, 0x9e, 0x1e, 0x02, 0x7a, 0xbc, 0xc9, 0xf0, 0x9d,
+	0xe7, 0xaf, 0x6c, 0x7b, 0xe4, 0x2b, 0x98, 0x3b, 0x66, 0x4e, 0x8d, 0x18, 0x61, 0xed, 0xac, 0xb4,
+	0x76, 0x71, 0x78, 0x30, 0x4f, 0x8e, 0xac, 0xa9, 0x59, 0x24, 0x8d, 0x11, 0x3e, 0xba, 0x3b, 0x6a,
+	0x4b, 0x75, 0xbf, 0xb0, 0x35, 0x9b, 0x65, 0x4b, 0x8d, 0x89, 0x51, 0x5b, 0x7a, 0x57, 0x34, 0x9f,
+	0x6a, 0xc8, 0x73, 0x69, 0xf1, 0x8b, 0xa7, 0x7a, 0x1e, 0x72, 0xf5, 0xfd, 0xea, 0x1f, 0x39, 0x98,
+	0xbe, 0x27, 0x3e, 0xa3, 0x14, 0xbf, 0xe9, 0x63, 0x9c, 0x90, 0x26, 0x4c, 0x62, 0x94, 0x70, 0x1f,
+	0x63, 0xdb, 0xac, 0x9c, 0xb9, 0x32, 0xb5, 0x78, 0x2d, 0x2b, 0xb6, 0xc7, 0x25, 0xea, 0xa1, 0x19,
+	0x25, 0x7c, 0x9f, 0xa6, 0x5a, 0x72, 0x1b, 0xa6, 0x38, 0xc6, 0xfd, 0x10, 0x3b, 0x3b, 0x9c, 0x85,
+	0x27, 0x7d, 0x6e, 0xee, 0x22, 0x17, 0x03, 0x91, 0x82, 0xe2, 0xbf, 0xe4, 0x2c, 0x24, 0xd7, 0x81,
+	0xf8, 0x91, 0x1b, 0xf4, 0x3d, 0xec, 0xb0, 0xc0, 0xeb, 0xa8, 0x6f, 0xaf, 0x6c, 0xde, 0x02, 0x9d,
+	0xd5, 0x6f, 0xd6, 0x02, 0x4f, 0x0d, 0xb5, 0xd2, 0x0f, 0x26, 0xc0, 0x91, 0x0f, 0x99, 0xf3, 0xe7,
+	0x33, 0xb0, 0x1c, 0x37, 0x11, 0x93, 0x3a, 0x27, 0x0b, 0xe6, 0xad, 0xb1, 0x97, 0x5a, 0x92, 0xd8,
+	0x1d, 0x3f, 0xf2, 0xa8, 0x96, 0x90, 0x9b, 0x30, 0xb9, 0xe3, 0x07, 0x09, 0xf2, 0xd8, 0x3e, 0x23,
+	0x43, 0x72, 0xf9, 0xa4, 0x36, 0xa1, 0x29, 0x5c, 0xfd, 0x2d, 0x8d, 0xed, 0x0a, 0xc6, 0xb1, 0xd3,
+	0x45, 0xf2, 0x05, 0x58, 0x38, 0xc0, 0x28, 0x49, 0x43, 0xfb, 0xee, 0x58, 0x2f, 0xb4, 0xa2, 0xd6,
+	0x14, 0x38, 0xd5, 0x2a, 0xf2, 0x31, 0x4c, 0x0e, 0x54, 0xb4, 0xfe, 0x4b, 0x40, 0x53, 0xb6, 0xf4,
+	0xab, 0x09, 0x13, 0xd2, 0xd0, 0xb1, 0x30, 0x98, 0xaf, 0x1f, 0x86, 0x45, 0xb0, 0x74, 0x22, 0x72,
+	0xe3, 0xbf, 0x3d, 0x2a, 0x25, 0x54, 0x93, 0xe4, 0x13, 0x80, 0x91, 0x04, 0x9e, 0xac, 0x2b, 0xb2,
+	0x34, 0xab, 0x57, 0xff, 0x31, 0xe1, 0xdc, 0x88, 0x2b, 0xe4, 0x06, 0xcc, 0xdd, 0x5b, 0xda, 0x6c,
+	0xb4, 0x3a, 0x4b, 0x8d, 0xcd, 0xf6, 0xda, 0x6a, 0x67, 0x6b, 0xf5, 0xce, 0xea, 0xda, 0xbd, 0xd5,
+	0x59, 0xa3, 0x54, 0x7a, 0xf8, 0xa8, 0x72, 0x71, 0x04, 0xdf, 0x8a, 0x76, 0x23, 0xb6, 0x27, 0x1c,
+	0x3f, 0xff, 0x92, 0xaa, 0x41, 0x9b, 0x4b, 0x9b, 0xcd, 0x59, 0xb3, 0xf4, 0xbf, 0x87, 0x8f, 0x2a,
+	0x17, 0x46, 0x44, 0x0d, 0x8e, 0x6a, 0x32, 0xbd, 0xac, 0xd9, 0x5a, 0x5f, 0x16, 0x9a, 0x5c, 0xa6,
+	0x66, 0xab, 0xe7, 0x65, 0x69, 0x68, 0x73, 0x65, 0xed, 0x6e, 0x73, 0x36, 0x9f, 0xa9, 0xa1, 0x18,
+	0xb2, 0x01, 0x96, 0x2e, 0x7d, 0xf7, 0x53, 0xd9, 0x78, 0xfc, 0x73, 0x79, 0xf4, 0xaa, 0x8b, 0x21,
+	0x4c, 0xc8, 0x2d, 0xe2, 0xa5, 0x8b, 0xca, 0x69, 0x8d, 0x58, 0xaa, 0x9c, 0x56, 0x4f, 0xd5, 0x0b,
+	0xbf, 0xff, 0xf2, 0xf7, 0x8f, 0xb9, 0x73, 0x30, 0x23, 0x89, 0xf7, 0x43, 0x27, 0x72, 0xba, 0xc8,
+	0x3f, 0x30, 0xeb, 0x6f, 0x3f, 0x79, 0x5e, 0x36, 0x9e, 0x3d, 0x2f, 0x1b, 0xdf, 0x0e, 0xcb, 0xe6,
+	0x93, 0x61, 0xd9, 0x7c, 0x3a, 0x2c, 0x9b, 0x7f, 0x0e, 0xcb, 0xe6, 0xf7, 0x2f, 0xca, 0xc6, 0xd3,
+	0x17, 0x65, 0xe3, 0xd9, 0x8b, 0xb2, 0xb1, 0x6d, 0xc9, 0xbf, 0xa0, 0x1f, 0xfd, 0x1b, 0x00, 0x00,
+	0xff, 0xff, 0x36, 0x4b, 0xa7, 0x78, 0x99, 0x0b, 0x00, 0x00,
 }
 
 type authenticatedWrapperWatchServer struct {
@@ -956,6 +970,12 @@ func (m *Object) CopyFrom(src interface{}) {
 			}
 			github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig())
 			m.Object = &v
+		case *Object_Volume:
+			v := Object_Volume{
+				Volume: &Volume{},
+			}
+			github_com_docker_swarmkit_api_deepcopy.Copy(v.Volume, o.GetVolume())
+			m.Object = &v
 		}
 	}
 
@@ -1532,6 +1552,27 @@ func (m *Object_Config) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	}
 	return len(dAtA) - i, nil
 }
+func (m *Object_Volume) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Object_Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.Volume != nil {
+		{
+			size, err := m.Volume.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintWatch(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	return len(dAtA) - i, nil
+}
 func (m *SelectBySlot) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -2348,6 +2389,18 @@ func (m *Object_Config) Size() (n int) {
 	}
 	return n
 }
+func (m *Object_Volume) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Volume != nil {
+		l = m.Volume.Size()
+		n += 1 + l + sovWatch(uint64(l))
+	}
+	return n
+}
 func (m *SelectBySlot) Size() (n int) {
 	if m == nil {
 		return 0
@@ -2749,6 +2802,16 @@ func (this *Object_Config) String() string {
 	}, "")
 	return s
 }
+func (this *Object_Volume) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Object_Volume{`,
+		`Volume:` + strings.Replace(fmt.Sprintf("%v", this.Volume), "Volume", "Volume", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *SelectBySlot) String() string {
 	if this == nil {
 		return "nil"
@@ -3356,6 +3419,41 @@ func (m *Object) Unmarshal(dAtA []byte) error {
 			}
 			m.Object = &Object_Config{v}
 			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowWatch
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthWatch
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthWatch
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &Volume{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Object = &Object_Volume{v}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipWatch(dAtA[iNdEx:])

+ 1 - 0
vendor/github.com/docker/swarmkit/api/watch.proto

@@ -19,6 +19,7 @@ message Object {
 		Resource resource = 7;
 		Extension extension = 8;
 		Config config = 9;
+		Volume volume = 10;
 	}
 }
 

+ 4 - 5
vendor/github.com/docker/swarmkit/ca/certificates.go

@@ -14,7 +14,6 @@ import (
 	"encoding/pem"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"time"
@@ -687,7 +686,7 @@ func ensureCertKeyMatch(cert *x509.Certificate, key crypto.PublicKey) error {
 // CA certificate, and returns the PEM-encoded Certificate if so
 func GetLocalRootCA(paths CertPaths) (RootCA, error) {
 	// Check if we have a Certificate file
-	cert, err := ioutil.ReadFile(paths.Cert)
+	cert, err := os.ReadFile(paths.Cert)
 	if err != nil {
 		if os.IsNotExist(err) {
 			err = ErrNoLocalRootCA
@@ -697,7 +696,7 @@ func GetLocalRootCA(paths CertPaths) (RootCA, error) {
 	}
 	signingCert := cert
 
-	key, err := ioutil.ReadFile(paths.Key)
+	key, err := os.ReadFile(paths.Key)
 	if err != nil {
 		if !os.IsNotExist(err) {
 			return RootCA{}, err
@@ -910,13 +909,13 @@ func readCertValidity(kr KeyReader) (time.Time, time.Time, error) {
 // SaveRootCA saves a RootCA object to disk
 func SaveRootCA(rootCA RootCA, paths CertPaths) error {
 	// Make sure the necessary dirs exist and they are writable
-	err := os.MkdirAll(filepath.Dir(paths.Cert), 0755)
+	err := os.MkdirAll(filepath.Dir(paths.Cert), 0o755)
 	if err != nil {
 		return err
 	}
 
 	// If the root certificate got returned successfully, save the rootCA to disk.
-	return ioutils.AtomicWriteFile(paths.Cert, rootCA.Certs, 0644)
+	return ioutils.AtomicWriteFile(paths.Cert, rootCA.Certs, 0o644)
 }
 
 // GenerateNewCSR returns a newly generated key and CSR signed with said key

+ 1 - 2
vendor/github.com/docker/swarmkit/ca/external.go

@@ -10,7 +10,6 @@ import (
 	"encoding/json"
 	"encoding/pem"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"sync"
 	"time"
@@ -193,7 +192,7 @@ func makeExternalSignRequest(ctx context.Context, client *http.Client, url strin
 	defer resp.Body.Close()
 
 	b := io.LimitReader(resp.Body, CertificateMaxSize)
-	body, err := ioutil.ReadAll(b)
+	body, err := io.ReadAll(b)
 	if err != nil {
 		return nil, recoverableErr{err: errors.Wrap(err, "unable to read CSR response body")}
 	}

+ 8 - 9
vendor/github.com/docker/swarmkit/ca/keyreadwriter.go

@@ -3,7 +3,6 @@ package ca
 import (
 	"crypto/x509"
 	"encoding/pem"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -20,9 +19,9 @@ import (
 
 const (
 	// keyPerms are the permissions used to write the TLS keys
-	keyPerms = 0600
+	keyPerms = 0o600
 	// certPerms are the permissions used to write TLS certificates
-	certPerms = 0644
+	certPerms = 0o644
 	// versionHeader is the TLS PEM key header that contains the KEK version
 	versionHeader = "kek-version"
 )
@@ -157,14 +156,14 @@ func (k *KeyReadWriter) SetKeyFormatter(kf keyutils.Formatter) {
 // location than two possible key locations.
 func (k *KeyReadWriter) Migrate() error {
 	tmpPaths := k.genTempPaths()
-	keyBytes, err := ioutil.ReadFile(tmpPaths.Key)
+	keyBytes, err := os.ReadFile(tmpPaths.Key)
 	if err != nil {
 		return nil // no key?  no migration
 	}
 
 	// it does exist - no need to decrypt, because previous versions of swarmkit
 	// which supported this temporary key did not support encrypting TLS keys
-	cert, err := ioutil.ReadFile(k.paths.Cert)
+	cert, err := os.ReadFile(k.paths.Cert)
 	if err != nil {
 		return os.RemoveAll(tmpPaths.Key) // no cert?  no migration
 	}
@@ -202,7 +201,7 @@ func (k *KeyReadWriter) Read() ([]byte, []byte, error) {
 	}
 
 	keyBytes := pem.EncodeToMemory(keyBlock)
-	cert, err := ioutil.ReadFile(k.paths.Cert)
+	cert, err := os.ReadFile(k.paths.Cert)
 	// The cert is written to a temporary file first, then the key, and then
 	// the cert gets renamed - so, if interrupted, it's possible to end up with
 	// a cert that only exists in the temporary location.
@@ -219,7 +218,7 @@ func (k *KeyReadWriter) Read() ([]byte, []byte, error) {
 	if err != nil {
 		var tempErr error
 		tmpPaths := k.genTempPaths()
-		cert, tempErr = ioutil.ReadFile(tmpPaths.Cert)
+		cert, tempErr = os.ReadFile(tmpPaths.Cert)
 		if tempErr != nil {
 			return nil, nil, err // return the original error
 		}
@@ -308,7 +307,7 @@ func (k *KeyReadWriter) Write(certBytes, plaintextKeyBytes []byte, kekData *KEKD
 	defer k.mu.Unlock()
 
 	// current assumption is that the cert and key will be in the same directory
-	if err := os.MkdirAll(filepath.Dir(k.paths.Key), 0755); err != nil {
+	if err := os.MkdirAll(filepath.Dir(k.paths.Key), 0o755); err != nil {
 		return err
 	}
 
@@ -353,7 +352,7 @@ func (k *KeyReadWriter) Target() string {
 }
 
 func (k *KeyReadWriter) readKeyblock() (*pem.Block, error) {
-	key, err := ioutil.ReadFile(k.paths.Key)
+	key, err := os.ReadFile(k.paths.Key)
 	if err != nil {
 		return nil, err
 	}

+ 1 - 2
vendor/github.com/docker/swarmkit/ioutils/ioutils.go

@@ -2,7 +2,6 @@ package ioutils
 
 import (
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 )
@@ -11,7 +10,7 @@ import (
 
 // AtomicWriteFile atomically writes data to a file specified by filename.
 func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
-	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+	f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
 	if err != nil {
 		return err
 	}

+ 1 - 0
vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/drivers_unsupported.go

@@ -1,3 +1,4 @@
+//go:build !linux && !darwin && !windows
 // +build !linux,!darwin,!windows
 
 package cnmallocator

+ 1 - 1
vendor/github.com/docker/swarmkit/manager/controlapi/config.go

@@ -15,7 +15,7 @@ import (
 )
 
 // MaxConfigSize is the maximum byte length of the `Config.Spec.Data` field.
-const MaxConfigSize = 500 * 1024 // 500KB
+const MaxConfigSize = 1000 * 1024 // 1000KB
 
 // assumes spec is not nil
 func configFromConfigSpec(spec *api.ConfigSpec) *api.Config {

+ 256 - 0
vendor/github.com/docker/swarmkit/manager/controlapi/volume.go

@@ -0,0 +1,256 @@
+package controlapi
+
+import (
+	"context"
+	"strings"
+
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/identity"
+	"github.com/docker/swarmkit/manager/state/store"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// CreateVolume validates the provided volume spec, assigns the new volume a
+// freshly generated ID, and commits it to the store. It returns an
+// InvalidArgument error if the spec is missing required fields or references
+// secrets that do not exist.
+func (s *Server) CreateVolume(ctx context.Context, request *api.CreateVolumeRequest) (*api.CreateVolumeResponse, error) {
+	if request.Spec == nil {
+		return nil, status.Errorf(codes.InvalidArgument, "spec must not be nil")
+	}
+
+	// validate the volume spec
+	if request.Spec.Driver == nil {
+		return nil, status.Errorf(codes.InvalidArgument, "driver must be specified")
+	}
+
+	if request.Spec.Annotations.Name == "" {
+		return nil, status.Errorf(codes.InvalidArgument, "meta: name must be provided")
+	}
+
+	if request.Spec.AccessMode == nil {
+		return nil, status.Errorf(codes.InvalidArgument, "AccessMode must not be nil")
+	}
+
+	if request.Spec.AccessMode.GetAccessType() == nil {
+		return nil, status.Errorf(codes.InvalidArgument, "Volume AccessMode must specify either Mount or Block access type")
+	}
+
+	volume := &api.Volume{
+		ID:   identity.NewID(),
+		Spec: *request.Spec,
+	}
+	err := s.store.Update(func(tx store.Tx) error {
+		// check all secrets, so that we can return an error indicating ALL
+		// missing secrets, instead of just the first one.
+		var missingSecrets []string
+		for _, secret := range volume.Spec.Secrets {
+			s := store.GetSecret(tx, secret.Secret)
+			if s == nil {
+				missingSecrets = append(missingSecrets, secret.Secret)
+			}
+		}
+
+		if len(missingSecrets) > 0 {
+			secretStr := "secrets"
+			if len(missingSecrets) == 1 {
+				secretStr = "secret"
+			}
+
+			return status.Errorf(codes.InvalidArgument, "%s not found: %v", secretStr, strings.Join(missingSecrets, ", "))
+
+		}
+
+		return store.CreateVolume(tx, volume)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return &api.CreateVolumeResponse{
+		Volume: volume,
+	}, nil
+}
+
+// UpdateVolume applies an updated spec to an existing volume. Only mutable
+// fields may change; attempts to alter Name, Group, AccessibilityRequirements,
+// Driver, or AccessMode scope/sharing return an InvalidArgument error. The
+// returned Volume is re-read from the store so its meta version is current.
+func (s *Server) UpdateVolume(ctx context.Context, request *api.UpdateVolumeRequest) (*api.UpdateVolumeResponse, error) {
+	if request.VolumeID == "" {
+		return nil, status.Errorf(codes.InvalidArgument, "VolumeID must not be empty")
+	}
+	if request.Spec == nil {
+		return nil, status.Errorf(codes.InvalidArgument, "Spec must not be empty")
+	}
+	if request.VolumeVersion == nil {
+		return nil, status.Errorf(codes.InvalidArgument, "VolumeVersion must not be empty")
+	}
+
+	var volume *api.Volume
+	if err := s.store.Update(func(tx store.Tx) error {
+		volume = store.GetVolume(tx, request.VolumeID)
+		if volume == nil {
+			return status.Errorf(codes.NotFound, "volume %v not found", request.VolumeID)
+		}
+
+		// compare specs, to see if any invalid fields have changed
+		if request.Spec.Annotations.Name != volume.Spec.Annotations.Name {
+			return status.Errorf(codes.InvalidArgument, "Name cannot be updated")
+		}
+		if request.Spec.Group != volume.Spec.Group {
+			return status.Errorf(codes.InvalidArgument, "Group cannot be updated")
+		}
+		// NOTE(review): this looks like an identity (pointer/shallow)
+		// comparison rather than a deep equality check — confirm it only
+		// rejects genuine changes to AccessibilityRequirements.
+		if request.Spec.AccessibilityRequirements != volume.Spec.AccessibilityRequirements {
+			return status.Errorf(codes.InvalidArgument, "AccessibilityRequirements cannot be updated")
+		}
+		if request.Spec.Driver == nil || request.Spec.Driver.Name != volume.Spec.Driver.Name {
+			return status.Errorf(codes.InvalidArgument, "Driver cannot be updated")
+		}
+		// NOTE(review): request.Spec.AccessMode is dereferenced here without a
+		// nil check — confirm callers always populate it.
+		if request.Spec.AccessMode.Scope != volume.Spec.AccessMode.Scope || request.Spec.AccessMode.Sharing != volume.Spec.AccessMode.Sharing {
+			return status.Errorf(codes.InvalidArgument, "AccessMode cannot be updated")
+		}
+
+		volume.Spec = *request.Spec
+		volume.Meta.Version = *request.VolumeVersion
+		if err := store.UpdateVolume(tx, volume); err != nil {
+			return err
+		}
+		// read the volume back out, so it has the correct meta version
+		// TODO(dperny): this behavior, while likely more correct, may not be
+		// consistent with the rest of swarmkit...
+		volume = store.GetVolume(tx, request.VolumeID)
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	return &api.UpdateVolumeResponse{
+		Volume: volume,
+	}, nil
+}
+
+// ListVolumes returns all volumes matching the request's filters. Filtering
+// happens in two passes: one filter category is applied in the store query,
+// and all remaining categories are applied to the result in memory, because
+// the store's By filters cannot be combined with AND.
+func (s *Server) ListVolumes(ctx context.Context, request *api.ListVolumesRequest) (*api.ListVolumesResponse, error) {
+	var (
+		volumes []*api.Volume
+		err     error
+	)
+
+	// so the way we do this is with two filtering passes. first, we do a store
+	// request, filtering on one of the parameters. then, from the result of
+	// the store request, we filter on the remaining filters. This is necessary
+	// because the store filters do not expose an AND function.
+	s.store.View(func(tx store.ReadTx) {
+		var by store.By = store.All
+		switch {
+		case request.Filters == nil:
+			// short circuit to avoid nil pointer deref
+		case len(request.Filters.Names) > 0:
+			by = buildFilters(store.ByName, request.Filters.Names)
+		case len(request.Filters.IDPrefixes) > 0:
+			by = buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes)
+		case len(request.Filters.Groups) > 0:
+			by = buildFilters(store.ByVolumeGroup, request.Filters.Groups)
+		case len(request.Filters.Drivers) > 0:
+			by = buildFilters(store.ByDriver, request.Filters.Drivers)
+		case len(request.Filters.NamePrefixes) > 0:
+			by = buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes)
+		}
+		volumes, err = store.FindVolumes(tx, by)
+	})
+	if err != nil {
+		return nil, err
+	}
+	if request.Filters == nil {
+		return &api.ListVolumesResponse{Volumes: volumes}, nil
+	}
+
+	volumes = filterVolumes(volumes,
+		// Names
+		func(v *api.Volume) bool {
+			return filterContains(v.Spec.Annotations.Name, request.Filters.Names)
+		},
+		// NamePrefixes
+		func(v *api.Volume) bool {
+			return filterContainsPrefix(v.Spec.Annotations.Name, request.Filters.NamePrefixes)
+		},
+		// IDPrefixes
+		func(v *api.Volume) bool {
+			return filterContainsPrefix(v.ID, request.Filters.IDPrefixes)
+		},
+		// Labels
+		func(v *api.Volume) bool {
+			return filterMatchLabels(v.Spec.Annotations.Labels, request.Filters.Labels)
+		},
+		// Groups
+		func(v *api.Volume) bool {
+			return filterContains(v.Spec.Group, request.Filters.Groups)
+		},
+		// Drivers
+		func(v *api.Volume) bool {
+			return v.Spec.Driver != nil && filterContains(v.Spec.Driver.Name, request.Filters.Drivers)
+		},
+	)
+
+	return &api.ListVolumesResponse{
+		Volumes: volumes,
+	}, nil
+}
+
+// filterVolumes returns the subset of candidates for which every provided
+// filter function returns true. The result is never nil (an empty slice is
+// returned when nothing matches).
+func filterVolumes(candidates []*api.Volume, filters ...func(*api.Volume) bool) []*api.Volume {
+	result := []*api.Volume{}
+	for _, c := range candidates {
+		match := true
+		for _, f := range filters {
+			if !f(c) {
+				match = false
+				break
+			}
+		}
+
+		if match {
+			result = append(result, c)
+		}
+	}
+	return result
+}
+
+// GetVolume returns the volume with the given ID, or a NotFound error if no
+// such volume exists.
+func (s *Server) GetVolume(ctx context.Context, request *api.GetVolumeRequest) (*api.GetVolumeResponse, error) {
+	var volume *api.Volume
+	s.store.View(func(tx store.ReadTx) {
+		volume = store.GetVolume(tx, request.VolumeID)
+	})
+	if volume == nil {
+		return nil, status.Errorf(codes.NotFound, "volume %v not found", request.VolumeID)
+	}
+	return &api.GetVolumeResponse{
+		Volume: volume,
+	}, nil
+}
+
+// RemoveVolume marks a Volume for removal. For a Volume to be removed, it must
+// have Availability set to Drain. RemoveVolume does not immediately delete the
+// volume, because some clean-up must occur before it can be removed. However,
+// calling RemoveVolume is an irrevocable action, and once it occurs, the
+// Volume can no longer be used in any way.
+//
+// Returns a NotFound error if the volume does not exist, and a
+// FailedPrecondition error if the volume still has publish statuses and
+// Force is not set. When Force is set, the volume is deleted immediately.
+func (s *Server) RemoveVolume(ctx context.Context, request *api.RemoveVolumeRequest) (*api.RemoveVolumeResponse, error) {
+	err := s.store.Update(func(tx store.Tx) error {
+		volume := store.GetVolume(tx, request.VolumeID)
+		if volume == nil {
+			return status.Errorf(codes.NotFound, "volume %s not found", request.VolumeID)
+		}
+
+		// If this is a force delete, we force the delete. No survivors. This
+		// is a last resort to resolve otherwise intractable problems with
+		// volumes. Using this has the potential to break other things in the
+		// cluster, because testing every case where we force-remove a volume
+		// is difficult at best.
+		if request.Force {
+			return store.DeleteVolume(tx, request.VolumeID)
+		}
+
+		if len(volume.PublishStatus) != 0 {
+			return status.Error(codes.FailedPrecondition, "volume is still in use")
+		}
+
+		volume.PendingDelete = true
+		return store.UpdateVolume(tx, volume)
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	return &api.RemoveVolumeResponse{}, nil
+}

+ 138 - 0
vendor/github.com/docker/swarmkit/manager/csi/convert.go

@@ -0,0 +1,138 @@
+package csi
+
+import (
+	"github.com/container-storage-interface/spec/lib/go/csi"
+	"github.com/docker/swarmkit/api"
+)
+
+// convert.go contains functions for converting swarm objects into CSI requests
+// and back again.
+
+// makeTopologyRequirement converts a swarmkit TopologyRequirement into a CSI
+// TopologyRequirement. Returns nil if the input is nil.
+func makeTopologyRequirement(t *api.TopologyRequirement) *csi.TopologyRequirement {
+	if t == nil {
+		return nil
+	}
+	return &csi.TopologyRequirement{
+		Requisite: makeTopologies(t.Requisite),
+		Preferred: makeTopologies(t.Preferred),
+	}
+}
+
+// makeTopologies converts a slice of swarmkit topologies into a slice of CSI
+// topologies.
+func makeTopologies(ts []*api.Topology) []*csi.Topology {
+	if ts == nil {
+		return nil
+	}
+	csiTops := make([]*csi.Topology, len(ts))
+	for i, t := range ts {
+		csiTops[i] = makeTopology(t)
+	}
+
+	return csiTops
+}
+
+// makeTopology converts a swarmkit topology into a CSI topology. These types
+// are essentially homologous, with the swarm type being copied verbatim from
+// the CSI type (for build reasons).
+func makeTopology(t *api.Topology) *csi.Topology {
+	if t == nil {
+		return nil
+	}
+	return &csi.Topology{
+		Segments: t.Segments,
+	}
+}
+
+// makeCapability converts a swarmkit VolumeAccessMode into the equivalent CSI
+// VolumeCapability, mapping the Scope/Sharing combination to a CSI access
+// mode and carrying over the Block or Mount access type (including the Mount
+// filesystem type and mount flags).
+func makeCapability(am *api.VolumeAccessMode) *csi.VolumeCapability {
+	var mode csi.VolumeCapability_AccessMode_Mode
+	switch am.Scope {
+	case api.VolumeScopeSingleNode:
+		switch am.Sharing {
+		case api.VolumeSharingNone, api.VolumeSharingOneWriter, api.VolumeSharingAll:
+			mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
+		case api.VolumeSharingReadOnly:
+			mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY
+		}
+	case api.VolumeScopeMultiNode:
+		switch am.Sharing {
+		case api.VolumeSharingReadOnly:
+			mode = csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
+		case api.VolumeSharingOneWriter:
+			mode = csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER
+		case api.VolumeSharingAll:
+			mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
+		}
+	}
+
+	capability := &csi.VolumeCapability{
+		AccessMode: &csi.VolumeCapability_AccessMode{
+			Mode: mode,
+		},
+	}
+
+	if block := am.GetBlock(); block != nil {
+		capability.AccessType = &csi.VolumeCapability_Block{
+			// Block type is empty.
+			Block: &csi.VolumeCapability_BlockVolume{},
+		}
+	}
+
+	if mount := am.GetMount(); mount != nil {
+		capability.AccessType = &csi.VolumeCapability_Mount{
+			Mount: &csi.VolumeCapability_MountVolume{
+				FsType:     mount.FsType,
+				MountFlags: mount.MountFlags,
+			},
+		}
+	}
+
+	return capability
+}
+
+// makeCapacityRange converts the swarmkit CapacityRange object to the
+// equivalent CSI object. Returns nil if the input is nil.
+func makeCapacityRange(cr *api.CapacityRange) *csi.CapacityRange {
+	if cr == nil {
+		return nil
+	}
+
+	return &csi.CapacityRange{
+		RequiredBytes: cr.RequiredBytes,
+		LimitBytes:    cr.LimitBytes,
+	}
+}
+
+// unmakeTopologies transforms a CSI-type topology into the equivalent swarm
+// type. it is called "unmakeTopologies" because it performs the inverse of
+// "makeTopologies".
+func unmakeTopologies(topologies []*csi.Topology) []*api.Topology {
+	if topologies == nil {
+		return nil
+	}
+	swarmTopologies := make([]*api.Topology, len(topologies))
+	for i, t := range topologies {
+		swarmTopologies[i] = unmakeTopology(t)
+	}
+	return swarmTopologies
+}
+
+// unmakeTopology transforms a CSI-type topology into the equivalent swarm
+// type.
+func unmakeTopology(topology *csi.Topology) *api.Topology {
+	return &api.Topology{
+		Segments: topology.Segments,
+	}
+}
+
+// makeVolumeInfo converts a csi.Volume object into a swarmkit VolumeInfo
+// object.
+func makeVolumeInfo(csiVolume *csi.Volume) *api.VolumeInfo {
+	return &api.VolumeInfo{
+		CapacityBytes:      csiVolume.CapacityBytes,
+		VolumeContext:      csiVolume.VolumeContext,
+		VolumeID:           csiVolume.VolumeId,
+		AccessibleTopology: unmakeTopologies(csiVolume.AccessibleTopology),
+	}
+}

+ 29 - 0
vendor/github.com/docker/swarmkit/manager/csi/doc.go

@@ -0,0 +1,29 @@
+package csi
+
+// The `csi` package contains code for managing Swarmkit Cluster Volumes,
+// which are powered by CSI drivers.
+//
+// This package stands separately from other manager components because of the
+// unique nature of volumes. Volumes need to be allocated before they can be
+// used, but the availability of a volume also imposes a scheduling constraint
+// on the node. Further, the CSI lifecycle requires many different RPC calls at
+// many points in the volume's life, which brings it out of the purview of any
+// one component.
+//
+// In an ideal world, this package would live wholly within the allocator
+// package, but the allocator is very fragile, and modifying it is more trouble
+// than it's worth.
+
+// Volume Lifecycle in Swarm
+//
+// Creation
+//
+// When a volume is created, the first thing the allocator does is contact the
+// relevant CSI plugin in order to ensure that the volume is created, and to
+// retrieve the associated volume ID. Volumes are always created when the
+// swarmkit object is created, as opposed to being created when demanded by a
+// Service.
+//
+// Assignment
+//
+// After a volume has been created, it may be used by one or more Tasks.

+ 481 - 0
vendor/github.com/docker/swarmkit/manager/csi/manager.go

@@ -0,0 +1,481 @@
+package csi
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/docker/go-events"
+	"github.com/sirupsen/logrus"
+
+	"github.com/docker/docker/pkg/plugingetter"
+
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/log"
+	"github.com/docker/swarmkit/manager/state/store"
+	"github.com/docker/swarmkit/volumequeue"
+)
+
+const (
+	// DockerCSIPluginCap is the capability name of the plugins we use with the
+	// PluginGetter to get only the plugins we need. The full name of the
+	// plugin interface is "docker.csicontroller/1.0". This gets only the CSI
+	// plugins with Controller capability.
+	DockerCSIPluginCap = "csicontroller"
+)
+
+// Manager is the manager-side component responsible for cluster (CSI)
+// volumes. It lazily loads controller plugins via the PluginGetter and drives
+// volume creation, publishing, unpublishing, and deletion against them.
+type Manager struct {
+	// store is the memory store, used to read volumes and nodes and to
+	// commit volume state changes.
+	store *store.MemoryStore
+	// provider is the SecretProvider which allows retrieving secrets. Used
+	// when creating new Plugin objects.
+	provider SecretProvider
+
+	// pg is the plugingetter, which allows us to access the Docker Engine's
+	// plugin store.
+	pg plugingetter.PluginGetter
+
+	// newPlugin is a function which returns an object implementing the Plugin
+	// interface. It allows us to swap out the implementation of plugins while
+	// unit-testing the Manager
+	newPlugin func(pc plugingetter.CompatPlugin, pa plugingetter.PluginAddr, provider SecretProvider) Plugin
+
+	// synchronization for starting and stopping the Manager
+	startOnce sync.Once
+
+	stopChan chan struct{}
+	stopOnce sync.Once
+	doneChan chan struct{}
+
+	// plugins maps plugin names to lazily-loaded Plugin instances. See
+	// getPlugin.
+	plugins map[string]Plugin
+
+	// pendingVolumes queues volume IDs awaiting processing, along with the
+	// number of attempts made so far.
+	pendingVolumes *volumequeue.VolumeQueue
+}
+
+// NewManager returns a Manager that reads and commits state through the given
+// store and resolves CSI controller plugins through the given PluginGetter.
+func NewManager(s *store.MemoryStore, pg plugingetter.PluginGetter) *Manager {
+	return &Manager{
+		store:          s,
+		stopChan:       make(chan struct{}),
+		doneChan:       make(chan struct{}),
+		newPlugin:      NewPlugin,
+		pg:             pg,
+		plugins:        map[string]Plugin{},
+		provider:       NewSecretProvider(s),
+		pendingVolumes: volumequeue.NewVolumeQueue(),
+	}
+}
+
+// Run runs the manager. The provided context is used as the parent for all RPC
+// calls made to the CSI plugins. Canceling this context will cancel those RPC
+// calls by the nature of contexts, but this is not the preferred way to stop
+// the Manager. Instead, Stop should be called, which cause all RPC calls to be
+// canceled anyway. The context is also used to get the logging context for the
+// Manager.
+func (vm *Manager) Run(ctx context.Context) {
+	vm.startOnce.Do(func() {
+		vm.run(ctx)
+	})
+}
+
+// run performs the actual meat of the run operation.
+//
+// the argument is called pctx because it's the parent context, from which we
+// immediately resolve a new child context.
+func (vm *Manager) run(pctx context.Context) {
+	defer close(vm.doneChan)
+	ctx, ctxCancel := context.WithCancel(
+		log.WithModule(pctx, "csi/manager"),
+	)
+	defer ctxCancel()
+
+	watch, cancel, err := store.ViewAndWatch(vm.store, func(tx store.ReadTx) error {
+		// TODO(dperny): change this from ViewAndWatch to one that's just
+		// Watch.
+		return nil
+	})
+
+	if err != nil {
+		log.G(ctx).WithError(err).Error("error in store view and watch")
+		return
+	}
+	defer cancel()
+
+	vm.init(ctx)
+
+	// run a goroutine which periodically processes incoming volumes. the
+	// handle function will trigger processing every time new events come in
+	// by writing to the channel
+
+	doneProc := make(chan struct{})
+	go func() {
+		for {
+			id, attempt := vm.pendingVolumes.Wait()
+			// this case occurs when the stop method has been called on
+			// pendingVolumes. stop is called on pendingVolumes when Stop is
+			// called on the CSI manager.
+			if id == "" && attempt == 0 {
+				break
+			}
+			// TODO(dperny): we can launch some number of workers and process
+			// more than one volume at a time, if desired.
+			vm.processVolume(ctx, id, attempt)
+		}
+
+		// closing doneProc signals that this routine has exited, and allows
+		// the main Run routine to exit.
+		close(doneProc)
+	}()
+
+	// defer read from doneProc. doneProc is closed in the goroutine above,
+	// and this defer will block until then. Because defers are executed as a
+	// stack, this in turn blocks the final defer (closing doneChan) from
+	// running. Ultimately, this prevents Stop from returning until the above
+	// goroutine is closed.
+	defer func() {
+		<-doneProc
+	}()
+
+	for {
+		select {
+		case ev := <-watch:
+			vm.handleEvent(ev)
+		case <-vm.stopChan:
+			vm.pendingVolumes.Stop()
+			return
+		}
+	}
+}
+
+// processVolume encapsulates the logic for processing one pending volume.
+func (vm *Manager) processVolume(ctx context.Context, id string, attempt uint) {
+	// set up log fields for a derived context to pass to handleVolume.
+	dctx := log.WithFields(ctx, logrus.Fields{
+		"volume.id": id,
+		"attempt":   attempt,
+	})
+
+	err := vm.handleVolume(dctx, id)
+	// TODO(dperny): differentiate between retryable and non-retryable
+	// errors.
+	if err != nil {
+		// on failure, re-enqueue with an incremented attempt count so the
+		// volume is retried later.
+		log.G(dctx).WithError(err).Info("error handling volume")
+		vm.pendingVolumes.Enqueue(id, attempt+1)
+	}
+}
+
+// init does one-time setup work for the Manager, like creating all of
+// the Plugins and initializing the local state of the component.
+func (vm *Manager) init(ctx context.Context) {
+	var (
+		nodes   []*api.Node
+		volumes []*api.Volume
+	)
+	vm.store.View(func(tx store.ReadTx) {
+		var err error
+		nodes, err = store.FindNodes(tx, store.All)
+		if err != nil {
+			// this should *never happen*. Find only returns errors if the find
+			// by is invalid.
+			log.G(ctx).WithError(err).Error("error finding nodes")
+		}
+		volumes, err = store.FindVolumes(tx, store.All)
+		if err != nil {
+			// likewise, should never happen.
+			log.G(ctx).WithError(err).Error("error finding volumes")
+		}
+	})
+
+	for _, node := range nodes {
+		vm.handleNode(node)
+	}
+
+	// on initialization, we enqueue all of the Volumes. The easiest way to
+	// know if a Volume needs some work performed is to just pass it through
+	// the VolumeManager. If it doesn't need any work, then we will quickly
+	// skip by it. Otherwise, the needed work will be performed.
+	for _, volume := range volumes {
+		vm.enqueueVolume(volume.ID)
+	}
+}
+
+// Stop signals the Manager to halt and blocks until the run loop (including
+// its volume-processing goroutine) has fully exited. It is safe to call Stop
+// more than once; the stop channel is only closed a single time.
+func (vm *Manager) Stop() {
+	vm.stopOnce.Do(func() {
+		close(vm.stopChan)
+	})
+
+	<-vm.doneChan
+}
+
+// handleEvent dispatches one store event: volume create/update events enqueue
+// the volume for processing, and node events update the plugins' node
+// mappings.
+func (vm *Manager) handleEvent(ev events.Event) {
+	switch e := ev.(type) {
+	case api.EventCreateVolume:
+		vm.enqueueVolume(e.Volume.ID)
+	case api.EventUpdateVolume:
+		vm.enqueueVolume(e.Volume.ID)
+	case api.EventCreateNode:
+		vm.handleNode(e.Node)
+	case api.EventUpdateNode:
+		// for updates, we're only adding the node to every plugin. if the node
+		// no longer reports CSIInfo for a specific plugin, we will just leave
+		// the stale data in the plugin. this should not have any adverse
+		// effect, because the memory impact is small, and this operation
+		// should not be frequent. this may change as the code for volumes
+		// becomes more polished.
+		vm.handleNode(e.Node)
+	case api.EventDeleteNode:
+		vm.handleNodeRemove(e.Node.ID)
+	}
+}
+
+func (vm *Manager) createVolume(ctx context.Context, v *api.Volume) error {
+	l := log.G(ctx).WithField("volume.id", v.ID).WithField("driver", v.Spec.Driver.Name)
+	l.Info("creating volume")
+
+	p, err := vm.getPlugin(v.Spec.Driver.Name)
+	if err != nil {
+		l.Errorf("volume creation failed: %s", err.Error())
+		return err
+	}
+
+	info, err := p.CreateVolume(ctx, v)
+	if err != nil {
+		l.WithError(err).Error("volume create failed")
+		return err
+	}
+
+	err = vm.store.Update(func(tx store.Tx) error {
+		v2 := store.GetVolume(tx, v.ID)
+		// the volume should never be missing. I don't know of even any race
+		// condition that could result in this behavior. nevertheless, it's
+		// better to do this than to segfault.
+		if v2 == nil {
+			return nil
+		}
+
+		v2.VolumeInfo = info
+
+		return store.UpdateVolume(tx, v2)
+	})
+	if err != nil {
+		l.WithError(err).Error("committing created volume to store failed")
+	}
+	return err
+}
+
+// enqueueVolume enqueues a new volume event, placing the Volume ID into
+// pendingVolumes to be processed. Because enqueueVolume is only called in
+// response to a new Volume update event, not for a retry, the retry number is
+// always reset to 0.
+func (vm *Manager) enqueueVolume(id string) {
+	vm.pendingVolumes.Enqueue(id, 0)
+}
+
+// handleVolume processes a Volume. It determines if any relevant update has
+// occurred, and does the required work to handle that update if so.
+//
+// returns an error if handling the volume failed and needs to be retried.
+//
+// even if an error is returned, the store may still be updated.
+func (vm *Manager) handleVolume(ctx context.Context, id string) error {
+	var volume *api.Volume
+	vm.store.View(func(tx store.ReadTx) {
+		volume = store.GetVolume(tx, id)
+	})
+	if volume == nil {
+		// if the volume no longer exists, there is nothing to do, nothing to
+		// retry, and no relevant error.
+		return nil
+	}
+
+	if volume.VolumeInfo == nil {
+		return vm.createVolume(ctx, volume)
+	}
+
+	if volume.PendingDelete {
+		return vm.deleteVolume(ctx, volume)
+	}
+
+	updated := false
+	// TODO(dperny): it's just pointers, but copying the entire PublishStatus
+	// on each update might be intensive.
+
+	// we take a copy of the PublishStatus slice, because if we succeed in an
+	// unpublish operation, we will delete that status from PublishStatus.
+	statuses := make([]*api.VolumePublishStatus, len(volume.PublishStatus))
+	copy(statuses, volume.PublishStatus)
+
+	// failedPublishOrUnpublish is a slice of nodes where publish or unpublish
+	// operations failed. Publishing or unpublishing a volume can succeed or
+	// fail in part. If any failures occur, we will add the node ID of the
+	// publish operation that failed to this slice. Then, at the end of this
+	// function, after we update the store, if there are any failed operations,
+	// we will still return an error.
+	failedPublishOrUnpublish := []string{}
+
+	// adjustIndex is the number of entries deleted from volume.PublishStatus.
+	// when we're deleting entries from volume.PublishStatus, the index of the
+	// entry in statuses will no longer match the index of the same entry in
+	// volume.PublishStatus. we subtract adjustIndex from i to get the index
+	// where the entry is found after taking into account the deleted entries.
+	adjustIndex := 0
+
+	for i, status := range statuses {
+		switch status.State {
+		case api.VolumePublishStatus_PENDING_PUBLISH:
+			plug, err := vm.getPlugin(volume.Spec.Driver.Name)
+			if err != nil {
+				status.Message = fmt.Sprintf("error publishing volume: %v", err)
+				failedPublishOrUnpublish = append(failedPublishOrUnpublish, status.NodeID)
+			} else {
+				publishContext, err := plug.PublishVolume(ctx, volume, status.NodeID)
+				if err == nil {
+					status.State = api.VolumePublishStatus_PUBLISHED
+					status.PublishContext = publishContext
+					status.Message = ""
+				} else {
+					status.Message = fmt.Sprintf("error publishing volume: %v", err)
+					failedPublishOrUnpublish = append(failedPublishOrUnpublish, status.NodeID)
+				}
+			}
+			updated = true
+		case api.VolumePublishStatus_PENDING_UNPUBLISH:
+			plug, err := vm.getPlugin(volume.Spec.Driver.Name)
+			if err != nil {
+				status.Message = fmt.Sprintf("error unpublishing volume: %v", err)
+				failedPublishOrUnpublish = append(failedPublishOrUnpublish, status.NodeID)
+			} else {
+				err := plug.UnpublishVolume(ctx, volume, status.NodeID)
+				if err == nil {
+					// if there is no error with unpublishing, then we delete the
+					// status from the statuses slice.
+					j := i - adjustIndex
+					volume.PublishStatus = append(volume.PublishStatus[:j], volume.PublishStatus[j+1:]...)
+					adjustIndex++
+				} else {
+					status.Message = fmt.Sprintf("error unpublishing volume: %v", err)
+					failedPublishOrUnpublish = append(failedPublishOrUnpublish, status.NodeID)
+				}
+			}
+
+			updated = true
+		}
+	}
+
+	if updated {
+		if err := vm.store.Update(func(tx store.Tx) error {
+			// the publish status is now authoritative. read-update-write the
+			// volume object.
+			v := store.GetVolume(tx, volume.ID)
+			if v == nil {
+				// volume should never be deleted with pending publishes. if
+				// this does occur somehow, then we will just ignore it, rather
+				// than crashing.
+				return nil
+			}
+
+			v.PublishStatus = volume.PublishStatus
+			return store.UpdateVolume(tx, v)
+		}); err != nil {
+			return err
+		}
+	}
+
+	if len(failedPublishOrUnpublish) > 0 {
+		return fmt.Errorf("error publishing or unpublishing to some nodes: %v", failedPublishOrUnpublish)
+	}
+	return nil
+}
+
+// handleNode handles one node event
+func (vm *Manager) handleNode(n *api.Node) {
+	if n.Description == nil {
+		return
+	}
+	// we just call AddNode on every update. Because it's just a map
+	// assignment, this is probably faster than checking if something changed.
+	for _, info := range n.Description.CSIInfo {
+		p, err := vm.getPlugin(info.PluginName)
+		if err != nil {
+			log.L.Warnf("error handling node: %v", err)
+			// TODO(dperny): log something
+			continue
+		}
+		p.AddNode(n.ID, info.NodeID)
+	}
+}
+
+// handleNodeRemove handles a node delete event
+func (vm *Manager) handleNodeRemove(nodeID string) {
+	// we just call RemoveNode on every plugin, because it's probably quicker
+	// than checking if the node was using that plugin.
+	//
+	// we don't need to worry about lazy-loading here, because if we don't have
+	// the plugin loaded, there's no need to call remove.
+	for _, plugin := range vm.plugins {
+		plugin.RemoveNode(nodeID)
+	}
+}
+
+func (vm *Manager) deleteVolume(ctx context.Context, v *api.Volume) error {
+	// TODO(dperny): handle missing plugin
+	plug, err := vm.getPlugin(v.Spec.Driver.Name)
+	if err != nil {
+		return err
+	}
+	err = plug.DeleteVolume(ctx, v)
+	if err != nil {
+		return err
+	}
+
+	// TODO(dperny): handle update error
+	return vm.store.Update(func(tx store.Tx) error {
+		return store.DeleteVolume(tx, v.ID)
+	})
+}
+
+// getPlugin returns the plugin with the given name.
+//
+// In a previous iteration of the architecture of this component, plugins were
+// added to the manager through an update to the Cluster object, which
+// triggered an event. In other words, they were eagerly loaded.
+//
+// When rearchitecting to use the plugingetter.PluginGetter interface, that
+// eager loading is no longer practical, because the method for getting events
+// about new plugins would be difficult to plumb this deep into swarm.
+//
+// Instead, we change from what was previously a bunch of raw map lookups to
+// instead a method call which lazy-loads the plugins as needed. This is fine,
+// because in the Plugin object itself, the network connection is made lazily
+// as well.
+//
+// TODO(dperny): There is no way to unload a plugin. Unloading plugins will
+// happen as part of a leadership change, but otherwise, on especially
+// long-lived managers with especially high plugin churn, this is a memory
+// leak. It's acceptable for now because we expect neither exceptionally long
+// lived managers nor exceptionally high plugin churn.
+func (vm *Manager) getPlugin(name string) (Plugin, error) {
+	// if the plugin already exists, we can just return it.
+	if p, ok := vm.plugins[name]; ok {
+		return p, nil
+	}
+
+	// otherwise, we need to load the plugin.
+	pc, err := vm.pg.Get(name, DockerCSIPluginCap, plugingetter.Lookup)
+	if err != nil {
+		return nil, err
+	}
+
+	if pc == nil {
+		return nil, errors.New("driver \"" + name + "\" not found")
+	}
+
+	pa, ok := pc.(plugingetter.PluginAddr)
+	if !ok {
+		return nil, errors.New("plugin for driver \"" + name + "\" does not implement PluginAddr")
+	}
+
+	p := vm.newPlugin(pc, pa, vm.provider)
+	vm.plugins[name] = p
+
+	return p, nil
+}

+ 334 - 0
vendor/github.com/docker/swarmkit/manager/csi/plugin.go

@@ -0,0 +1,334 @@
+package csi
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"google.golang.org/grpc"
+
+	"github.com/container-storage-interface/spec/lib/go/csi"
+	"github.com/docker/docker/pkg/plugingetter"
+	"github.com/docker/swarmkit/api"
+)
+
+// Plugin is the interface for a CSI controller plugin.
+//
+// In this package, the word "plugin" is unfortunately overused. This
+// particular "Plugin" is the interface used by volume Manager to interact with
+// CSI controller plugins. It should not be confused with the "plugin" returned
+// from the plugingetter interface, which is the interface that gives us the
+// information we need to create this Plugin.
+type Plugin interface {
+	CreateVolume(context.Context, *api.Volume) (*api.VolumeInfo, error)
+	DeleteVolume(context.Context, *api.Volume) error
+	PublishVolume(context.Context, *api.Volume, string) (map[string]string, error)
+	UnpublishVolume(context.Context, *api.Volume, string) error
+	AddNode(swarmID, csiID string)
+	RemoveNode(swarmID string)
+}
+
+// plugin represents an individual CSI controller plugin
+type plugin struct {
+	// name is the name of the plugin, which is also the name used as the
+	// Driver.Name field
+	name string
+
+	// socket is the unix socket to connect to this plugin at.
+	socket string
+
+	// provider is the SecretProvider, which allows retrieving secrets for CSI
+	// calls.
+	provider SecretProvider
+
+	// cc is the grpc client connection
+	// TODO(dperny): the client is never closed. it may be closed when it goes
+	// out of scope, but this should be verified.
+	cc *grpc.ClientConn
+	// idClient is the identity service client
+	idClient csi.IdentityClient
+	// controllerClient is the controller service client
+	controllerClient csi.ControllerClient
+
+	// controller indicates that the plugin has controller capabilities.
+	controller bool
+
+	// publisher indicates that the controller plugin has
+	// PUBLISH_UNPUBLISH_VOLUME capability.
+	publisher bool
+
+	// swarmToCSI maps a swarm node ID to the corresponding CSI node ID
+	swarmToCSI map[string]string
+
+	// csiToSwarm maps a CSI node ID back to the swarm node ID.
+	csiToSwarm map[string]string
+}
+
+// NewPlugin creates a new Plugin object.
+//
+// NewPlugin takes both the CompatPlugin and the PluginAddr. These should be
+// the same object. By taking both parts here, we can push off the work of
+// assuring that the given plugin implements the PluginAddr interface without
+// having to typecast in this constructor.
+func NewPlugin(pc plugingetter.CompatPlugin, pa plugingetter.PluginAddr, provider SecretProvider) Plugin {
+	return &plugin{
+		name: pc.Name(),
+		// TODO(dperny): verify that we do not need to include the Network()
+		// portion of the Addr.
+		socket:     fmt.Sprintf("%s://%s", pa.Addr().Network(), pa.Addr().String()),
+		provider:   provider,
+		swarmToCSI: map[string]string{},
+		csiToSwarm: map[string]string{},
+	}
+}
+
+// connect is a private method that initializes a gRPC ClientConn and creates
+// the IdentityClient and ControllerClient.
+func (p *plugin) connect(ctx context.Context) error {
+	cc, err := grpc.DialContext(ctx, p.socket, grpc.WithInsecure())
+	if err != nil {
+		return err
+	}
+
+	p.cc = cc
+
+	// first, probe the plugin, to ensure that it exists and is ready to go
+	idc := csi.NewIdentityClient(cc)
+	p.idClient = idc
+
+	// controllerClient may not do anything if the plugin does not support
+	// the controller service, but it should not be an error to create it now
+	// anyway
+	p.controllerClient = csi.NewControllerClient(cc)
+
+	return p.init(ctx)
+}
+
+// init uses the identity service to check the properties of the plugin,
+// most importantly, its capabilities.
+func (p *plugin) init(ctx context.Context) error {
+	probe, err := p.idClient.Probe(ctx, &csi.ProbeRequest{})
+	if err != nil {
+		return err
+	}
+
+	if probe.Ready != nil && !probe.Ready.Value {
+		return errors.New("plugin not ready")
+	}
+
+	resp, err := p.idClient.GetPluginCapabilities(ctx, &csi.GetPluginCapabilitiesRequest{})
+	if err != nil {
+		return err
+	}
+
+	if resp == nil {
+		return nil
+	}
+
+	for _, c := range resp.Capabilities {
+		if sc := c.GetService(); sc != nil {
+			switch sc.Type {
+			case csi.PluginCapability_Service_CONTROLLER_SERVICE:
+				p.controller = true
+			}
+		}
+	}
+
+	if p.controller {
+		cCapResp, err := p.controllerClient.ControllerGetCapabilities(
+			ctx, &csi.ControllerGetCapabilitiesRequest{},
+		)
+		if err != nil {
+			return err
+		}
+
+		for _, c := range cCapResp.Capabilities {
+			rpc := c.GetRpc()
+			if rpc.Type == csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME {
+				p.publisher = true
+			}
+		}
+	}
+
+	return nil
+}
+
+// CreateVolume wraps and abstracts the CSI CreateVolume logic and returns
+// the volume info, or an error.
+func (p *plugin) CreateVolume(ctx context.Context, v *api.Volume) (*api.VolumeInfo, error) {
+	c, err := p.Client(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if !p.controller {
+		// TODO(dperny): come up with a scheme to handle headless plugins
+		// TODO(dperny): handle plugins without create volume capabilities
+		return &api.VolumeInfo{VolumeID: v.Spec.Annotations.Name}, nil
+	}
+
+	createVolumeRequest := p.makeCreateVolume(v)
+	resp, err := c.CreateVolume(ctx, createVolumeRequest)
+	if err != nil {
+		return nil, err
+	}
+
+	return makeVolumeInfo(resp.Volume), nil
+}
+
+func (p *plugin) DeleteVolume(ctx context.Context, v *api.Volume) error {
+	if v.VolumeInfo == nil {
+		return errors.New("VolumeInfo must not be nil")
+	}
+	// we won't use a fancy createDeleteVolumeRequest method because the
+	// request is simple enough to not bother with it
+	secrets := p.makeSecrets(v)
+	req := &csi.DeleteVolumeRequest{
+		VolumeId: v.VolumeInfo.VolumeID,
+		Secrets:  secrets,
+	}
+	c, err := p.Client(ctx)
+	if err != nil {
+		return err
+	}
+	// response from RPC intentionally left blank
+	_, err = c.DeleteVolume(ctx, req)
+	return err
+}
+
+// PublishVolume calls ControllerPublishVolume to publish the given Volume to
+// the Node with the given swarmkit ID. It returns a map, which is the
+// PublishContext for this Volume on this Node.
+func (p *plugin) PublishVolume(ctx context.Context, v *api.Volume, nodeID string) (map[string]string, error) {
+	if !p.publisher {
+		return nil, nil
+	}
+
+	req := p.makeControllerPublishVolumeRequest(v, nodeID)
+	c, err := p.Client(ctx)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := c.ControllerPublishVolume(ctx, req)
+
+	if err != nil {
+		return nil, err
+	}
+	return resp.PublishContext, nil
+}
+
+// UnpublishVolume calls ControllerUnpublishVolume to unpublish the given
+// Volume from the Node with the given swarmkit ID. It returns an error if the
+// unpublish does not succeed
+func (p *plugin) UnpublishVolume(ctx context.Context, v *api.Volume, nodeID string) error {
+	if !p.publisher {
+		return nil
+	}
+
+	req := p.makeControllerUnpublishVolumeRequest(v, nodeID)
+	c, err := p.Client(ctx)
+	if err != nil {
+		return err
+	}
+
+	// response of the RPC intentionally left blank
+	_, err = c.ControllerUnpublishVolume(ctx, req)
+	return err
+}
+
+// AddNode adds a mapping for a node's swarm ID to the ID provided by this CSI
+// plugin. This allows future calls to the plugin to be done entirely in terms
+// of the swarm node ID.
+//
+// The CSI node ID is provided by the node as part of the NodeDescription.
+func (p *plugin) AddNode(swarmID, csiID string) {
+	p.swarmToCSI[swarmID] = csiID
+	p.csiToSwarm[csiID] = swarmID
+}
+
+// RemoveNode removes a node from this plugin's node mappings.
+func (p *plugin) RemoveNode(swarmID string) {
+	csiID := p.swarmToCSI[swarmID]
+	delete(p.swarmToCSI, swarmID)
+	delete(p.csiToSwarm, csiID)
+}
+
+// Client retrieves a csi.ControllerClient for this plugin
+//
+// If this is the first time client has been called and no client yet exists,
+// it will initialize the gRPC connection to the remote plugin and create a new
+// ControllerClient.
+func (p *plugin) Client(ctx context.Context) (csi.ControllerClient, error) {
+	if p.controllerClient == nil {
+		if err := p.connect(ctx); err != nil {
+			return nil, err
+		}
+	}
+	return p.controllerClient, nil
+}
+
+// makeCreateVolume makes a csi.CreateVolumeRequest from the volume object and
+// spec. it uses the Plugin's SecretProvider to retrieve relevant secrets.
+func (p *plugin) makeCreateVolume(v *api.Volume) *csi.CreateVolumeRequest {
+	secrets := p.makeSecrets(v)
+	return &csi.CreateVolumeRequest{
+		Name:       v.Spec.Annotations.Name,
+		Parameters: v.Spec.Driver.Options,
+		VolumeCapabilities: []*csi.VolumeCapability{
+			makeCapability(v.Spec.AccessMode),
+		},
+		Secrets:                   secrets,
+		AccessibilityRequirements: makeTopologyRequirement(v.Spec.AccessibilityRequirements),
+		CapacityRange:             makeCapacityRange(v.Spec.CapacityRange),
+	}
+}
+
+// makeSecrets uses the plugin's SecretProvider to make the secrets map to pass
+// to CSI RPCs.
+func (p *plugin) makeSecrets(v *api.Volume) map[string]string {
+	secrets := map[string]string{}
+	for _, vs := range v.Spec.Secrets {
+		// a secret should never be nil, but check just to be sure
+		if vs != nil {
+			secret := p.provider.GetSecret(vs.Secret)
+			if secret != nil {
+				// TODO(dperny): return an error, but this should never happen,
+				// as secrets should be validated at volume creation time
+				secrets[vs.Key] = string(secret.Spec.Data)
+			}
+		}
+	}
+	return secrets
+}
+
+func (p *plugin) makeControllerPublishVolumeRequest(v *api.Volume, nodeID string) *csi.ControllerPublishVolumeRequest {
+	if v.VolumeInfo == nil {
+		return nil
+	}
+
+	secrets := p.makeSecrets(v)
+	capability := makeCapability(v.Spec.AccessMode)
+	capability.AccessType = &csi.VolumeCapability_Mount{
+		Mount: &csi.VolumeCapability_MountVolume{},
+	}
+	return &csi.ControllerPublishVolumeRequest{
+		VolumeId:         v.VolumeInfo.VolumeID,
+		NodeId:           p.swarmToCSI[nodeID],
+		Secrets:          secrets,
+		VolumeCapability: capability,
+		VolumeContext:    v.VolumeInfo.VolumeContext,
+	}
+}
+
+func (p *plugin) makeControllerUnpublishVolumeRequest(v *api.Volume, nodeID string) *csi.ControllerUnpublishVolumeRequest {
+	if v.VolumeInfo == nil {
+		return nil
+	}
+
+	secrets := p.makeSecrets(v)
+	return &csi.ControllerUnpublishVolumeRequest{
+		VolumeId: v.VolumeInfo.VolumeID,
+		NodeId:   p.swarmToCSI[nodeID],
+		Secrets:  secrets,
+	}
+}

+ 34 - 0
vendor/github.com/docker/swarmkit/manager/csi/secret.go

@@ -0,0 +1,34 @@
+package csi
+
+import (
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/manager/state/store"
+)
+
+// SecretProvider is an interface for retrieving secrets to use with CSI calls.
+type SecretProvider interface {
+	// GetSecret returns the secret with the given ID, or nil if not found.
+	GetSecret(id string) *api.Secret
+}
+
+type secretProvider struct {
+	s *store.MemoryStore
+}
+
+func NewSecretProvider(s *store.MemoryStore) SecretProvider {
+	return &secretProvider{
+		s: s,
+	}
+}
+
+// GetSecret returns the secret with the given ID, or nil if not found.
+//
+// This method accesses the store, and so should not be called from inside
+// another store transaction
+func (p *secretProvider) GetSecret(id string) *api.Secret {
+	var secret *api.Secret
+	p.s.View(func(tx store.ReadTx) {
+		secret = store.GetSecret(tx, id)
+	})
+	return secret
+}

+ 138 - 10
vendor/github.com/docker/swarmkit/manager/dispatcher/assignments.go

@@ -18,18 +18,30 @@ type typeAndID struct {
 }
 
 type assignmentSet struct {
-	dp                   *drivers.DriverProvider
-	tasksMap             map[string]*api.Task
+	nodeID   string
+	dp       *drivers.DriverProvider
+	tasksMap map[string]*api.Task
+	// volumesMap keeps track of the VolumePublishStatus of the given volumes.
+	// this tells us both which volumes are assigned to the node, and what the
+	// last known VolumePublishStatus was, so we can understand if we need to
+	// send an update.
+	volumesMap map[string]*api.VolumePublishStatus
+	// tasksUsingDependency tracks both tasks and volumes using a given
+	// dependency. this works because the ID generated for swarm comes from a
+	// large enough space that it is reliably astronomically unlikely that IDs
+	// will ever collide.
 	tasksUsingDependency map[typeAndID]map[string]struct{}
 	changes              map[typeAndID]*api.AssignmentChange
 	log                  *logrus.Entry
 }
 
-func newAssignmentSet(log *logrus.Entry, dp *drivers.DriverProvider) *assignmentSet {
+func newAssignmentSet(nodeID string, log *logrus.Entry, dp *drivers.DriverProvider) *assignmentSet {
 	return &assignmentSet{
+		nodeID:               nodeID,
 		dp:                   dp,
 		changes:              make(map[typeAndID]*api.AssignmentChange),
 		tasksMap:             make(map[string]*api.Task),
+		volumesMap:           make(map[string]*api.VolumePublishStatus),
 		tasksUsingDependency: make(map[typeAndID]map[string]struct{}),
 		log:                  log,
 	}
@@ -48,15 +60,17 @@ func assignSecret(a *assignmentSet, readTx store.ReadTx, mapKey typeAndID, t *ap
 		}).Debug("failed to fetch secret")
 		return
 	}
-	// If the secret should not be reused for other tasks, give it a unique ID for the task to allow different values for different tasks.
+	// If the secret should not be reused for other tasks, give it a unique ID
+	// for the task to allow different values for different tasks.
 	if doNotReuse {
 		// Give the secret a new ID and mark it as internal
 		originalSecretID := secret.ID
 		taskSpecificID := identity.CombineTwoIDs(originalSecretID, t.ID)
 		secret.ID = taskSpecificID
 		secret.Internal = true
-		// Create a new mapKey with the new ID and insert it into the dependencies map for the task.
-		// This will make the changes map contain an entry with the new ID rather than the original one.
+		// Create a new mapKey with the new ID and insert it into the
+		// dependencies map for the task.  This will make the changes map
+		// contain an entry with the new ID rather than the original one.
 		mapKey = typeAndID{objType: mapKey.objType, id: secret.ID}
 		a.tasksUsingDependency[mapKey] = make(map[string]struct{})
 		a.tasksUsingDependency[mapKey][t.ID] = struct{}{}
@@ -92,8 +106,12 @@ func assignConfig(a *assignmentSet, readTx store.ReadTx, mapKey typeAndID) {
 }
 
 func (a *assignmentSet) addTaskDependencies(readTx store.ReadTx, t *api.Task) {
+	// first, we go through all ResourceReferences, which give us the necessary
+	// information about which secrets and configs are in use.
 	for _, resourceRef := range t.Spec.ResourceReferences {
 		mapKey := typeAndID{objType: resourceRef.ResourceType, id: resourceRef.ResourceID}
+		// if there are no tasks using this dependency yet, then we can assign
+		// it.
 		if len(a.tasksUsingDependency[mapKey]) == 0 {
 			switch resourceRef.ResourceType {
 			case api.ResourceType_SECRET:
@@ -107,6 +125,8 @@ func (a *assignmentSet) addTaskDependencies(readTx store.ReadTx, t *api.Task) {
 				continue
 			}
 		}
+		// otherwise, we don't need to add a new assignment. we just need to
+		// track the fact that another task is now using this dependency.
 		a.tasksUsingDependency[mapKey][t.ID] = struct{}{}
 	}
 
@@ -160,7 +180,9 @@ func (a *assignmentSet) releaseDependency(mapKey typeAndID, assignment *api.Assi
 	return true
 }
 
-func (a *assignmentSet) releaseTaskDependencies(t *api.Task) bool {
+// releaseTaskDependencies needs a store transaction because volumes have
+// associated Secrets which need to be released.
+func (a *assignmentSet) releaseTaskDependencies(readTx store.ReadTx, t *api.Task) bool {
 	var modified bool
 
 	for _, resourceRef := range t.Spec.ResourceReferences {
@@ -251,7 +273,7 @@ func (a *assignmentSet) addOrUpdateTask(readTx store.ReadTx, t *api.Task) bool {
 				// If releasing the dependencies caused us to
 				// remove something from the assignment set,
 				// mark one modification.
-				return a.releaseTaskDependencies(t)
+				return a.releaseTaskDependencies(readTx, t)
 			}
 			return false
 		}
@@ -274,7 +296,113 @@ func (a *assignmentSet) addOrUpdateTask(readTx store.ReadTx, t *api.Task) bool {
 	return true
 }
 
-func (a *assignmentSet) removeTask(t *api.Task) bool {
+// addOrUpdateVolume tracks a Volume assigned to a node.
+func (a *assignmentSet) addOrUpdateVolume(readTx store.ReadTx, v *api.Volume) bool {
+	var publishStatus *api.VolumePublishStatus
+	for _, status := range v.PublishStatus {
+		if status.NodeID == a.nodeID {
+			publishStatus = status
+			break
+		}
+	}
+
+	// if there is no publishStatus for this Volume on this Node, or if the
+	// Volume has not yet been published to this node, then we do not need to
+	// track this assignment.
+	if publishStatus == nil || publishStatus.State < api.VolumePublishStatus_PUBLISHED {
+		return false
+	}
+
+	// check if we are already tracking this volume, and what its old status
+	// is. if the states are identical, then we don't have any update to make.
+	if oldStatus, ok := a.volumesMap[v.ID]; ok && oldStatus.State == publishStatus.State {
+		return false
+	}
+
+	// if the volume has already been confirmed as unpublished, we can stop
+	// tracking it and remove its dependencies.
+	if publishStatus.State > api.VolumePublishStatus_PENDING_NODE_UNPUBLISH {
+		return a.removeVolume(readTx, v)
+	}
+
+	for _, secret := range v.Spec.Secrets {
+		mapKey := typeAndID{objType: api.ResourceType_SECRET, id: secret.Secret}
+		if len(a.tasksUsingDependency[mapKey]) == 0 {
+			// we can call assignSecret with task being nil, but it does mean
+			// that any secret that uses a driver will not work. we'll call
+			// that a limitation of volumes for now.
+			assignSecret(a, readTx, mapKey, nil)
+		}
+		a.tasksUsingDependency[mapKey][v.ID] = struct{}{}
+	}
+
+	// volumes are sent to nodes as VolumeAssignments. This is because a node
+	// needs node-specific information (the PublishContext from
+	// ControllerPublishVolume).
+	assignment := &api.VolumeAssignment{
+		ID:             v.ID,
+		VolumeID:       v.VolumeInfo.VolumeID,
+		Driver:         v.Spec.Driver,
+		VolumeContext:  v.VolumeInfo.VolumeContext,
+		PublishContext: publishStatus.PublishContext,
+		AccessMode:     v.Spec.AccessMode,
+		Secrets:        v.Spec.Secrets,
+	}
+
+	volumeKey := typeAndID{objType: api.ResourceType_VOLUME, id: v.ID}
+	// assignmentChange is the whole assignment without the action, which we
+	// will set next
+	assignmentChange := &api.AssignmentChange{
+		Assignment: &api.Assignment{
+			Item: &api.Assignment_Volume{
+				Volume: assignment,
+			},
+		},
+	}
+
+	// if we're in state PENDING_NODE_UNPUBLISH, we actually need to send a
+	// remove message. we do this every time, even if the node never got the
+	// first add assignment. This is because the node might not know that it
+	// has a volume published; for example, the node may be restarting, and
+	// the in-memory store does not have knowledge of the volume.
+	if publishStatus.State == api.VolumePublishStatus_PENDING_NODE_UNPUBLISH {
+		assignmentChange.Action = api.AssignmentChange_AssignmentActionRemove
+	} else {
+		assignmentChange.Action = api.AssignmentChange_AssignmentActionUpdate
+	}
+	a.changes[volumeKey] = assignmentChange
+	a.volumesMap[v.ID] = publishStatus
+	return true
+}
+
+func (a *assignmentSet) removeVolume(readTx store.ReadTx, v *api.Volume) bool {
+	if _, exists := a.volumesMap[v.ID]; !exists {
+		return false
+	}
+
+	modified := false
+
+	// if the volume does exist, we can release its secrets
+	for _, secret := range v.Spec.Secrets {
+		mapKey := typeAndID{objType: api.ResourceType_SECRET, id: secret.Secret}
+		assignment := &api.Assignment{
+			Item: &api.Assignment_Secret{
+				Secret: &api.Secret{ID: secret.Secret},
+			},
+		}
+		if a.releaseDependency(mapKey, assignment, v.ID) {
+			modified = true
+		}
+	}
+
+	// we don't need to add a removal message. the removal of the
+	// VolumeAssignment will have already happened.
+	delete(a.volumesMap, v.ID)
+
+	return modified
+}
+
+func (a *assignmentSet) removeTask(readTx store.ReadTx, t *api.Task) bool {
 	if _, exists := a.tasksMap[t.ID]; !exists {
 		return false
 	}
@@ -293,7 +421,7 @@ func (a *assignmentSet) removeTask(t *api.Task) bool {
 	// Release the dependencies being used by this task.
 	// Ignoring the return here. We will always mark this as a
 	// modification, since a task is being removed.
-	a.releaseTaskDependencies(t)
+	a.releaseTaskDependencies(readTx, t)
 	return true
 }
 

+ 154 - 9
vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go

@@ -154,6 +154,12 @@ type Dispatcher struct {
 	nodeUpdates     map[string]nodeUpdate // indexed by node ID
 	nodeUpdatesLock sync.Mutex
 
+	// unpublishedVolumes keeps track of Volumes that Nodes have reported as
+	// unpublished. it maps the volume ID to a list of nodes it has been
+	// unpublished on.
+	unpublishedVolumes     map[string][]string
+	unpublishedVolumesLock sync.Mutex
+
 	downNodes *nodeStore
 
 	processUpdatesTrigger chan struct{}
@@ -223,6 +229,10 @@ func (d *Dispatcher) Run(ctx context.Context) error {
 	d.nodeUpdates = make(map[string]nodeUpdate)
 	d.nodeUpdatesLock.Unlock()
 
+	d.unpublishedVolumesLock.Lock()
+	d.unpublishedVolumes = make(map[string][]string)
+	d.unpublishedVolumesLock.Unlock()
+
 	d.mu.Lock()
 	if d.isRunning() {
 		d.mu.Unlock()
@@ -305,6 +315,8 @@ func (d *Dispatcher) Run(ctx context.Context) error {
 			// batch timer has already expired, so no need to drain
 			batchTimer.Reset(maxBatchInterval)
 		case v := <-configWatcher:
+			// TODO(dperny): remove extraneous log message
+			log.G(ctx).Info("cluster update event")
 			cluster := v.(api.EventUpdateCluster)
 			d.mu.Lock()
 			if cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod != nil {
@@ -664,14 +676,61 @@ func (d *Dispatcher) UpdateTaskStatus(ctx context.Context, r *api.UpdateTaskStat
 		case <-dctx.Done():
 		}
 	}
-	return nil, nil
+
+	return &api.UpdateTaskStatusResponse{}, nil
+}
+
+func (d *Dispatcher) UpdateVolumeStatus(ctx context.Context, r *api.UpdateVolumeStatusRequest) (*api.UpdateVolumeStatusResponse, error) {
+	d.rpcRW.RLock()
+	defer d.rpcRW.RUnlock()
+
+	_, err := d.isRunningLocked()
+	if err != nil {
+		return nil, err
+	}
+
+	nodeInfo, err := ca.RemoteNode(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	nodeID := nodeInfo.NodeID
+	fields := logrus.Fields{
+		"node.id":      nodeID,
+		"node.session": r.SessionID,
+		"method":       "(*Dispatcher).UpdateVolumeStatus",
+	}
+	if nodeInfo.ForwardedBy != nil {
+		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
+	}
+	log := log.G(ctx).WithFields(fields)
+
+	if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
+		return nil, err
+	}
+
+	d.unpublishedVolumesLock.Lock()
+	for _, status := range r.Updates {
+		if status.Unpublished {
+			// it's ok if nodes is nil, because append works on a nil slice.
+			nodes := append(d.unpublishedVolumes[status.ID], nodeID)
+			d.unpublishedVolumes[status.ID] = nodes
+			log.Debugf("volume %s unpublished on node %s", status.ID, nodeID)
+		}
+	}
+	d.unpublishedVolumesLock.Unlock()
+
+	// we won't kick off a batch here, we'll just wait for the timer.
+	return &api.UpdateVolumeStatusResponse{}, nil
 }
 
 func (d *Dispatcher) processUpdates(ctx context.Context) {
 	var (
-		taskUpdates map[string]*api.TaskStatus
-		nodeUpdates map[string]nodeUpdate
+		taskUpdates        map[string]*api.TaskStatus
+		nodeUpdates        map[string]nodeUpdate
+		unpublishedVolumes map[string][]string
 	)
+
 	d.taskUpdatesLock.Lock()
 	if len(d.taskUpdates) != 0 {
 		taskUpdates = d.taskUpdates
@@ -686,7 +745,14 @@ func (d *Dispatcher) processUpdates(ctx context.Context) {
 	}
 	d.nodeUpdatesLock.Unlock()
 
-	if len(taskUpdates) == 0 && len(nodeUpdates) == 0 {
+	d.unpublishedVolumesLock.Lock()
+	if len(d.unpublishedVolumes) != 0 {
+		unpublishedVolumes = d.unpublishedVolumes
+		d.unpublishedVolumes = make(map[string][]string)
+	}
+	d.unpublishedVolumesLock.Unlock()
+
+	if len(taskUpdates) == 0 && len(nodeUpdates) == 0 && len(unpublishedVolumes) == 0 {
 		return
 	}
 
@@ -749,7 +815,7 @@ func (d *Dispatcher) processUpdates(ctx context.Context) {
 				logger := log.WithField("node.id", nodeID)
 				node := store.GetNode(tx, nodeID)
 				if node == nil {
-					logger.Errorf("node unavailable")
+					logger.Error("node unavailable")
 					return nil
 				}
 
@@ -776,6 +842,37 @@ func (d *Dispatcher) processUpdates(ctx context.Context) {
 			}
 		}
 
+		for volumeID, nodes := range unpublishedVolumes {
+			err := batch.Update(func(tx store.Tx) error {
+				logger := log.WithField("volume.id", volumeID)
+				volume := store.GetVolume(tx, volumeID)
+				if volume == nil {
+					logger.Error("volume unavailable")
+				}
+
+				// buckle your seatbelts, we're going quadratic.
+			nodesLoop:
+				for _, nodeID := range nodes {
+					for _, status := range volume.PublishStatus {
+						if status.NodeID == nodeID {
+							status.State = api.VolumePublishStatus_PENDING_UNPUBLISH
+							continue nodesLoop
+						}
+					}
+				}
+
+				if err := store.UpdateVolume(tx, volume); err != nil {
+					logger.WithError(err).Error("failed to update volume")
+					return nil
+				}
+				return nil
+			})
+
+			if err != nil {
+				log.WithError(err).Error("dispatcher volume update transaction failed")
+			}
+		}
+
 		return nil
 	})
 	if err != nil {
@@ -947,7 +1044,7 @@ func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatche
 	var (
 		sequence    int64
 		appliesTo   string
-		assignments = newAssignmentSet(log, d.dp)
+		assignments = newAssignmentSet(nodeID, log, d.dp)
 	)
 
 	sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error {
@@ -974,12 +1071,45 @@ func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatche
 				assignments.addOrUpdateTask(readTx, t)
 			}
 
+			// there is no quick index for which nodes are using a volume, but
+			// there should not be thousands of volumes in a typical
+			// deployment, so this should be ok
+			volumes, err := store.FindVolumes(readTx, store.All)
+			if err != nil {
+				return err
+			}
+
+			for _, v := range volumes {
+				for _, status := range v.PublishStatus {
+					if status.NodeID == nodeID {
+						assignments.addOrUpdateVolume(readTx, v)
+					}
+				}
+			}
+
 			return nil
 		},
 		api.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
 			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
 		api.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
 			Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
+		api.EventUpdateVolume{
+			// typically, a check function takes an object from this
+			// prototypical event and compares it to the object from the
+			// incoming event. However, because this is a bespoke, in-line
+			// matcher, we can discard the first argument (the prototype) and
+			// instead pass the desired node ID in as part of a closure.
+			Checks: []api.VolumeCheckFunc{
+				func(v1, v2 *api.Volume) bool {
+					for _, status := range v2.PublishStatus {
+						if status.NodeID == nodeID {
+							return true
+						}
+					}
+					return false
+				},
+			},
+		},
 	)
 	if err != nil {
 		return err
@@ -1035,11 +1165,26 @@ func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatche
 						}
 					})
 				case api.EventDeleteTask:
-					if assignments.removeTask(v.Task) {
-						oneModification()
-					}
+					d.store.View(func(readTx store.ReadTx) {
+						if assignments.removeTask(readTx, v.Task) {
+							oneModification()
+						}
+					})
 					// TODO(aaronl): For node secrets, we'll need to handle
 					// EventCreateSecret.
+				case api.EventUpdateVolume:
+					d.store.View(func(readTx store.ReadTx) {
+						vol := store.GetVolume(readTx, v.Volume.ID)
+						// check through the PublishStatus to see if there is
+						// one for this node.
+						for _, status := range vol.PublishStatus {
+							if status.NodeID == nodeID {
+								if assignments.addOrUpdateVolume(readTx, vol) {
+									oneModification()
+								}
+							}
+						}
+					})
 				}
 			case <-batchingTimeout:
 				break batchingLoop

+ 26 - 3
vendor/github.com/docker/swarmkit/manager/manager.go

@@ -25,6 +25,7 @@ import (
 	"github.com/docker/swarmkit/manager/allocator/cnmallocator"
 	"github.com/docker/swarmkit/manager/allocator/networkallocator"
 	"github.com/docker/swarmkit/manager/controlapi"
+	"github.com/docker/swarmkit/manager/csi"
 	"github.com/docker/swarmkit/manager/dispatcher"
 	"github.com/docker/swarmkit/manager/drivers"
 	"github.com/docker/swarmkit/manager/health"
@@ -36,6 +37,7 @@ import (
 	"github.com/docker/swarmkit/manager/orchestrator/jobs"
 	"github.com/docker/swarmkit/manager/orchestrator/replicated"
 	"github.com/docker/swarmkit/manager/orchestrator/taskreaper"
+	"github.com/docker/swarmkit/manager/orchestrator/volumeenforcer"
 	"github.com/docker/swarmkit/manager/resourceapi"
 	"github.com/docker/swarmkit/manager/scheduler"
 	"github.com/docker/swarmkit/manager/state/raft"
@@ -150,8 +152,10 @@ type Manager struct {
 	jobsOrchestrator       *jobs.Orchestrator
 	taskReaper             *taskreaper.TaskReaper
 	constraintEnforcer     *constraintenforcer.ConstraintEnforcer
+	volumeEnforcer         *volumeenforcer.VolumeEnforcer
 	scheduler              *scheduler.Scheduler
 	allocator              *allocator.Allocator
+	volumeManager          *csi.Manager
 	keyManager             *keymanager.KeyManager
 	server                 *grpc.Server
 	localserver            *grpc.Server
@@ -200,13 +204,13 @@ func (l *closeOnceListener) Close() error {
 
 // New creates a Manager which has not started to accept requests yet.
 func New(config *Config) (*Manager, error) {
-	err := os.MkdirAll(config.StateDir, 0700)
+	err := os.MkdirAll(config.StateDir, 0o700)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to create state directory")
 	}
 
 	raftStateDir := filepath.Join(config.StateDir, "raft")
-	err = os.MkdirAll(raftStateDir, 0700)
+	err = os.MkdirAll(raftStateDir, 0o700)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to create raft state directory")
 	}
@@ -328,7 +332,7 @@ func (m *Manager) BindControl(addr string) error {
 
	// don't create a socket directory if we're on windows. we use named pipes
 	if runtime.GOOS != "windows" {
-		err := os.MkdirAll(filepath.Dir(addr), 0700)
+		err := os.MkdirAll(filepath.Dir(addr), 0o700)
 		if err != nil {
 			return errors.Wrap(err, "failed to create socket directory")
 		}
@@ -692,6 +696,9 @@ func (m *Manager) Stop(ctx context.Context, clearData bool) {
 	if m.constraintEnforcer != nil {
 		m.constraintEnforcer.Stop()
 	}
+	if m.volumeEnforcer != nil {
+		m.volumeEnforcer.Stop()
+	}
 	if m.scheduler != nil {
 		m.scheduler.Stop()
 	}
@@ -998,12 +1005,14 @@ func (m *Manager) becomeLeader(ctx context.Context) {
 
 	m.replicatedOrchestrator = replicated.NewReplicatedOrchestrator(s)
 	m.constraintEnforcer = constraintenforcer.New(s)
+	m.volumeEnforcer = volumeenforcer.New(s)
 	m.globalOrchestrator = global.NewGlobalOrchestrator(s)
 	m.jobsOrchestrator = jobs.NewOrchestrator(s)
 	m.taskReaper = taskreaper.New(s)
 	m.scheduler = scheduler.New(s)
 	m.keyManager = keymanager.New(s, keymanager.DefaultConfig())
 	m.roleManager = newRoleManager(s, m.raftNode)
+	m.volumeManager = csi.NewManager(s, m.config.PluginGetter)
 
 	// TODO(stevvooe): Allocate a context that can be used to
 	// shutdown underlying manager processes when leadership is lost.
@@ -1095,6 +1104,10 @@ func (m *Manager) becomeLeader(ctx context.Context) {
 		constraintEnforcer.Run()
 	}(m.constraintEnforcer)
 
+	go func(volumeEnforcer *volumeenforcer.VolumeEnforcer) {
+		volumeEnforcer.Run()
+	}(m.volumeEnforcer)
+
 	go func(taskReaper *taskreaper.TaskReaper) {
 		taskReaper.Run(ctx)
 	}(m.taskReaper)
@@ -1119,6 +1132,10 @@ func (m *Manager) becomeLeader(ctx context.Context) {
 	go func(roleManager *roleManager) {
 		roleManager.Run(ctx)
 	}(m.roleManager)
+
+	go func(volumeManager *csi.Manager) {
+		volumeManager.Run(ctx)
+	}(m.volumeManager)
 }
 
 // becomeFollower shuts down the subsystems that are only run by the leader.
@@ -1139,6 +1156,9 @@ func (m *Manager) becomeFollower() {
 	m.constraintEnforcer.Stop()
 	m.constraintEnforcer = nil
 
+	m.volumeEnforcer.Stop()
+	m.volumeEnforcer = nil
+
 	m.replicatedOrchestrator.Stop()
 	m.replicatedOrchestrator = nil
 
@@ -1158,6 +1178,9 @@ func (m *Manager) becomeFollower() {
 		m.keyManager.Stop()
 		m.keyManager = nil
 	}
+
+	m.volumeManager.Stop()
+	m.volumeManager = nil
 }
 
 // defaultClusterObject creates a default cluster.

+ 114 - 0
vendor/github.com/docker/swarmkit/manager/orchestrator/volumeenforcer/volume_enforcer.go

@@ -0,0 +1,114 @@
+package volumeenforcer
+
+import (
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/log"
+	"github.com/docker/swarmkit/manager/state/store"
+)
+
+// VolumeEnforcer is a component, styled off of the ConstraintEnforcer, that
+// watches for updates to Volumes, and shuts down tasks if those Volumes are
+// being drained.
+type VolumeEnforcer struct {
+	store    *store.MemoryStore
+	stopChan chan struct{}
+	doneChan chan struct{}
+}
+
+func New(s *store.MemoryStore) *VolumeEnforcer {
+	return &VolumeEnforcer{
+		store:    s,
+		stopChan: make(chan struct{}),
+		doneChan: make(chan struct{}),
+	}
+}
+
+func (ve *VolumeEnforcer) Run() {
+	defer close(ve.doneChan)
+
+	var volumes []*api.Volume
+	watcher, cancelWatch, _ := store.ViewAndWatch(ve.store, func(tx store.ReadTx) error {
+		var err error
+		volumes, err = store.FindVolumes(tx, store.All)
+		return err
+	}, api.EventUpdateVolume{})
+	defer cancelWatch()
+
+	for _, volume := range volumes {
+		ve.rejectNoncompliantTasks(volume)
+	}
+
+	for {
+		select {
+		case event := <-watcher:
+			v := event.(api.EventUpdateVolume).Volume
+			ve.rejectNoncompliantTasks(v)
+		case <-ve.stopChan:
+			return
+		}
+	}
+
+}
+
+func (ve *VolumeEnforcer) Stop() {
+	close(ve.stopChan)
+	<-ve.doneChan
+}
+
+func (ve *VolumeEnforcer) rejectNoncompliantTasks(v *api.Volume) {
+	if v.Spec.Availability != api.VolumeAvailabilityDrain {
+		return
+	}
+
+	var volumeTasks []*api.Task
+
+	ve.store.View(func(tx store.ReadTx) {
+		// ignore the error, it only happens if you pass an invalid find by
+		volumeTasks, _ = store.FindTasks(tx, store.ByVolumeAttachment(v.ID))
+	})
+	if len(volumeTasks) != 0 {
+		err := ve.store.Batch(func(batch *store.Batch) error {
+			for _, t := range volumeTasks {
+				// skip any tasks we know are already shut down or shutting
+				// down. Do this before we open the transaction. This saves us
+				// copying volumeTasks while still avoiding unnecessary
+				// transactions. we will still need to check again once we
+				// start the transaction against the latest version of the
+				// task.
+				if t.DesiredState > api.TaskStateCompleted || t.Status.State >= api.TaskStateCompleted {
+					continue
+				}
+
+				err := batch.Update(func(tx store.Tx) error {
+					t = store.GetTask(tx, t.ID)
+					// another check for task liveness.
+					if t == nil || t.DesiredState > api.TaskStateCompleted || t.Status.State >= api.TaskStateCompleted {
+						return nil
+					}
+
+					// as documented in the ConstraintEnforcer:
+					//
+					// We set the observed state to
+					// REJECTED, rather than the desired
+					// state. Desired state is owned by the
+					// orchestrator, and setting it directly
+					// will bypass actions such as
+					// restarting the task on another node
+					// (if applicable).
+					t.Status.State = api.TaskStateRejected
+					t.Status.Message = "task rejected by volume enforcer"
+					t.Status.Err = "attached to volume which is being drained"
+					return store.UpdateTask(tx, t)
+				})
+				if err != nil {
+					log.L.WithField("module", "volumeenforcer").WithError(err).Errorf("failed to shut down task %s", t.ID)
+				}
+			}
+			return nil
+		})
+
+		if err != nil {
+			log.L.WithField("module", "volumeenforcer").WithError(err).Errorf("failed to shut down tasks for volume %s", v.ID)
+		}
+	}
+}

+ 61 - 0
vendor/github.com/docker/swarmkit/manager/scheduler/filter.go

@@ -384,3 +384,64 @@ func (f *MaxReplicasFilter) Check(n *NodeInfo) bool {
 func (f *MaxReplicasFilter) Explain(nodes int) string {
 	return "max replicas per node limit exceed"
 }
+
+type VolumesFilter struct {
+	vs *volumeSet
+	t  *api.Task
+
+	// requestedVolumes is a set of volumes requested by the task. This can
+	// include either volume names or volume groups. Volume groups, as in the
+	// Mount.Source field, are prefixed with "group:"
+	requestedVolumes []*api.Mount
+}
+
+func (f *VolumesFilter) SetTask(t *api.Task) bool {
+	// if there is no volume Manager, skip this filter always
+	if f.vs == nil {
+		return false
+	}
+	f.t = t
+	// reset requestedVolumes every time we set a task, so we don't
+	// accidentally append to the last task's set of requested volumes.
+	f.requestedVolumes = []*api.Mount{}
+
+	// t should never be nil, but we should ensure that it is not just in case
+	// we make mistakes in the future.
+	if t == nil {
+		return false
+	}
+
+	c := t.Spec.GetContainer()
+	if c == nil {
+		return false
+	}
+
+	// hasCSI will be set true if one of the mounts is a CSI-type mount.
+	hasCSI := false
+	for _, mount := range c.Mounts {
+		if mount.Type == api.MountTypeCSI {
+			hasCSI = true
+			f.requestedVolumes = append(f.requestedVolumes, &mount)
+		}
+	}
+	return hasCSI
+}
+
+func (f *VolumesFilter) Check(nodeInfo *NodeInfo) bool {
+	for _, mount := range f.requestedVolumes {
+		if f.vs.isVolumeAvailableOnNode(mount, nodeInfo) != "" {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (f *VolumesFilter) Explain(nodes int) string {
+	if nodes == 1 {
+		return "cannot fulfill requested CSI volume mounts on 1 node"
+	}
+	return fmt.Sprintf(
+		"cannot fulfill requested CSI volume mounts on %d nodes", nodes,
+	)
+}

+ 4 - 0
vendor/github.com/docker/swarmkit/manager/scheduler/pipeline.go

@@ -67,6 +67,10 @@ func (p *Pipeline) Process(n *NodeInfo) bool {
 	return true
 }
 
+func (p *Pipeline) AddFilter(f Filter) {
+	p.checklist = append(p.checklist, checklistEntry{f: f})
+}
+
 // SetTask sets up the filters to process a new task. Once this is called,
 // Process can be called repeatedly to try to assign the task various nodes.
 func (p *Pipeline) SetTask(t *api.Task) {

+ 210 - 3
vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go

@@ -2,6 +2,7 @@ package scheduler
 
 import (
 	"context"
+	"sync"
 	"time"
 
 	"github.com/docker/swarmkit/api"
@@ -39,7 +40,10 @@ type Scheduler struct {
 	nodeSet          nodeSet
 	allTasks         map[string]*api.Task
 	pipeline         *Pipeline
+	volumes          *volumeSet
 
+	// stopOnce is a sync.Once used to ensure that Stop is idempotent
+	stopOnce sync.Once
 	// stopChan signals to the state machine to stop running
 	stopChan chan struct{}
 	// doneChan is closed when the state machine terminates
@@ -57,10 +61,25 @@ func New(store *store.MemoryStore) *Scheduler {
 		stopChan:                make(chan struct{}),
 		doneChan:                make(chan struct{}),
 		pipeline:                NewPipeline(),
+		volumes:                 newVolumeSet(),
 	}
 }
 
 func (s *Scheduler) setupTasksList(tx store.ReadTx) error {
+	// add all volumes that are ready to the volumeSet
+	volumes, err := store.FindVolumes(tx, store.All)
+	if err != nil {
+		return err
+	}
+
+	for _, volume := range volumes {
+		// only add volumes that have been created, meaning they have a
+		// VolumeID.
+		if volume.VolumeInfo != nil && volume.VolumeInfo.VolumeID != "" {
+			s.volumes.addOrUpdateVolume(volume)
+		}
+	}
+
 	tasks, err := store.FindTasks(tx, store.All)
 	if err != nil {
 		return err
@@ -93,6 +112,9 @@ func (s *Scheduler) setupTasksList(tx store.ReadTx) error {
 			continue
 		}
 
+		// track the volumes in use by the task
+		s.volumes.reserveTaskVolumes(t)
+
 		if tasksByNode[t.NodeID] == nil {
 			tasksByNode[t.NodeID] = make(map[string]*api.Task)
 		}
@@ -103,9 +125,12 @@ func (s *Scheduler) setupTasksList(tx store.ReadTx) error {
 }
 
 // Run is the scheduler event loop.
-func (s *Scheduler) Run(ctx context.Context) error {
+func (s *Scheduler) Run(pctx context.Context) error {
+	ctx := log.WithModule(pctx, "scheduler")
 	defer close(s.doneChan)
 
+	s.pipeline.AddFilter(&VolumesFilter{vs: s.volumes})
+
 	updates, cancel, err := store.ViewAndWatch(s.store, s.setupTasksList)
 	if err != nil {
 		log.G(ctx).WithError(err).Errorf("snapshot store update failed")
@@ -172,6 +197,20 @@ func (s *Scheduler) Run(ctx context.Context) error {
 				tickRequired = true
 			case api.EventDeleteNode:
 				s.nodeSet.remove(v.Node.ID)
+			case api.EventUpdateVolume:
+				// there is no need for a EventCreateVolume case, because
+				// volumes are not ready to use until they've passed through
+				// the volume manager and been created with the plugin
+				//
+				// as such, only addOrUpdateVolume if the VolumeInfo exists and
+				// has a nonempty VolumeID
+				if v.Volume.VolumeInfo != nil && v.Volume.VolumeInfo.VolumeID != "" {
+					// TODO(dperny): verify that updating volumes doesn't break
+					// scheduling
+					log.G(ctx).WithField("volume.id", v.Volume.ID).Debug("updated volume")
+					s.volumes.addOrUpdateVolume(v.Volume)
+					tickRequired = true
+				}
 			case state.EventCommit:
 				if commitDebounceTimer != nil {
 					if time.Since(debouncingStarted) > maxLatency {
@@ -200,7 +239,10 @@ func (s *Scheduler) Run(ctx context.Context) error {
 
 // Stop causes the scheduler event loop to stop running.
 func (s *Scheduler) Stop() {
-	close(s.stopChan)
+	// ensure stop is called only once. this helps in some test cases.
+	s.stopOnce.Do(func() {
+		close(s.stopChan)
+	})
 	<-s.doneChan
 }
 
@@ -309,6 +351,12 @@ func (s *Scheduler) deleteTask(t *api.Task) bool {
 	delete(s.allTasks, t.ID)
 	delete(s.preassignedTasks, t.ID)
 	delete(s.pendingPreassignedTasks, t.ID)
+
+	// remove the task volume reservations as well, if any
+	for _, attachment := range t.Volumes {
+		s.volumes.releaseVolume(attachment.ID, t.ID)
+	}
+
 	nodeInfo, err := s.nodeSet.nodeInfo(t.NodeID)
 	if err == nil && nodeInfo.removeTask(t) {
 		s.nodeSet.updateNode(nodeInfo)
@@ -370,6 +418,10 @@ func (s *Scheduler) processPreassignedTasks(ctx context.Context) {
 		if err == nil && nodeInfo.removeTask(decision.new) {
 			s.nodeSet.updateNode(nodeInfo)
 		}
+
+		for _, va := range decision.new.Volumes {
+			s.volumes.releaseVolume(va.ID, decision.new.ID)
+		}
 	}
 }
 
@@ -425,6 +477,11 @@ func (s *Scheduler) tick(ctx context.Context) {
 			s.nodeSet.updateNode(nodeInfo)
 		}
 
+		// release the volumes we tried to use
+		for _, va := range decision.new.Volumes {
+			s.volumes.releaseVolume(va.ID, decision.new.ID)
+		}
+
 		// enqueue task for next scheduling attempt
 		s.enqueue(decision.old)
 	}
@@ -443,6 +500,7 @@ func (s *Scheduler) applySchedulingDecisions(ctx context.Context, schedulingDeci
 			err := batch.Update(func(tx store.Tx) error {
 				// Update exactly one task inside this Update
 				// callback.
+			taskLoop:
 				for taskID, decision := range schedulingDecisions {
 					delete(schedulingDecisions, taskID)
 
@@ -474,11 +532,82 @@ func (s *Scheduler) applySchedulingDecisions(ctx context.Context, schedulingDeci
 						}
 					}
 
+					volumes := []*api.Volume{}
+					for _, va := range decision.new.Volumes {
+						v := store.GetVolume(tx, va.ID)
+						if v == nil {
+							log.G(ctx).Debugf(
+								"scheduler failed to update task %s because volume %s could not be found",
+								taskID,
+								va.ID,
+							)
+							failed = append(failed, decision)
+							continue taskLoop
+						}
+
+						// it's ok if the copy of the Volume we scheduled off
+						// of is out of date, because the Scheduler is the only
+						// component which add new uses of a particular Volume,
+						// which means that in most cases, no update to the
+						// volume could conflict with the copy the Scheduler
+						// used to make decisions.
+						//
+						// the exception is that the VolumeAvailability could
+						// have been changed. both Pause and Drain
+						// availabilities mean the Volume should not be
+						// scheduled, and so we call off our attempt to commit
+						// this scheduling decision. this is the only field we
+						// must check for conflicts.
+						//
+						// this is, additionally, the reason that a Volume must
+						// be set to Drain before it can be deleted. it stops
+						// us from having to worry about any other field when
+						// attempting to use the Volume.
+						if v.Spec.Availability != api.VolumeAvailabilityActive {
+							log.G(ctx).Debugf(
+								"scheduler failed to update task %s because volume %s has availability %s",
+								taskID, v.ID, v.Spec.Availability.String(),
+							)
+							failed = append(failed, decision)
+							continue taskLoop
+						}
+
+						alreadyPublished := false
+						for _, ps := range v.PublishStatus {
+							if ps.NodeID == decision.new.NodeID {
+								alreadyPublished = true
+								break
+							}
+						}
+						if !alreadyPublished {
+							v.PublishStatus = append(
+								v.PublishStatus,
+								&api.VolumePublishStatus{
+									NodeID: decision.new.NodeID,
+									State:  api.VolumePublishStatus_PENDING_PUBLISH,
+								},
+							)
+							volumes = append(volumes, v)
+						}
+					}
+
 					if err := store.UpdateTask(tx, decision.new); err != nil {
 						log.G(ctx).Debugf("scheduler failed to update task %s; will retry", taskID)
 						failed = append(failed, decision)
 						continue
 					}
+					for _, v := range volumes {
+						if err := store.UpdateVolume(tx, v); err != nil {
+							// TODO(dperny): handle the case of a partial
+							// update?
+							log.G(ctx).WithError(err).Debugf(
+								"scheduler failed to update task %v; volume %v could not be updated",
+								taskID, v.ID,
+							)
+							failed = append(failed, decision)
+							continue taskLoop
+						}
+					}
 					successful = append(successful, decision)
 					return nil
 				}
@@ -488,7 +617,11 @@ func (s *Scheduler) applySchedulingDecisions(ctx context.Context, schedulingDeci
 				return err
 			}
 		}
-		return nil
+		// finally, every time we make new scheduling decisions, take the
+		// opportunity to release volumes.
+		return batch.Update(func(tx store.Tx) error {
+			return s.volumes.freeVolumes(tx)
+		})
 	})
 
 	if err != nil {
@@ -516,6 +649,23 @@ func (s *Scheduler) taskFitNode(ctx context.Context, t *api.Task, nodeID string)
 
 		return &newT
 	}
+
+	// before doing all of the updating logic, get the volume attachments
+	// for the task on this node. this should always succeed, because we
+	// should already have filtered nodes based on volume availability, but
+	// just in case we missed something and it doesn't, we have an error
+	// case.
+	attachments, err := s.volumes.chooseTaskVolumes(t, &nodeInfo)
+	if err != nil {
+		newT.Status.Timestamp = ptypes.MustTimestampProto(time.Now())
+		newT.Status.Err = err.Error()
+		s.allTasks[t.ID] = &newT
+
+		return &newT
+	}
+
+	newT.Volumes = attachments
+
 	newT.Status = api.TaskStatus{
 		State:     api.TaskStateAssigned,
 		Timestamp: ptypes.MustTimestampProto(time.Now()),
@@ -587,6 +737,28 @@ func (s *Scheduler) scheduleTaskGroup(ctx context.Context, taskGroup map[string]
 	}
 }
 
+// scheduleNTasksOnSubtree schedules a set of tasks with identical constraints
+// onto a set of nodes, taking into account placement preferences.
+//
+// placement preferences are used to create a tree such that every branch
+// represents one subset of nodes across which tasks should be spread.
+//
+// because of this tree structure, scheduleNTasksOnSubtree is a recursive
+// function. If there are subtrees of the current tree, then we recurse. if we
+// are at a leaf node, past which there are no subtrees, then we try to
+// schedule a proportional number of tasks to the nodes of that branch.
+//
+// - n is the number of tasks being scheduled on this subtree
+// - taskGroup is a set of tasks to schedule, taking the form of a map from the
+//   task ID to the task object.
+// - tree is the decision tree we're scheduling on. this is, effectively, the
+//   set of nodes that meet scheduling constraints. these nodes are arranged
+//   into a tree so that placement preferences can be taken into account when
+//   spreading tasks across nodes.
+// - schedulingDecisions is a set of the scheduling decisions already made for
+//   this tree
+// - nodeLess is a comparator that chooses which of the two nodes is preferable
+//   to schedule on.
 func (s *Scheduler) scheduleNTasksOnSubtree(ctx context.Context, n int, taskGroup map[string]*api.Task, tree *decisionTree, schedulingDecisions map[string]schedulingDecision, nodeLess func(a *NodeInfo, b *NodeInfo) bool) int {
 	if tree.next == nil {
 		nodes := tree.orderedNodes(s.pipeline.Process, nodeLess)
@@ -639,6 +811,23 @@ func (s *Scheduler) scheduleNTasksOnSubtree(ctx context.Context, n int, taskGrou
 	return tasksScheduled
 }
 
+// scheduleNTasksOnNodes schedules some number of tasks on the set of provided
+// nodes. The number of tasks being scheduled may be less than the total number
+// of tasks, as the Nodes may be one branch of a tree used to spread tasks.
+//
+// returns the number of tasks actually scheduled to these nodes. this may be
+// fewer than the number of tasks desired to be scheduled, if there are
+// insufficient nodes to meet resource constraints.
+//
+// - n is the number of tasks desired to be scheduled to this set of nodes
+// - taskGroup is the tasks desired to be scheduled, in the form of a map from
+//   task ID to task object. this argument is mutated; tasks which have been
+//   scheduled are removed from the map.
+// - nodes is the set of nodes to schedule to
+// - schedulingDecisions is the set of scheduling decisions that have been made
+//   thus far, in the form of a map from task ID to the decision made.
+// - nodeLess is a simple comparator that chooses which of two nodes would be
+//   preferable to schedule on.
 func (s *Scheduler) scheduleNTasksOnNodes(ctx context.Context, n int, taskGroup map[string]*api.Task, nodes []NodeInfo, schedulingDecisions map[string]schedulingDecision, nodeLess func(a *NodeInfo, b *NodeInfo) bool) int {
 	tasksScheduled := 0
 	failedConstraints := make(map[int]bool) // key is index in nodes slice
@@ -652,10 +841,24 @@ func (s *Scheduler) scheduleNTasksOnNodes(ctx context.Context, n int, taskGroup
 		}
 
 		node := &nodes[nodeIter%nodeCount]
+		// before doing all of the updating logic, get the volume attachments
+		// for the task on this node. this should always succeed, because we
+		// should already have filtered nodes based on volume availability, but
+		// just in case we missed something and it doesn't, we have an error
+		// case.
+		attachments, err := s.volumes.chooseTaskVolumes(t, node)
+		if err != nil {
+			// TODO(dperny) if there's an error, then what? i'm frankly not
+			// sure.
+			log.G(ctx).WithField("task.id", t.ID).WithError(err).Error("could not find task volumes")
+		}
 
 		log.G(ctx).WithField("task.id", t.ID).Debugf("assigning to node %s", node.ID)
+		// she turned me into a newT!
 		newT := *t
+		newT.Volumes = attachments
 		newT.NodeID = node.ID
+		s.volumes.reserveTaskVolumes(&newT)
 		newT.Status = api.TaskStatus{
 			State:     api.TaskStateAssigned,
 			Timestamp: ptypes.MustTimestampProto(time.Now()),
@@ -663,6 +866,10 @@ func (s *Scheduler) scheduleNTasksOnNodes(ctx context.Context, n int, taskGroup
 		}
 		s.allTasks[t.ID] = &newT
 
+		// in each iteration of this loop, the node we choose will always be
+		// one which meets constraints. at the end of each iteration, we
+		// re-process nodes, allowing us to remove nodes which no longer meet
+		// resource constraints.
 		nodeInfo, err := s.nodeSet.nodeInfo(node.ID)
 		if err == nil && nodeInfo.addTask(&newT) {
 			s.nodeSet.updateNode(nodeInfo)

+ 47 - 0
vendor/github.com/docker/swarmkit/manager/scheduler/topology.go

@@ -0,0 +1,47 @@
+package scheduler
+
+import (
+	"github.com/docker/swarmkit/api"
+)
+
+// IsInTopology takes a Topology `top` (which is reported by a Node) and a list
+// of Topologies `accessible` (which comes from a created volume, in the form
+// of the AccessibleTopology) and returns true if `top` lies within
+// `accessible` (meaning a node with that Topology can access a volume with
+// that AccessibleTopology).
+//
+// In order for `top` to lie within `accessible`, there must exist a topology
+// in `accessible` such that for every subdomain/segment pair in that topology,
+// there exists an equivalent subdomain/segment pair in `top`.
+//
+// For examples, see the test for this function.
+//
+// NOTE(dperny): It is unclear whether a topology can be partial. For example,
+// can an accessible topology contain only a "region" subdomain, without a
+// "zone" subdomain? This function assumes yes.
+func IsInTopology(top *api.Topology, accessible []*api.Topology) bool {
+	// if any part of the topology equation is missing, then this does fit.
+	if top == nil || accessible == nil || len(accessible) == 0 {
+		return true
+	}
+	// go through each accessible topology
+topologies:
+	for _, topology := range accessible {
+		// and for each topology, go through every segment
+		for subdomain, segment := range topology.Segments {
+			// if the segment for this subdomain is different in the `top`,
+			// then, `top` does not lie within this topology.
+			if top.Segments[subdomain] != segment {
+				// go to the next topology in the list
+				continue topologies
+			}
+		}
+		// if we get through all of the segments specified in this topology,
+		// and they have all matched, then `top` lies within `accessible`.
+		return true
+	}
+	// if we have iterated through all topologies, and never once finished
+	// iterating through all topological segments, then `top` does not lie
+	// within `accessible`.
+	return false
+}

+ 319 - 0
vendor/github.com/docker/swarmkit/manager/scheduler/volumes.go

@@ -0,0 +1,319 @@
+package scheduler
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/manager/state/store"
+)
+
+// the scheduler package does double duty -- in addition to choosing nodes, it
+// must also choose volumes. this is because volumes are fungible, and can be
+// scheduled to several nodes, and used by several tasks. we should endeavor to
+// spread tasks across volumes, like we spread nodes. on the positive side,
+// unlike nodes, volumes are not hierarchical. that is, we don't need to
+// spread across multiple levels of a tree, only a flat set.
+
+// volumeSet is the set of all volumes currently managed
+type volumeSet struct {
+	// volumes is a mapping of volume IDs to volumeInfo
+	volumes map[string]volumeInfo
+	// byGroup is a mapping from a volume group name to a set of volumes in
+	// that group
+	byGroup map[string]map[string]struct{}
+	// byName is a mapping of volume names to swarmkit volume IDs.
+	byName map[string]string
+}
+
+// volumeUsage contains information about the usage of a Volume by a specific
+// task.
+type volumeUsage struct {
+	nodeID   string
+	readOnly bool
+}
+
+// volumeInfo contains scheduler information about a given volume
+type volumeInfo struct {
+	volume *api.Volume
+	tasks  map[string]volumeUsage
+	// nodes is a set of nodes a volume is in use on. it maps a node ID to a
+	// reference count for how many tasks are using the volume on that node.
+	nodes map[string]int
+}
+
+func newVolumeSet() *volumeSet {
+	return &volumeSet{
+		volumes: map[string]volumeInfo{},
+		byGroup: map[string]map[string]struct{}{},
+		byName:  map[string]string{},
+	}
+}
+
+func (vs *volumeSet) getVolume(id string) *api.Volume {
+	// getVolume returns the volume object for the given ID as stored in the
+	// volumeSet, or nil if none exists
+	return vs.volumes[id].volume
+}
+
+func (vs *volumeSet) addOrUpdateVolume(v *api.Volume) {
+	if info, ok := vs.volumes[v.ID]; !ok {
+		vs.volumes[v.ID] = volumeInfo{
+			volume: v,
+			nodes:  map[string]int{},
+			tasks:  map[string]volumeUsage{},
+		}
+	} else {
+		// if the volume already exists in the set, then only update the volume
+		// object, not the tasks map.
+		info.volume = v
+	}
+
+	if set, ok := vs.byGroup[v.Spec.Group]; ok {
+		set[v.ID] = struct{}{}
+	} else {
+		vs.byGroup[v.Spec.Group] = map[string]struct{}{v.ID: {}}
+	}
+	vs.byName[v.Spec.Annotations.Name] = v.ID
+}
+
+func (vs *volumeSet) removeVolume(volumeID string) {
+	if info, ok := vs.volumes[volumeID]; ok {
+		// if the volume exists in the set, look up its group ID and remove it
+		// from the byGroup mapping as well
+		group := info.volume.Spec.Group
+		delete(vs.byGroup[group], volumeID)
+		delete(vs.volumes, volumeID)
+		delete(vs.byName, info.volume.Spec.Annotations.Name)
+	}
+}
+
+// chooseTaskVolumes selects a set of VolumeAttachments for the task on the
+// given node. it expects that the node was already validated to have the
+// necessary volumes, but it will return an error if a full set of volumes is
+// not available.
+func (vs *volumeSet) chooseTaskVolumes(task *api.Task, nodeInfo *NodeInfo) ([]*api.VolumeAttachment, error) {
+	volumes := []*api.VolumeAttachment{}
+
+	// we'll reserve volumes in this loop, but release all of our reservations
+	// before we finish. the caller will need to call reserveTaskVolumes after
+	// calling this function
+	// TODO(dperny): this is probably not optimal
+	defer func() {
+		for _, volume := range volumes {
+			vs.releaseVolume(volume.ID, task.ID)
+		}
+	}()
+
+	// TODO(dperny): handle non-container tasks
+	c := task.Spec.GetContainer()
+	if c == nil {
+		return nil, nil
+	}
+	for _, mount := range task.Spec.GetContainer().Mounts {
+		if mount.Type == api.MountTypeCSI {
+			candidate := vs.isVolumeAvailableOnNode(&mount, nodeInfo)
+			if candidate == "" {
+				// TODO(dperny): return structured error types, instead of
+				// error strings
+				return nil, fmt.Errorf("cannot find volume to satisfy mount with source %v", mount.Source)
+			}
+			vs.reserveVolume(candidate, task.ID, nodeInfo.Node.ID, mount.ReadOnly)
+			volumes = append(volumes, &api.VolumeAttachment{
+				ID:     candidate,
+				Source: mount.Source,
+				Target: mount.Target,
+			})
+		}
+	}
+
+	return volumes, nil
+}
+
+// reserveTaskVolumes identifies all volumes currently in use on a task and
+// marks them in the volumeSet as in use.
+func (vs *volumeSet) reserveTaskVolumes(task *api.Task) {
+	for _, va := range task.Volumes {
+		// we shouldn't need to handle non-container tasks because those tasks
+		// won't have any entries in task.Volumes.
+		for _, mount := range task.Spec.GetContainer().Mounts {
+			if mount.Source == va.Source && mount.Target == va.Target {
+				vs.reserveVolume(va.ID, task.ID, task.NodeID, mount.ReadOnly)
+			}
+		}
+	}
+}
+
+func (vs *volumeSet) reserveVolume(volumeID, taskID, nodeID string, readOnly bool) {
+	info, ok := vs.volumes[volumeID]
+	if !ok {
+		// TODO(dperny): don't just return nothing.
+		return
+	}
+
+	info.tasks[taskID] = volumeUsage{nodeID: nodeID, readOnly: readOnly}
+	// increment the reference count for this node.
+	info.nodes[nodeID] = info.nodes[nodeID] + 1
+}
+
+func (vs *volumeSet) releaseVolume(volumeID, taskID string) {
+	info, ok := vs.volumes[volumeID]
+	if !ok {
+		// if the volume isn't in the set, no action to take.
+		return
+	}
+
+	// decrement the reference count for this task's node
+	usage, ok := info.tasks[taskID]
+	if ok {
+		// this is probably an unnecessarily high level of caution, but make
+		// sure we don't go below zero on node count.
+		if c := info.nodes[usage.nodeID]; c > 0 {
+			info.nodes[usage.nodeID] = c - 1
+		}
+		delete(info.tasks, taskID)
+	}
+}
+
+// freeVolumes finds volumes that are no longer in use on some nodes, and
+// updates them to be unpublished from those nodes.
+//
+// TODO(dperny): this is messy and has a lot of overhead. it should be reworked
+// to something more streamlined.
+func (vs *volumeSet) freeVolumes(tx store.Tx) error {
+	for volumeID, info := range vs.volumes {
+		v := store.GetVolume(tx, volumeID)
+		if v == nil {
+			continue
+		}
+
+		changed := false
+		for _, status := range v.PublishStatus {
+			if info.nodes[status.NodeID] == 0 && status.State == api.VolumePublishStatus_PUBLISHED {
+				status.State = api.VolumePublishStatus_PENDING_NODE_UNPUBLISH
+				changed = true
+			}
+		}
+		if changed {
+			if err := store.UpdateVolume(tx, v); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// isVolumeAvailableOnNode checks if a volume satisfying the given mount is
+// available on the given node.
+//
+// Returns the ID of the volume available, or an empty string if no such volume
+// is found.
+func (vs *volumeSet) isVolumeAvailableOnNode(mount *api.Mount, node *NodeInfo) string {
+	source := mount.Source
+	// first, discern whether we're looking for a group or a volume
+	// try trimming off the "group:" prefix. if the resulting string is
+	// different from the input string (meaning something has been trimmed),
+	// then this volume is actually a volume group.
+	if group := strings.TrimPrefix(source, "group:"); group != source {
+		ids, ok := vs.byGroup[group]
+		// if there are no volumes of this group specified, then no volume
+		// meets the mount criteria.
+		if !ok {
+			return ""
+		}
+
+		// iterate through all ids in the group, checking if any one meets the
+		// spec.
+		for id := range ids {
+			if vs.checkVolume(id, node, mount.ReadOnly) {
+				return id
+			}
+		}
+		return ""
+	}
+
+	// if it's not a group, it's a name. resolve the volume name to its ID
+	id, ok := vs.byName[source]
+	if !ok || !vs.checkVolume(id, node, mount.ReadOnly) {
+		return ""
+	}
+	return id
+}
+
+// checkVolume checks if an individual volume with the given ID can be placed
+// on the given node.
+func (vs *volumeSet) checkVolume(id string, info *NodeInfo, readOnly bool) bool {
+	vi := vs.volumes[id]
+	// first, check if the volume's availability is even Active. If not, no
+	// reason to bother with anything further.
+	if vi.volume != nil && vi.volume.Spec.Availability != api.VolumeAvailabilityActive {
+		return false
+	}
+
+	// get the node topology for this volume
+	var top *api.Topology
+	// get the topology for this volume's driver on this node
+	for _, info := range info.Description.CSIInfo {
+		if info.PluginName == vi.volume.Spec.Driver.Name {
+			top = info.AccessibleTopology
+			break
+		}
+	}
+
+	// check if the volume is available on this node. a volume's
+	// availability on a node depends on its accessible topology, how it's
+	// already being used, and how this task intends to use it.
+
+	if vi.volume.Spec.AccessMode.Scope == api.VolumeScopeSingleNode {
+		// if the volume is not in use on this node already, then it can't
+		// be used here.
+		for _, usage := range vi.tasks {
+			if usage.nodeID != info.ID {
+				return false
+			}
+		}
+	}
+
+	// even if the volume is currently on this node, or it has multi-node
+	// access, the volume sharing needs to be compatible.
+	switch vi.volume.Spec.AccessMode.Sharing {
+	case api.VolumeSharingNone:
+		// if the volume sharing is none, then the volume cannot be
+		// used by another task
+		if len(vi.tasks) > 0 {
+			return false
+		}
+	case api.VolumeSharingOneWriter:
+		// if the mount is not ReadOnly, and the volume has a writer, then
+		// this volume does not work.
+		if !readOnly && hasWriter(vi) {
+			return false
+		}
+	case api.VolumeSharingReadOnly:
+		// if the volume sharing is read-only, then the Mount must also
+		// be read-only
+		if !readOnly {
+			return false
+		}
+	}
+
+	// then, do the quick check of whether this volume is in the topology.  if
+	// the volume has an AccessibleTopology, and it does not lie within the
+	// node's topology, then this volume won't fit.
+	if !IsInTopology(top, vi.volume.VolumeInfo.AccessibleTopology) {
+		return false
+	}
+
+	return true
+}
+
+// hasWriter is a helper function that returns true if at least one task is
+// using this volume not in read-only mode.
+func hasWriter(info volumeInfo) bool {
+	for _, usage := range info.tasks {
+		if !usage.readOnly {
+			return true
+		}
+	}
+	return false
+}

+ 1 - 1
vendor/github.com/docker/swarmkit/manager/state/raft/membership/cluster.go

@@ -4,10 +4,10 @@ import (
 	"errors"
 	"sync"
 
-	"github.com/coreos/etcd/raft/raftpb"
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/watch"
 	"github.com/gogo/protobuf/proto"
+	"go.etcd.io/etcd/raft/v3/raftpb"
 )
 
 var (

+ 11 - 5
vendor/github.com/docker/swarmkit/manager/state/raft/raft.go

@@ -14,9 +14,6 @@ import (
 	"time"
 
 	"code.cloudfoundry.org/clock"
-	"github.com/coreos/etcd/pkg/idutil"
-	"github.com/coreos/etcd/raft"
-	"github.com/coreos/etcd/raft/raftpb"
 	"github.com/docker/go-events"
 	"github.com/docker/go-metrics"
 	"github.com/docker/swarmkit/api"
@@ -32,6 +29,9 @@ import (
 	"github.com/gogo/protobuf/proto"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"go.etcd.io/etcd/pkg/v3/idutil"
+	"go.etcd.io/etcd/raft/v3"
+	"go.etcd.io/etcd/raft/v3/raftpb"
 	"golang.org/x/time/rate"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -447,7 +447,7 @@ func (n *Node) JoinAndStart(ctx context.Context) (err error) {
 	}
 
 	n.initTransport()
-	n.raftNode = raft.StartNode(n.Config, nil)
+	n.raftNode = raft.RestartNode(n.Config)
 
 	return nil
 }
@@ -607,6 +607,12 @@ func (n *Node) Run(ctx context.Context) error {
 			}
 
 			for _, msg := range rd.Messages {
+				// if the message is a snapshot, before we send it, we should
+				// overwrite the original ConfState from the snapshot with the
+				// current one
+				if msg.Type == raftpb.MsgSnap {
+					msg.Snapshot.Metadata.ConfState = n.confState
+				}
 				// Send raft messages to peers
 				if err := n.transport.Send(msg); err != nil {
 					log.G(ctx).WithError(err).Error("failed to send message to member")
@@ -2096,7 +2102,7 @@ func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raf
 func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
 	ids := make(map[uint64]struct{})
 	if snap != nil {
-		for _, id := range snap.Metadata.ConfState.Nodes {
+		for _, id := range snap.Metadata.ConfState.Voters {
 			ids[id] = struct{}{}
 		}
 	}

+ 2 - 2
vendor/github.com/docker/swarmkit/manager/state/raft/storage.go

@@ -4,8 +4,6 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/coreos/etcd/raft"
-	"github.com/coreos/etcd/raft/raftpb"
 	"github.com/docker/go-metrics"
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/log"
@@ -14,6 +12,8 @@ import (
 	"github.com/docker/swarmkit/manager/state/raft/storage"
 	"github.com/docker/swarmkit/manager/state/store"
 	"github.com/pkg/errors"
+	"go.etcd.io/etcd/raft/v3"
+	"go.etcd.io/etcd/raft/v3/raftpb"
 )
 
 var (

+ 9 - 10
vendor/github.com/docker/swarmkit/manager/state/raft/storage/snapwrap.go

@@ -1,24 +1,23 @@
 package storage
 
 import (
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sort"
 	"strings"
 
-	"github.com/coreos/etcd/pkg/fileutil"
-	"github.com/coreos/etcd/raft/raftpb"
-	"github.com/coreos/etcd/snap"
 	"github.com/docker/swarmkit/manager/encryption"
 	"github.com/pkg/errors"
+	"go.etcd.io/etcd/client/pkg/v3/fileutil"
+	"go.etcd.io/etcd/raft/v3/raftpb"
+	"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
 )
 
-// This package wraps the github.com/coreos/etcd/snap package, and encrypts
+// This package wraps the go.etcd.io/etcd/server/v3/etcdserver/api/snap package, and encrypts
 // the bytes of whatever snapshot is passed to it, and decrypts the bytes of
 // whatever snapshot it reads.
 
-// Snapshotter is the interface presented by github.com/coreos/etcd/snap.Snapshotter that we depend upon
+// Snapshotter is the interface presented by go.etcd.io/etcd/server/v3/etcdserver/api/snap.Snapshotter that we depend upon
 type Snapshotter interface {
 	SaveSnap(snapshot raftpb.Snapshot) error
 	Load() (*raftpb.Snapshot, error)
@@ -34,7 +33,7 @@ var _ Snapshotter = &wrappedSnap{}
 var _ Snapshotter = &snap.Snapshotter{}
 var _ SnapFactory = snapCryptor{}
 
-// wrappedSnap wraps a github.com/coreos/etcd/snap.Snapshotter, and handles
+// wrappedSnap wraps a go.etcd.io/etcd/server/v3/etcdserver/api/snap.Snapshotter, and handles
 // encrypting/decrypting.
 type wrappedSnap struct {
 	*snap.Snapshotter
@@ -88,7 +87,7 @@ func NewSnapFactory(encrypter encryption.Encrypter, decrypter encryption.Decrypt
 // NewSnapshotter returns a new Snapshotter with the given encrypters and decrypters
 func (sc snapCryptor) New(dirpath string) Snapshotter {
 	return &wrappedSnap{
-		Snapshotter: snap.New(dirpath),
+		Snapshotter: snap.New(nil, dirpath),
 		encrypter:   sc.encrypter,
 		decrypter:   sc.decrypter,
 	}
@@ -97,7 +96,7 @@ func (sc snapCryptor) New(dirpath string) Snapshotter {
 type originalSnap struct{}
 
 func (o originalSnap) New(dirpath string) Snapshotter {
-	return snap.New(dirpath)
+	return snap.New(nil, dirpath)
 }
 
 // OriginalSnap is the original `snap` package as an implementation of the SnapFactory interface
@@ -140,7 +139,7 @@ func MigrateSnapshot(oldDir, newDir string, oldFactory, newFactory SnapFactory)
 // ListSnapshots lists all the snapshot files in a particular directory and returns
 // the snapshot files in reverse lexical order (newest first)
 func ListSnapshots(dirpath string) ([]string, error) {
-	dirents, err := ioutil.ReadDir(dirpath)
+	dirents, err := os.ReadDir(dirpath)
 	if err != nil {
 		return nil, err
 	}

+ 11 - 9
vendor/github.com/docker/swarmkit/manager/state/raft/storage/storage.go

@@ -7,14 +7,14 @@ import (
 	"path/filepath"
 	"sync"
 
-	"github.com/coreos/etcd/pkg/fileutil"
-	"github.com/coreos/etcd/raft/raftpb"
-	"github.com/coreos/etcd/snap"
-	"github.com/coreos/etcd/wal"
-	"github.com/coreos/etcd/wal/walpb"
 	"github.com/docker/swarmkit/log"
 	"github.com/docker/swarmkit/manager/encryption"
 	"github.com/pkg/errors"
+	"go.etcd.io/etcd/client/pkg/v3/fileutil"
+	"go.etcd.io/etcd/raft/v3/raftpb"
+	"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+	"go.etcd.io/etcd/server/v3/wal"
+	"go.etcd.io/etcd/server/v3/wal/walpb"
 )
 
 // ErrNoWAL is returned if there are no WALs on disk
@@ -83,7 +83,7 @@ func (e *EncryptedRaftLogger) BootstrapFromDisk(ctx context.Context, oldEncrypti
 		}
 	}
 	// ensure the new directory exists
-	if err := os.MkdirAll(snapDir, 0700); err != nil {
+	if err := os.MkdirAll(snapDir, 0o700); err != nil {
 		return nil, WALData{}, errors.Wrap(err, "failed to create snapshot directory")
 	}
 
@@ -105,6 +105,7 @@ func (e *EncryptedRaftLogger) BootstrapFromDisk(ctx context.Context, oldEncrypti
 	if snapshot != nil {
 		walsnap.Index = snapshot.Metadata.Index
 		walsnap.Term = snapshot.Metadata.Term
+		walsnap.ConfState = &snapshot.Metadata.ConfState
 	}
 
 	if !wal.Exist(walDir) {
@@ -147,7 +148,7 @@ func (e *EncryptedRaftLogger) BootstrapNew(metadata []byte) error {
 	walFactory := NewWALFactory(encrypter, decrypter)
 
 	for _, dirpath := range []string{filepath.Dir(e.walDir()), e.snapDir()} {
-		if err := os.MkdirAll(dirpath, 0700); err != nil {
+		if err := os.MkdirAll(dirpath, 0o700); err != nil {
 			return errors.Wrapf(err, "failed to create %s", dirpath)
 		}
 	}
@@ -197,8 +198,9 @@ func (e *EncryptedRaftLogger) RotateEncryptionKey(newKey []byte) {
 func (e *EncryptedRaftLogger) SaveSnapshot(snapshot raftpb.Snapshot) error {
 
 	walsnap := walpb.Snapshot{
-		Index: snapshot.Metadata.Index,
-		Term:  snapshot.Metadata.Term,
+		Index:     snapshot.Metadata.Index,
+		Term:      snapshot.Metadata.Term,
+		ConfState: &snapshot.Metadata.ConfState,
 	}
 
 	e.encoderMu.RLock()

+ 13 - 14
vendor/github.com/docker/swarmkit/manager/state/raft/storage/walwrap.go

@@ -3,25 +3,24 @@ package storage
 import (
 	"context"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sort"
 	"strings"
 
-	"github.com/coreos/etcd/raft/raftpb"
-	"github.com/coreos/etcd/wal"
-	"github.com/coreos/etcd/wal/walpb"
 	"github.com/docker/swarmkit/log"
 	"github.com/docker/swarmkit/manager/encryption"
 	"github.com/pkg/errors"
+	"go.etcd.io/etcd/raft/v3/raftpb"
+	"go.etcd.io/etcd/server/v3/wal"
+	"go.etcd.io/etcd/server/v3/wal/walpb"
 )
 
-// This package wraps the github.com/coreos/etcd/wal package, and encrypts
+// This package wraps the go.etcd.io/etcd/server/v3/wal package, and encrypts
 // the bytes of whatever entry is passed to it, and decrypts the bytes of
 // whatever entry it reads.
 
-// WAL is the interface presented by github.com/coreos/etcd/wal.WAL that we depend upon
+// WAL is the interface presented by go.etcd.io/etcd/server/v3/wal.WAL that we depend upon
 type WAL interface {
 	ReadAll() ([]byte, raftpb.HardState, []raftpb.Entry, error)
 	ReleaseLockTo(index uint64) error
@@ -41,7 +40,7 @@ var _ WAL = &wrappedWAL{}
 var _ WAL = &wal.WAL{}
 var _ WALFactory = walCryptor{}
 
-// wrappedWAL wraps a github.com/coreos/etcd/wal.WAL, and handles encrypting/decrypting
+// wrappedWAL wraps a go.etcd.io/etcd/server/v3/wal.WAL, and handles encrypting/decrypting
 type wrappedWAL struct {
 	*wal.WAL
 	encrypter encryption.Encrypter
@@ -103,7 +102,7 @@ func NewWALFactory(encrypter encryption.Encrypter, decrypter encryption.Decrypte
 
 // Create returns a new WAL object with the given encrypters and decrypters.
 func (wc walCryptor) Create(dirpath string, metadata []byte) (WAL, error) {
-	w, err := wal.Create(dirpath, metadata)
+	w, err := wal.Create(nil, dirpath, metadata)
 	if err != nil {
 		return nil, err
 	}
@@ -116,7 +115,7 @@ func (wc walCryptor) Create(dirpath string, metadata []byte) (WAL, error) {
 
 // Open returns a new WAL object with the given encrypters and decrypters.
 func (wc walCryptor) Open(dirpath string, snap walpb.Snapshot) (WAL, error) {
-	w, err := wal.Open(dirpath, snap)
+	w, err := wal.Open(nil, dirpath, snap)
 	if err != nil {
 		return nil, err
 	}
@@ -130,10 +129,10 @@ func (wc walCryptor) Open(dirpath string, snap walpb.Snapshot) (WAL, error) {
 type originalWAL struct{}
 
 func (o originalWAL) Create(dirpath string, metadata []byte) (WAL, error) {
-	return wal.Create(dirpath, metadata)
+	return wal.Create(nil, dirpath, metadata)
 }
 func (o originalWAL) Open(dirpath string, walsnap walpb.Snapshot) (WAL, error) {
-	return wal.Open(dirpath, walsnap)
+	return wal.Open(nil, dirpath, walsnap)
 }
 
 // OriginalWAL is the original `wal` package as an implementation of the WALFactory interface
@@ -178,7 +177,7 @@ func ReadRepairWAL(
 			if repaired || err != io.ErrUnexpectedEOF {
 				return nil, WALData{}, errors.Wrap(err, "irreparable WAL error")
 			}
-			if !wal.Repair(walDir) {
+			if !wal.Repair(nil, walDir) {
 				return nil, WALData{}, errors.Wrap(err, "WAL error cannot be repaired")
 			}
 			log.G(ctx).WithError(err).Info("repaired WAL error")
@@ -203,7 +202,7 @@ func MigrateWALs(ctx context.Context, oldDir, newDir string, oldFactory, newFact
 	}
 	oldReader.Close()
 
-	if err := os.MkdirAll(filepath.Dir(newDir), 0700); err != nil {
+	if err := os.MkdirAll(filepath.Dir(newDir), 0o700); err != nil {
 		return errors.Wrap(err, "could not create parent directory")
 	}
 
@@ -237,7 +236,7 @@ func MigrateWALs(ctx context.Context, oldDir, newDir string, oldFactory, newFact
 // ListWALs lists all the wals in a directory and returns the list in lexical
 // order (oldest first)
 func ListWALs(dirpath string) ([]string, error) {
-	dirents, err := ioutil.ReadDir(dirpath)
+	dirents, err := os.ReadDir(dirpath)
 	if err != nil {
 		return nil, err
 	}

+ 2 - 2
vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go

@@ -9,12 +9,12 @@ import (
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 
-	"github.com/coreos/etcd/raft"
-	"github.com/coreos/etcd/raft/raftpb"
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/log"
 	"github.com/docker/swarmkit/manager/state/raft/membership"
 	"github.com/pkg/errors"
+	"go.etcd.io/etcd/raft/v3"
+	"go.etcd.io/etcd/raft/v3/raftpb"
 	"google.golang.org/grpc/status"
 )
 

+ 2 - 2
vendor/github.com/docker/swarmkit/manager/state/raft/transport/transport.go

@@ -13,10 +13,10 @@ import (
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
 
-	"github.com/coreos/etcd/raft"
-	"github.com/coreos/etcd/raft/raftpb"
 	"github.com/docker/swarmkit/log"
 	"github.com/pkg/errors"
+	"go.etcd.io/etcd/raft/v3"
+	"go.etcd.io/etcd/raft/v3/raftpb"
 )
 
 // ErrIsNotFound indicates that peer was never added to transport.

+ 32 - 0
vendor/github.com/docker/swarmkit/manager/state/store/by.go

@@ -165,6 +165,16 @@ func ByReferencedConfigID(configID string) By {
 	return byReferencedConfigID(configID)
 }
 
+type byVolumeAttachment string
+
+func (b byVolumeAttachment) isBy() {}
+
+// ByVolumeAttachment creates an object to pass to Find to search for a Task
+// that has been assigned the volume with the given ID.
+func ByVolumeAttachment(volumeID string) By {
+	return byVolumeAttachment(volumeID)
+}
+
 type byKind string
 
 func (b byKind) isBy() {
@@ -212,3 +222,25 @@ func ByCustomPrefix(objType, index, value string) By {
 		value:   value,
 	}
 }
+
+// ByVolumeGroup creates an object to pass to Find to search for volumes
+// belonging to a particular group.
+func ByVolumeGroup(group string) By {
+	return byVolumeGroup(group)
+}
+
+type byVolumeGroup string
+
+func (b byVolumeGroup) isBy() {
+}
+
+// ByDriver creates an object to pass to Find to search for objects using a
+// specific driver.
+func ByDriver(driver string) By {
+	return byDriver(driver)
+}
+
+type byDriver string
+
+func (b byDriver) isBy() {
+}

+ 36 - 15
vendor/github.com/docker/swarmkit/manager/state/store/memory.go

@@ -22,21 +22,24 @@ import (
 )
 
 const (
-	indexID           = "id"
-	indexName         = "name"
-	indexRuntime      = "runtime"
-	indexServiceID    = "serviceid"
-	indexNodeID       = "nodeid"
-	indexSlot         = "slot"
-	indexDesiredState = "desiredstate"
-	indexTaskState    = "taskstate"
-	indexRole         = "role"
-	indexMembership   = "membership"
-	indexNetwork      = "network"
-	indexSecret       = "secret"
-	indexConfig       = "config"
-	indexKind         = "kind"
-	indexCustom       = "custom"
+	indexID               = "id"
+	indexName             = "name"
+	indexRuntime          = "runtime"
+	indexServiceID        = "serviceid"
+	indexNodeID           = "nodeid"
+	indexSlot             = "slot"
+	indexDesiredState     = "desiredstate"
+	indexTaskState        = "taskstate"
+	indexRole             = "role"
+	indexMembership       = "membership"
+	indexNetwork          = "network"
+	indexSecret           = "secret"
+	indexConfig           = "config"
+	indexVolumeAttachment = "volumeattachment"
+	indexKind             = "kind"
+	indexCustom           = "custom"
+	indexVolumeGroup      = "volumegroup"
+	indexDriver           = "driver"
 
 	prefix = "_prefix"
 
@@ -736,12 +739,30 @@ func (tx readTx) findIterators(table string, by By, checkType func(By) error) ([
 			return nil, err
 		}
 		return []memdb.ResultIterator{it}, nil
+	case byVolumeAttachment:
+		it, err := tx.memDBTx.Get(table, indexVolumeAttachment, string(v))
+		if err != nil {
+			return nil, err
+		}
+		return []memdb.ResultIterator{it}, nil
 	case byKind:
 		it, err := tx.memDBTx.Get(table, indexKind, string(v))
 		if err != nil {
 			return nil, err
 		}
 		return []memdb.ResultIterator{it}, nil
+	case byVolumeGroup:
+		it, err := tx.memDBTx.Get(table, indexVolumeGroup, string(v))
+		if err != nil {
+			return nil, err
+		}
+		return []memdb.ResultIterator{it}, nil
+	case byDriver:
+		it, err := tx.memDBTx.Get(table, indexDriver, string(v))
+		if err != nil {
+			return nil, err
+		}
+		return []memdb.ResultIterator{it}, nil
 	case byCustom:
 		var key string
 		if v.objType != "" {

+ 26 - 1
vendor/github.com/docker/swarmkit/manager/state/store/tasks.go

@@ -69,6 +69,11 @@ func init() {
 					AllowMissing: true,
 					Indexer:      taskIndexerByConfig{},
 				},
+				indexVolumeAttachment: {
+					Name:         indexVolumeAttachment,
+					AllowMissing: true,
+					Indexer:      taskIndexerByVolumeAttachment{},
+				},
 				indexCustom: {
 					Name:         indexCustom,
 					Indexer:      api.TaskCustomIndexer{},
@@ -138,7 +143,7 @@ func GetTask(tx ReadTx, id string) *api.Task {
 func FindTasks(tx ReadTx, by By) ([]*api.Task, error) {
 	checkType := func(by By) error {
 		switch by.(type) {
-		case byName, byNamePrefix, byIDPrefix, byRuntime, byDesiredState, byTaskState, byNode, byService, bySlot, byReferencedNetworkID, byReferencedSecretID, byReferencedConfigID, byCustom, byCustomPrefix:
+		case byName, byNamePrefix, byIDPrefix, byRuntime, byDesiredState, byTaskState, byNode, byService, bySlot, byReferencedNetworkID, byReferencedSecretID, byReferencedConfigID, byVolumeAttachment, byCustom, byCustomPrefix:
 			return nil
 		default:
 			return ErrInvalidFindBy
@@ -317,6 +322,26 @@ func (ti taskIndexerByConfig) FromObject(obj interface{}) (bool, [][]byte, error
 	return len(configIDs) != 0, configIDs, nil
 }
 
+type taskIndexerByVolumeAttachment struct{}
+
+func (ti taskIndexerByVolumeAttachment) FromArgs(args ...interface{}) ([]byte, error) {
+	return fromArgs(args...)
+}
+
+func (ti taskIndexerByVolumeAttachment) FromObject(obj interface{}) (bool, [][]byte, error) {
+	t, ok := obj.(*api.Task)
+	if !ok {
+		panic("unexpected type passed to FromObject")
+	}
+
+	var volumeIDs [][]byte
+
+	for _, v := range t.Volumes {
+		volumeIDs = append(volumeIDs, []byte(v.ID+"\x00"))
+	}
+	return len(volumeIDs) != 0, volumeIDs, nil
+}
+
 type taskIndexerByTaskState struct{}
 
 func (ts taskIndexerByTaskState) FromArgs(args ...interface{}) ([]byte, error) {

+ 149 - 0
vendor/github.com/docker/swarmkit/manager/state/store/volumes.go

@@ -0,0 +1,149 @@
+package store
+
+import (
+	"strings"
+
+	"github.com/docker/swarmkit/api"
+	memdb "github.com/hashicorp/go-memdb"
+)
+
+const tableVolume = "volume"
+
+func init() {
+	register(ObjectStoreConfig{
+		Table: &memdb.TableSchema{
+			Name: tableVolume,
+			Indexes: map[string]*memdb.IndexSchema{
+				indexID: {
+					Name:    indexID,
+					Unique:  true,
+					Indexer: api.VolumeIndexerByID{},
+				},
+				indexName: {
+					Name:    indexName,
+					Unique:  true,
+					Indexer: api.VolumeIndexerByName{},
+				},
+				indexCustom: {
+					Name:         indexCustom,
+					Indexer:      api.VolumeCustomIndexer{},
+					AllowMissing: true,
+				},
+				indexVolumeGroup: {
+					Name:    indexVolumeGroup,
+					Indexer: volumeIndexerByGroup{},
+				},
+				indexDriver: {
+					Name:    indexDriver,
+					Indexer: volumeIndexerByDriver{},
+				},
+			},
+		},
+		Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
+			var err error
+			snapshot.Volumes, err = FindVolumes(tx, All)
+			return err
+		},
+		Restore: func(tx Tx, snapshot *api.StoreSnapshot) error {
+			toStoreObj := make([]api.StoreObject, len(snapshot.Volumes))
+			for i, x := range snapshot.Volumes {
+				toStoreObj[i] = x
+			}
+			return RestoreTable(tx, tableVolume, toStoreObj)
+		},
+		ApplyStoreAction: func(tx Tx, sa api.StoreAction) error {
+			switch v := sa.Target.(type) {
+			case *api.StoreAction_Volume:
+				obj := v.Volume
+				switch sa.Action {
+				case api.StoreActionKindCreate:
+					return CreateVolume(tx, obj)
+				case api.StoreActionKindUpdate:
+					return UpdateVolume(tx, obj)
+				case api.StoreActionKindRemove:
+					return DeleteVolume(tx, obj.ID)
+				}
+			}
+			return errUnknownStoreAction
+		},
+	})
+}
+
+func CreateVolume(tx Tx, v *api.Volume) error {
+	if tx.lookup(tableVolume, indexName, strings.ToLower(v.Spec.Annotations.Name)) != nil {
+		return ErrNameConflict
+	}
+
+	return tx.create(tableVolume, v)
+}
+
+func UpdateVolume(tx Tx, v *api.Volume) error {
+	// ensure the name is either not in use, or is in use by this volume.
+	if existing := tx.lookup(tableVolume, indexName, strings.ToLower(v.Spec.Annotations.Name)); existing != nil {
+		if existing.GetID() != v.ID {
+			return ErrNameConflict
+		}
+	}
+
+	return tx.update(tableVolume, v)
+}
+
+func DeleteVolume(tx Tx, id string) error {
+	return tx.delete(tableVolume, id)
+}
+
+func GetVolume(tx ReadTx, id string) *api.Volume {
+	n := tx.get(tableVolume, id)
+	if n == nil {
+		return nil
+	}
+	return n.(*api.Volume)
+}
+
+func FindVolumes(tx ReadTx, by By) ([]*api.Volume, error) {
+	checkType := func(by By) error {
+		switch by.(type) {
+		case byName, byNamePrefix, byIDPrefix, byVolumeGroup, byCustom, byCustomPrefix, byDriver:
+			return nil
+		default:
+			return ErrInvalidFindBy
+		}
+	}
+
+	volumeList := []*api.Volume{}
+	appendResult := func(o api.StoreObject) {
+		volumeList = append(volumeList, o.(*api.Volume))
+	}
+
+	err := tx.find(tableVolume, by, checkType, appendResult)
+	return volumeList, err
+}
+
+type volumeIndexerByGroup struct{}
+
+func (vi volumeIndexerByGroup) FromArgs(args ...interface{}) ([]byte, error) {
+	return fromArgs(args...)
+}
+
+func (vi volumeIndexerByGroup) FromObject(obj interface{}) (bool, []byte, error) {
+	v := obj.(*api.Volume)
+	val := v.Spec.Group + "\x00"
+	return true, []byte(val), nil
+}
+
+type volumeIndexerByDriver struct{}
+
+func (vi volumeIndexerByDriver) FromArgs(args ...interface{}) ([]byte, error) {
+	return fromArgs(args...)
+}
+
+func (vi volumeIndexerByDriver) FromObject(obj interface{}) (bool, []byte, error) {
+	v := obj.(*api.Volume)
+	// this should never happen -- existence of the volume driver is checked
+	// at the controlapi level. However, guard against the unforeseen.
+	if v.Spec.Driver == nil {
+		return false, nil, nil
+	}
+	val := v.Spec.Driver.Name + "\x00"
+	return true, []byte(val), nil
+}

+ 4 - 5
vendor/github.com/docker/swarmkit/node/node.go

@@ -5,7 +5,6 @@ import (
 	"context"
 	"crypto/tls"
 	"encoding/json"
-	"io/ioutil"
 	"math"
 	"net"
 	"os"
@@ -195,11 +194,11 @@ func (n *Node) RemoteAPIAddr() (string, error) {
 
 // New returns new Node instance.
 func New(c *Config) (*Node, error) {
-	if err := os.MkdirAll(c.StateDir, 0700); err != nil {
+	if err := os.MkdirAll(c.StateDir, 0o700); err != nil {
 		return nil, err
 	}
 	stateFile := filepath.Join(c.StateDir, stateFilename)
-	dt, err := ioutil.ReadFile(stateFile)
+	dt, err := os.ReadFile(stateFile)
 	var p []api.Peer
 	if err != nil && !os.IsNotExist(err) {
 		return nil, err
@@ -337,7 +336,7 @@ func (n *Node) run(ctx context.Context) (err error) {
 	// database if it doesn't already exist, and if it does already exist, no
 	// error will be returned, so we use this regardless of whether this node
 	// is new or not.
-	if err := os.MkdirAll(filepath.Dir(taskDBPath), 0777); err != nil {
+	if err := os.MkdirAll(filepath.Dir(taskDBPath), 0o777); err != nil {
 		return err
 	}
 
@@ -1248,7 +1247,7 @@ func (s *persistentRemotes) save() error {
 		return err
 	}
 	s.lastSavedState = remotes
-	return ioutils.AtomicWriteFile(s.storePath, dt, 0600)
+	return ioutils.AtomicWriteFile(s.storePath, dt, 0o600)
 }
 
 // WaitSelect waits until at least one remote becomes available and then selects one.

+ 9 - 0
vendor/github.com/docker/swarmkit/template/getter.go

@@ -98,6 +98,7 @@ func (t templatedConfigGetter) GetAndFlagSecretData(configID string) (*api.Confi
 type templatedDependencyGetter struct {
 	secrets exec.SecretGetter
 	configs TemplatedConfigGetter
+	volumes exec.VolumeGetter
 }
 
 // NewTemplatedDependencyGetter returns a DependencyGetter that evaluates templates.
@@ -105,6 +106,7 @@ func NewTemplatedDependencyGetter(dependencies exec.DependencyGetter, t *api.Tas
 	return templatedDependencyGetter{
 		secrets: NewTemplatedSecretGetter(dependencies, t, node),
 		configs: NewTemplatedConfigGetter(dependencies, t, node),
+		volumes: dependencies.Volumes(),
 	}
 }
 
@@ -115,3 +117,10 @@ func (t templatedDependencyGetter) Secrets() exec.SecretGetter {
 func (t templatedDependencyGetter) Configs() exec.ConfigGetter {
 	return t.configs
 }
+
+func (t templatedDependencyGetter) Volumes() exec.VolumeGetter {
+	// volumes are not templated, but we include that call (and pass it
+	// straight through to the underlying getter) in order to fulfill the
+	// DependencyGetter interface.
+	return t.volumes
+}

+ 4 - 0
vendor/github.com/docker/swarmkit/volumequeue/doc.go

@@ -0,0 +1,4 @@
+package volumequeue
+
+// The volumequeue package defines a type of priority queue which is used by
+// both the manager and the agent to manage retrying of CSI volume operations.

+ 215 - 0
vendor/github.com/docker/swarmkit/volumequeue/queue.go

@@ -0,0 +1,215 @@
+package volumequeue
+
+import (
+	"sync"
+	"time"
+)
+
+// baseRetryInterval is the base interval to retry volume operations. each
+// subsequent attempt is exponential from this one
+const baseRetryInterval = 100 * time.Millisecond
+
+// maxRetryInterval is the maximum amount of time we will wait between retrying
+// volume operations.
+const maxRetryInterval = 10 * time.Minute
+
+// vqTimerSource is an interface for creating timers for the volumeQueue
+type vqTimerSource interface {
+	// NewTimer takes an attempt number and returns a vqTimer which will
+	// trigger after a set period based on that attempt number.
+	NewTimer(attempt uint) vqTimer
+}
+
+// vqTimer is an interface representing a timer. However, the timer
+// trigger channel, C, is instead wrapped in a Done method, so that in testing,
+// the timer can be substituted for a different object.
+type vqTimer interface {
+	Done() <-chan time.Time
+	Stop() bool
+}
+
+// timerSource is an empty struct type which is used to represent the default
+// vqTimerSource, which uses time.Timer.
+type timerSource struct{}
+
+// NewTimer creates a new timer.
+func (timerSource) NewTimer(attempt uint) vqTimer {
+	var waitFor time.Duration
+	if attempt == 0 {
+		waitFor = 0
+	} else {
+		// bit-shifting the base retry interval will raise it by 2 to the power
+		// of attempt. this is an easy way to do an exponent solely with
+		// integers
+		waitFor = baseRetryInterval << attempt
+		if waitFor > maxRetryInterval {
+			waitFor = maxRetryInterval
+		}
+	}
+	return timer{Timer: time.NewTimer(waitFor)}
+}
+
+// timer wraps a time.Timer to provide a Done method.
+type timer struct {
+	*time.Timer
+}
+
+// Done returns the timer's C channel, which triggers in response to the timer
+// expiring.
+func (t timer) Done() <-chan time.Time {
+	return t.C
+}
+
+// VolumeQueue manages the exponential backoff of retrying volumes. it behaves
+// somewhat like a priority queue. however, the key difference is that volumes
+// which are ready to process or reprocess are read off of an unbuffered
+// channel, meaning the order in which ready volumes are processed is at the
+// mercy of the golang scheduler. in practice, this does not matter.
+type VolumeQueue struct {
+	sync.Mutex
+	// next returns the next volumeQueueEntry when it is ready.
+	next chan *volumeQueueEntry
+	// outstanding is the set of all pending volumeQueueEntries, mapped by
+	// volume ID.
+	outstanding map[string]*volumeQueueEntry
+	// stopChan stops the volumeQueue and cancels all entries.
+	stopChan chan struct{}
+
+	// timerSource is an object which is used to create the timer for a
+	// volumeQueueEntry. it exists so that in testing, the timer can be
+	// substituted for an object that we control.
+	timerSource vqTimerSource
+}
+
+// volumeQueueEntry represents one entry in the volumeQueue
+type volumeQueueEntry struct {
+	// id is the id of the volume this entry represents. we only need the ID,
+	// because the CSI manager will look up the latest revision of the volume
+	// before doing any work on it.
+	id string
+	// attempt is the current retry attempt of the entry.
+	attempt uint
+	// cancel is a function which is called to abort the retry attempt.
+	cancel func()
+}
+
+// NewVolumeQueue returns a new VolumeQueue with the default timerSource.
+func NewVolumeQueue() *VolumeQueue {
+	return &VolumeQueue{
+		next:        make(chan *volumeQueueEntry),
+		outstanding: make(map[string]*volumeQueueEntry),
+		stopChan:    make(chan struct{}),
+		timerSource: timerSource{},
+	}
+}
+
+// Enqueue adds an entry to the VolumeQueue with the specified retry attempt.
+// if an entry for the specified id already exists, enqueue will remove it and
+// create a new entry.
+func (vq *VolumeQueue) Enqueue(id string, attempt uint) {
+	// we must lock the volumeQueue when we add entries, because we will be
+	// accessing the outstanding map
+	vq.Lock()
+	defer vq.Unlock()
+
+	if entry, ok := vq.outstanding[id]; ok {
+		entry.cancel()
+		delete(vq.outstanding, id)
+	}
+
+	cancelChan := make(chan struct{})
+	v := &volumeQueueEntry{
+		id:      id,
+		attempt: attempt,
+		cancel: func() {
+			close(cancelChan)
+		},
+	}
+
+	t := vq.timerSource.NewTimer(attempt)
+
+	// this goroutine is the meat of the volumeQueue. when the timer triggers,
+	// the volume queue entry is written out to the next channel.
+	//
+	// the nature of the select statement, and of goroutines and of
+	// asynchronous operations means that this is not actually strictly
+	// ordered. if several entries are ready, then the one that actually gets
+	// dequeued is at the mercy of the golang scheduler.
+	//
+	// however, the flip side of this is that canceling an entry truly cancels
+	// it. because we're blocking on a write attempt, if we cancel, we don't
+	// do that write attempt, and there's no need to try to remove from the
+	// queue a ready-but-now-canceled entry before it is processed.
+	go func() {
+		select {
+		case <-t.Done():
+			// once the timer fires, we will try to write this entry to the
+			// next channel. however, because next is unbuffered, if we ended
+			// up in a situation where no read occurred, we would be
+			// deadlocked. to avoid this, we select on both a vq.next write and
+			// on a read from cancelChan, which allows us to abort our write
+			// attempt.
+			select {
+			case vq.next <- v:
+			case <-cancelChan:
+			}
+		case <-cancelChan:
+			// the documentation for timer recommends draining the channel like
+			// this.
+			if !t.Stop() {
+				<-t.Done()
+			}
+		}
+	}()
+
+	vq.outstanding[id] = v
+}
+
+// Wait returns the ID and attempt number of the next Volume ready to process.
+// If no volume is ready, Wait blocks until one is ready. If the VolumeQueue
+// is stopped, Wait returns "", 0.
+func (vq *VolumeQueue) Wait() (string, uint) {
+	select {
+	case v := <-vq.next:
+		vq.Lock()
+		defer vq.Unlock()
+		// we need to be certain that this entry is the same entry that we
+		// read, because otherwise there may be a race.
+		//
+		// it would be possible for the read from next to succeed, but before
+		// the lock is acquired, a new attempt is enqueued. enqueuing the new
+		// attempt deletes the old entry before replacing it with the new entry
+		// and releasing the lock. then, this routine may acquire the lock, and
+		// delete a new entry.
+		//
+		// in practice, it is unclear if this race could happen or would matter
+		// if it did, but always better safe than sorry.
+		e, ok := vq.outstanding[v.id]
+		if ok && e == v {
+			delete(vq.outstanding, v.id)
+		}
+
+		return v.id, v.attempt
+	case <-vq.stopChan:
+		// if the volumeQueue is stopped, then there may be no more writes, so
+		// we should return an empty result from wait
+		return "", 0
+	}
+}
+
+// Outstanding returns the number of items outstanding in this queue
+func (vq *VolumeQueue) Outstanding() int {
+	return len(vq.outstanding)
+}
+
+// Stop stops the VolumeQueue and cancels all outstanding entries. Stop may
+// only be called once.
+func (vq *VolumeQueue) Stop() {
+	vq.Lock()
+	defer vq.Unlock()
+	close(vq.stopChan)
+	for _, entry := range vq.outstanding {
+		entry.cancel()
+	}
+	return
+}

+ 1 - 0
vendor/github.com/docker/swarmkit/xnet/xnet_unix.go

@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 package xnet

+ 1 - 0
vendor/github.com/docker/swarmkit/xnet/xnet_windows.go

@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package xnet

+ 21 - 0
vendor/github.com/dustin/go-humanize/.travis.yml

@@ -0,0 +1,21 @@
+sudo: false
+language: go
+go:
+  - 1.3.x
+  - 1.5.x
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - master
+matrix:
+  allow_failures:
+    - go: master
+  fast_finish: true
+install:
+  - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+  - go get -t -v ./...
+  - diff -u <(echo -n) <(gofmt -d -s .)
+  - go tool vet .
+  - go test -v -race ./...

+ 21 - 0
vendor/github.com/dustin/go-humanize/LICENSE

@@ -0,0 +1,21 @@
+Copyright (c) 2005-2008  Dustin Sallings <dustin@spy.net>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+<http://www.opensource.org/licenses/mit-license.php>

+ 124 - 0
vendor/github.com/dustin/go-humanize/README.markdown

@@ -0,0 +1,124 @@
+# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
+
+Just a few functions for helping humanize times and sizes.
+
+`go get` it as `github.com/dustin/go-humanize`, import it as
+`"github.com/dustin/go-humanize"`, use it as `humanize`.
+
+See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
+complete documentation.
+
+## Sizes
+
+This lets you take numbers like `82854982` and convert them to useful
+strings like, `83 MB` or `79 MiB` (whichever you prefer).
+
+Example:
+
+```go
+fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
+```
+
+## Times
+
+This lets you take a `time.Time` and spit it out in relative terms.
+For example, `12 seconds ago` or `3 days from now`.
+
+Example:
+
+```go
+fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
+```
+
+Thanks to Kyle Lemons for the time implementation from an IRC
+conversation one day. It's pretty neat.
+
+## Ordinals
+
+From a [mailing list discussion][odisc] where a user wanted to be able
+to label ordinals.
+
+    0 -> 0th
+    1 -> 1st
+    2 -> 2nd
+    3 -> 3rd
+    4 -> 4th
+    [...]
+
+Example:
+
+```go
+fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You're my 193rd best friend.
+```
+
+## Commas
+
+Want to shove commas into numbers? Be my guest.
+
+    0 -> 0
+    100 -> 100
+    1000 -> 1,000
+    1000000000 -> 1,000,000,000
+    -100000 -> -100,000
+
+Example:
+
+```go
+fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
+```
+
+## Ftoa
+
+Nicer float64 formatter that removes trailing zeros.
+
+```go
+fmt.Printf("%f", 2.24)                // 2.240000
+fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
+fmt.Printf("%f", 2.0)                 // 2.000000
+fmt.Printf("%s", humanize.Ftoa(2.0))  // 2
+```
+
+## SI notation
+
+Format numbers with [SI notation][sinotation].
+
+Example:
+
+```go
+humanize.SI(0.00000000223, "M") // 2.23 nM
+```
+
+## English-specific functions
+
+The following functions are in the `humanize/english` subpackage.
+
+### Plurals
+
+Simple English pluralization
+
+```go
+english.PluralWord(1, "object", "") // object
+english.PluralWord(42, "object", "") // objects
+english.PluralWord(2, "bus", "") // buses
+english.PluralWord(99, "locus", "loci") // loci
+
+english.Plural(1, "object", "") // 1 object
+english.Plural(42, "object", "") // 42 objects
+english.Plural(2, "bus", "") // 2 buses
+english.Plural(99, "locus", "loci") // 99 loci
+```
+
+### Word series
+
+Format comma-separated word lists with conjunctions:
+
+```go
+english.WordSeries([]string{"foo"}, "and") // foo
+english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
+english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
+
+english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
+```
+
+[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
+[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix

+ 31 - 0
vendor/github.com/dustin/go-humanize/big.go

@@ -0,0 +1,31 @@
+package humanize
+
+import (
+	"math/big"
+)
+
+// oomm computes the order of magnitude of n in base b, capped at maxmag
+// divisions. n is divided in place (callers pass a scratch copy); the return
+// value is the remaining quotient plus a fractional remainder, and the
+// number of divisions performed.
+func oomm(n, b *big.Int, maxmag int) (float64, int) {
+	mag := 0
+	m := &big.Int{}
+	for n.Cmp(b) >= 0 {
+		n.DivMod(n, b, m)
+		mag++
+		// Stop once the cap is reached (a negative maxmag disables the cap).
+		if mag == maxmag && maxmag >= 0 {
+			break
+		}
+	}
+	// Integer part of the final quotient plus the fractional remainder.
+	return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
+
+// oom computes the total order of magnitude of n in base b with no upper
+// limit: n is divided in place until n < b, and the remaining quotient plus
+// a fractional remainder is returned along with the division count.
+func oom(n, b *big.Int) (float64, int) {
+	mag := 0
+	m := &big.Int{}
+	for n.Cmp(b) >= 0 {
+		n.DivMod(n, b, m)
+		mag++
+	}
+	return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}

+ 173 - 0
vendor/github.com/dustin/go-humanize/bigbytes.go

@@ -0,0 +1,173 @@
+package humanize
+
+import (
+	"fmt"
+	"math/big"
+	"strings"
+	"unicode"
+)
+
+var (
+	bigIECExp = big.NewInt(1024)
+
+	// BigByte is one byte in big.Ints
+	BigByte = big.NewInt(1)
+	// BigKiByte is 1,024 bytes in big.Ints
+	BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
+	// BigMiByte is 1,024 k bytes in big.Ints
+	BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
+	// BigGiByte is 1,024 m bytes in big.Ints
+	BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
+	// BigTiByte is 1,024 g bytes in big.Ints
+	BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
+	// BigPiByte is 1,024 t bytes in big.Ints
+	BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
+	// BigEiByte is 1,024 p bytes in big.Ints
+	BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
+	// BigZiByte is 1,024 e bytes in big.Ints
+	BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
+	// BigYiByte is 1,024 z bytes in big.Ints
+	BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
+)
+
+var (
+	bigSIExp = big.NewInt(1000)
+
+	// BigSIByte is one SI byte in big.Ints
+	BigSIByte = big.NewInt(1)
+	// BigKByte is 1,000 SI bytes in big.Ints
+	BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
+	// BigMByte is 1,000 SI k bytes in big.Ints
+	BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
+	// BigGByte is 1,000 SI m bytes in big.Ints
+	BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
+	// BigTByte is 1,000 SI g bytes in big.Ints
+	BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
+	// BigPByte is 1,000 SI t bytes in big.Ints
+	BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
+	// BigEByte is 1,000 SI p bytes in big.Ints
+	BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
+	// BigZByte is 1,000 SI e bytes in big.Ints
+	BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
+	// BigYByte is 1,000 SI z bytes in big.Ints
+	BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
+)
+
+// bigBytesSizeTable maps unit suffixes to their multipliers. Keys must be
+// lowercase: ParseBigBytes lowercases the suffix before lookup.
+var bigBytesSizeTable = map[string]*big.Int{
+	"b":   BigByte,
+	"kib": BigKiByte,
+	"kb":  BigKByte,
+	"mib": BigMiByte,
+	"mb":  BigMByte,
+	"gib": BigGiByte,
+	"gb":  BigGByte,
+	"tib": BigTiByte,
+	"tb":  BigTByte,
+	"pib": BigPiByte,
+	"pb":  BigPByte,
+	"eib": BigEiByte,
+	"eb":  BigEByte,
+	"zib": BigZiByte,
+	"zb":  BigZByte,
+	"yib": BigYiByte,
+	"yb":  BigYByte,
+	// Without suffix
+	"":   BigByte,
+	"ki": BigKiByte,
+	"k":  BigKByte,
+	"mi": BigMiByte,
+	"m":  BigMByte,
+	"gi": BigGiByte,
+	"g":  BigGByte,
+	"ti": BigTiByte,
+	"t":  BigTByte,
+	"pi": BigPiByte,
+	"p":  BigPByte,
+	"ei": BigEiByte,
+	"e":  BigEByte,
+	"z":  BigZByte,
+	"zi": BigZiByte,
+	"y":  BigYByte,
+	"yi": BigYiByte,
+}
+
+// ten is the threshold below which humanateBigBytes prints plain "%d B".
+var ten = big.NewInt(10)
+
+// humanateBigBytes formats s using the given base (1000 or 1024) and unit
+// names. Values below ten are printed as plain bytes; otherwise one decimal
+// place is shown when the scaled value is below 10.
+func humanateBigBytes(s, base *big.Int, sizes []string) string {
+	if s.Cmp(ten) < 0 {
+		return fmt.Sprintf("%d B", s)
+	}
+	// Work on a copy: oomm divides its argument in place.
+	c := (&big.Int{}).Set(s)
+	val, mag := oomm(c, base, len(sizes)-1)
+	suffix := sizes[mag]
+	f := "%.0f %s"
+	if val < 10 {
+		f = "%.1f %s"
+	}
+
+	return fmt.Sprintf(f, val, suffix)
+
+}
+
+// BigBytes produces a human readable representation of an SI size.
+//
+// See also: ParseBigBytes.
+//
+// BigBytes(82854982) -> 83 MB
+//
+// The argument is not modified: formatting works on an internal copy.
+func BigBytes(s *big.Int) string {
+	sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+	return humanateBigBytes(s, bigSIExp, sizes)
+}
+
+// BigIBytes produces a human readable representation of an IEC (base-1024)
+// size.
+//
+// See also: ParseBigBytes.
+//
+// BigIBytes(82854982) -> 79 MiB
+//
+// The argument is not modified: formatting works on an internal copy.
+func BigIBytes(s *big.Int) string {
+	sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+	return humanateBigBytes(s, bigIECExp, sizes)
+}
+
+// ParseBigBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See also: BigBytes, BigIBytes.
+//
+// ParseBigBytes("42 MB") -> 42000000, nil
+// ParseBigBytes("42 mib") -> 44040192, nil
+func ParseBigBytes(s string) (*big.Int, error) {
+	// Scan the leading numeric portion, which may contain a decimal point
+	// and comma thousands separators.
+	lastDigit := 0
+	hasComma := false
+	for _, r := range s {
+		if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+			break
+		}
+		if r == ',' {
+			hasComma = true
+		}
+		lastDigit++
+	}
+
+	num := s[:lastDigit]
+	if hasComma {
+		num = strings.Replace(num, ",", "", -1)
+	}
+
+	// Parse into a big.Rat so decimal fractions stay exact.
+	val := &big.Rat{}
+	_, err := fmt.Sscanf(num, "%f", val)
+	if err != nil {
+		return nil, err
+	}
+
+	// The unit suffix is matched case-insensitively ("MB" == "mb").
+	extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+	if m, ok := bigBytesSizeTable[extra]; ok {
+		mv := (&big.Rat{}).SetInt(m)
+		val.Mul(val, mv)
+		// Truncate the exact rational result to an integer byte count.
+		rv := &big.Int{}
+		rv.Div(val.Num(), val.Denom())
+		return rv, nil
+	}
+
+	return nil, fmt.Errorf("unhandled size name: %v", extra)
+}

+ 143 - 0
vendor/github.com/dustin/go-humanize/bytes.go

@@ -0,0 +1,143 @@
+package humanize
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+// IEC sizes (base-1024): each constant is 1024 times the previous one.
+const (
+	Byte = 1 << (iota * 10)
+	KiByte
+	MiByte
+	GiByte
+	TiByte
+	PiByte
+	EiByte
+)
+
+// SI sizes (base-1000). IByte equals Byte; it only anchors the decimal
+// series.
+const (
+	IByte = 1
+	KByte = IByte * 1000
+	MByte = KByte * 1000
+	GByte = MByte * 1000
+	TByte = GByte * 1000
+	PByte = TByte * 1000
+	EByte = PByte * 1000
+)
+
+// bytesSizeTable maps unit suffixes to their multipliers. Keys must be
+// lowercase: ParseBytes lowercases the suffix before lookup.
+var bytesSizeTable = map[string]uint64{
+	"b":   Byte,
+	"kib": KiByte,
+	"kb":  KByte,
+	"mib": MiByte,
+	"mb":  MByte,
+	"gib": GiByte,
+	"gb":  GByte,
+	"tib": TiByte,
+	"tb":  TByte,
+	"pib": PiByte,
+	"pb":  PByte,
+	"eib": EiByte,
+	"eb":  EByte,
+	// Without suffix
+	"":   Byte,
+	"ki": KiByte,
+	"k":  KByte,
+	"mi": MiByte,
+	"m":  MByte,
+	"gi": GiByte,
+	"g":  GByte,
+	"ti": TiByte,
+	"t":  TByte,
+	"pi": PiByte,
+	"p":  PByte,
+	"ei": EiByte,
+	"e":  EByte,
+}
+
+// logn returns the logarithm of n in base b.
+func logn(n, b float64) float64 {
+	return math.Log(n) / math.Log(b)
+}
+
+// humanateBytes formats s using the given base (1000 or 1024) and unit
+// names. Values below 10 are printed as plain bytes; otherwise one decimal
+// place is shown when the scaled value is below 10.
+func humanateBytes(s uint64, base float64, sizes []string) string {
+	if s < 10 {
+		return fmt.Sprintf("%d B", s)
+	}
+	// e is the order of magnitude of s in the chosen base; it indexes sizes.
+	e := math.Floor(logn(float64(s), base))
+	suffix := sizes[int(e)]
+	// Scale down and round to one decimal place.
+	val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
+	f := "%.0f %s"
+	if val < 10 {
+		f = "%.1f %s"
+	}
+
+	return fmt.Sprintf(f, val, suffix)
+}
+
+// Bytes produces a human readable representation of an SI (base-1000) size.
+//
+// See also: ParseBytes.
+//
+// Bytes(82854982) -> 83 MB
+func Bytes(s uint64) string {
+	sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
+	return humanateBytes(s, 1000, sizes)
+}
+
+// IBytes produces a human readable representation of an IEC (base-1024)
+// size.
+//
+// See also: ParseBytes.
+//
+// IBytes(82854982) -> 79 MiB
+func IBytes(s uint64) string {
+	sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
+	return humanateBytes(s, 1024, sizes)
+}
+
+// ParseBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See Also: Bytes, IBytes.
+//
+// ParseBytes("42 MB") -> 42000000, nil
+// ParseBytes("42 mib") -> 44040192, nil
+//
+// NOTE(review): parsing goes through float64, so values above 2^53 bytes
+// lose precision; use ParseBigBytes for exact large values.
+func ParseBytes(s string) (uint64, error) {
+	// Scan the leading numeric portion, which may contain a decimal point
+	// and comma thousands separators.
+	lastDigit := 0
+	hasComma := false
+	for _, r := range s {
+		if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+			break
+		}
+		if r == ',' {
+			hasComma = true
+		}
+		lastDigit++
+	}
+
+	num := s[:lastDigit]
+	if hasComma {
+		num = strings.Replace(num, ",", "", -1)
+	}
+
+	f, err := strconv.ParseFloat(num, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	// The unit suffix is matched case-insensitively ("MB" == "mb").
+	extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+	if m, ok := bytesSizeTable[extra]; ok {
+		f *= float64(m)
+		if f >= math.MaxUint64 {
+			return 0, fmt.Errorf("too large: %v", s)
+		}
+		return uint64(f), nil
+	}
+
+	return 0, fmt.Errorf("unhandled size name: %v", extra)
+}

+ 116 - 0
vendor/github.com/dustin/go-humanize/comma.go

@@ -0,0 +1,116 @@
+package humanize
+
+import (
+	"bytes"
+	"math"
+	"math/big"
+	"strconv"
+	"strings"
+)
+
+// Comma produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Comma(834142) -> 834,142
+func Comma(v int64) string {
+	sign := ""
+
+	// Min int64 can't be negated to a usable value, so it has to be special cased.
+	if v == math.MinInt64 {
+		return "-9,223,372,036,854,775,808"
+	}
+
+	if v < 0 {
+		sign = "-"
+		v = 0 - v
+	}
+
+	// Seven slots cover the three-digit groups of the largest int64.
+	parts := []string{"", "", "", "", "", "", ""}
+	j := len(parts) - 1
+
+	// Peel off three-digit groups from the right.
+	for v > 999 {
+		parts[j] = strconv.FormatInt(v%1000, 10)
+		// Left-pad interior groups to exactly three digits.
+		switch len(parts[j]) {
+		case 2:
+			parts[j] = "0" + parts[j]
+		case 1:
+			parts[j] = "00" + parts[j]
+		}
+		v = v / 1000
+		j--
+	}
+	// The leading group is not zero-padded.
+	parts[j] = strconv.Itoa(int(v))
+	return sign + strings.Join(parts[j:], ",")
+}
+
+// Commaf produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Commaf(834142.32) -> 834,142.32
+func Commaf(v float64) string {
+	buf := &bytes.Buffer{}
+	if v < 0 {
+		buf.Write([]byte{'-'})
+		v = 0 - v
+	}
+
+	comma := []byte{','}
+
+	// Split the shortest exact decimal form into integer and fraction.
+	parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
+	pos := 0
+	// Emit the leading group of 1-2 digits, if any.
+	if len(parts[0])%3 != 0 {
+		pos += len(parts[0]) % 3
+		buf.WriteString(parts[0][:pos])
+		buf.Write(comma)
+	}
+	// Emit the remaining three-digit groups, each followed by a comma.
+	for ; pos < len(parts[0]); pos += 3 {
+		buf.WriteString(parts[0][pos : pos+3])
+		buf.Write(comma)
+	}
+	// Drop the trailing comma written by the loop above.
+	buf.Truncate(buf.Len() - 1)
+
+	// Re-attach the fractional part unchanged.
+	if len(parts) > 1 {
+		buf.Write([]byte{'.'})
+		buf.WriteString(parts[1])
+	}
+	return buf.String()
+}
+
+// CommafWithDigits works like the Commaf but limits the resulting
+// string to the given number of decimal places.
+//
+// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
+//
+// Excess decimals are truncated, not rounded (see stripTrailingDigits).
+func CommafWithDigits(f float64, decimals int) string {
+	return stripTrailingDigits(Commaf(f), decimals)
+}
+
+// BigComma produces a string form of the given big.Int in base 10
+// with commas after every three orders of magnitude.
+//
+// NOTE(review): b is modified in place (Abs and repeated DivMod); callers
+// that need the original value must pass a copy.
+func BigComma(b *big.Int) string {
+	sign := ""
+	if b.Sign() < 0 {
+		sign = "-"
+		b.Abs(b)
+	}
+
+	athousand := big.NewInt(1000)
+	// Count the three-digit groups on a scratch copy so parts is sized exactly.
+	c := (&big.Int{}).Set(b)
+	_, m := oom(c, athousand)
+	parts := make([]string, m+1)
+	j := len(parts) - 1
+
+	// Peel off three-digit groups from the right.
+	mod := &big.Int{}
+	for b.Cmp(athousand) >= 0 {
+		b.DivMod(b, athousand, mod)
+		parts[j] = strconv.FormatInt(mod.Int64(), 10)
+		// Left-pad interior groups to exactly three digits.
+		switch len(parts[j]) {
+		case 2:
+			parts[j] = "0" + parts[j]
+		case 1:
+			parts[j] = "00" + parts[j]
+		}
+		j--
+	}
+	// The leading group is not zero-padded.
+	parts[j] = strconv.Itoa(int(b.Int64()))
+	return sign + strings.Join(parts[j:], ",")
+}

+ 40 - 0
vendor/github.com/dustin/go-humanize/commaf.go

@@ -0,0 +1,40 @@
+// +build go1.6
+
+package humanize
+
+import (
+	"bytes"
+	"math/big"
+	"strings"
+)
+
+// BigCommaf produces a string form of the given big.Float in base 10
+// with commas after every three orders of magnitude.
+//
+// NOTE(review): a negative v is modified in place (v.Abs); callers that
+// need the original value must pass a copy.
+func BigCommaf(v *big.Float) string {
+	buf := &bytes.Buffer{}
+	if v.Sign() < 0 {
+		buf.Write([]byte{'-'})
+		v.Abs(v)
+	}
+
+	comma := []byte{','}
+
+	// Split the exact decimal form into integer and fraction.
+	parts := strings.Split(v.Text('f', -1), ".")
+	pos := 0
+	// Emit the leading group of 1-2 digits, if any.
+	if len(parts[0])%3 != 0 {
+		pos += len(parts[0]) % 3
+		buf.WriteString(parts[0][:pos])
+		buf.Write(comma)
+	}
+	// Emit the remaining three-digit groups, each followed by a comma.
+	for ; pos < len(parts[0]); pos += 3 {
+		buf.WriteString(parts[0][pos : pos+3])
+		buf.Write(comma)
+	}
+	// Drop the trailing comma written by the loop above.
+	buf.Truncate(buf.Len() - 1)
+
+	// Re-attach the fractional part unchanged.
+	if len(parts) > 1 {
+		buf.Write([]byte{'.'})
+		buf.WriteString(parts[1])
+	}
+	return buf.String()
+}

+ 46 - 0
vendor/github.com/dustin/go-humanize/ftoa.go

@@ -0,0 +1,46 @@
+package humanize
+
+import (
+	"strconv"
+	"strings"
+)
+
+// stripTrailingZeros removes trailing zeros after the decimal point, and
+// the point itself when nothing non-zero follows it ("2.500" -> "2.5",
+// "2.000" -> "2").
+//
+// NOTE(review): this assumes s contains a decimal point (Ftoa always
+// formats with 6 decimals); a plain integer string like "1000" would have
+// its significant zeros stripped.
+func stripTrailingZeros(s string) string {
+	offset := len(s) - 1
+	for offset > 0 {
+		if s[offset] == '.' {
+			offset--
+			break
+		}
+		if s[offset] != '0' {
+			break
+		}
+		offset--
+	}
+	return s[:offset+1]
+}
+
+// stripTrailingDigits truncates s to at most digits characters after the
+// decimal point; digits <= 0 drops the decimal point entirely. Strings
+// without a decimal point are returned unchanged. No rounding is performed.
+func stripTrailingDigits(s string, digits int) string {
+	if i := strings.Index(s, "."); i >= 0 {
+		if digits <= 0 {
+			return s[:i]
+		}
+		// Skip past the decimal point itself.
+		i++
+		if i+digits >= len(s) {
+			return s
+		}
+		return s[:i+digits]
+	}
+	return s
+}
+
+// Ftoa converts a float to a string with no trailing zeros.
+//
+// e.g. Ftoa(2.24) -> "2.24", Ftoa(2.0) -> "2"
+//
+// The value is first formatted with six decimal places, so anything beyond
+// six decimals is rounded away.
+func Ftoa(num float64) string {
+	return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
+}
+
+// FtoaWithDigits converts a float to a string but limits the resulting
+// string to the given number of decimal places, and no trailing zeros.
+//
+// The limit truncates rather than rounds (see stripTrailingDigits), and at
+// most six decimals are available from the initial formatting.
+func FtoaWithDigits(num float64, digits int) string {
+	return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
+}

+ 8 - 0
vendor/github.com/dustin/go-humanize/humanize.go

@@ -0,0 +1,8 @@
+/*
+Package humanize converts boring ugly numbers to human-friendly strings and back.
+
+Durations can be turned into strings such as "3 days ago", numbers
+representing sizes like 82854982 into useful strings like, "83 MB" or
+"79 MiB" (whichever you prefer).
+*/
+package humanize

Some files were not shown because too many files changed in this diff