vendor: github.com/docker/swarmkit 616e8db4c3b0

Signed-off-by: Cory Snider <csnider@mirantis.com>
Cory Snider 2022-03-10 16:07:02 -05:00
parent 38805f20f9
commit 06c797f517
273 changed files with 42983 additions and 5506 deletions


@@ -52,7 +52,7 @@ func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBac
pluginBackend: p,
imageBackend: i,
volumeBackend: v,
- dependencies: agent.NewDependencyManager(),
+ dependencies: agent.NewDependencyManager(b.PluginGetter()),
}
}
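
agent.NewDependencyManager previously took no arguments; it now accepts a plugin getter so that swarm dependencies (for example, CSI volume plugins) can be resolved through the daemon's plugin store. A minimal sketch of the new call shape, using the backend variable b from the hunk above (passing nil, as the test below does, simply disables plugin lookup):

deps := agent.NewDependencyManager(b.PluginGetter())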


@@ -179,7 +179,7 @@ func TestSetWindowsCredentialSpecInSpec(t *testing.T) {
t.Run("happy path with a 'config://' option", func(t *testing.T) {
configID := "my-cred-spec"
- dependencyManager := swarmagent.NewDependencyManager()
+ dependencyManager := swarmagent.NewDependencyManager(nil)
dependencyManager.Configs().Add(swarmapi.Config{
ID: configID,
Spec: swarmapi.ConfigSpec{


@@ -33,7 +33,7 @@ require (
github.com/docker/go-units v0.4.0
github.com/docker/libkv v0.2.2-0.20211217103745-e480589147e3
github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4
- github.com/docker/swarmkit v1.12.1-0.20210726173615-3629f50980f6
+ github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0
github.com/fluent/fluent-logger-golang v1.9.0
github.com/fsnotify/fsnotify v1.5.1
github.com/godbus/dbus/v5 v5.0.6
@@ -89,17 +89,17 @@ require (
require (
code.cloudfoundry.org/clock v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+ github.com/akutz/memconn v0.1.0 // indirect
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cilium/ebpf v0.7.0 // indirect
+ github.com/container-storage-interface/spec v1.5.0 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/go-runc v1.0.0 // indirect
github.com/containerd/ttrpc v1.1.0 // indirect
- github.com/coreos/etcd v3.3.27+incompatible // indirect
- github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
- github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
+ github.com/dustin/go-humanize v1.0.0 // indirect
github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e // indirect
github.com/gofrs/flock v0.7.3 // indirect
github.com/gogo/googleapis v1.4.0 // indirect
@@ -129,11 +129,19 @@ require (
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.10.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/rexray/gocsi v1.2.2 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/tinylib/msgp v1.1.0 // indirect
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.2 // indirect
go.etcd.io/etcd/pkg/v3 v3.5.2 // indirect
go.etcd.io/etcd/raft/v3 v3.5.2 // indirect
go.etcd.io/etcd/server/v3 v3.5.2 // indirect
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.17.0 // indirect
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e // indirect
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/mod v0.4.2 // indirect
@@ -170,5 +178,8 @@ replace (
google.golang.org/grpc => google.golang.org/grpc v1.27.1
)
+ // Removes etcd dependency
+ replace github.com/rexray/gocsi => github.com/dperny/gocsi v1.2.3-pre
// autogen/winresources/dockerd is generated at build time; this replacement is only for the purpose of `go mod vendor`
replace github.com/docker/docker/autogen/winresources/dockerd => ./hack/make/.resources-windows
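
A replace directive redirects every use of a module in the build graph, and `go mod vendor` follows it when populating the vendor tree, so builds here use the dperny/gocsi fork (whose go.mod drops the etcd requirement) wherever rexray/gocsi is imported. A minimal go.mod sketch of the mechanism (module path hypothetical):

module example.com/m

require github.com/rexray/gocsi v1.2.2

// Builds and vendoring resolve all rexray/gocsi imports to the fork below.
replace github.com/rexray/gocsi => github.com/dperny/gocsi v1.2.3-pre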


@@ -66,9 +66,14 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91 h1:vX+gnvBc56EbWYrmlhYbFYRaeikAke1GL84N4BEYOFE=
github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91/go.mod h1:cDLGBht23g0XQdLjzn6xOGXDkLK182YfINAaZEQLCHQ=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/akutz/gosync v0.1.0 h1:naxPT/aDYDh79PMwM3XmencmNQeYmpNFSZy4ZE9zIW0=
github.com/akutz/gosync v0.1.0/go.mod h1:I8I4aiqJI1nqaeYOOB1WS+CgRJVVPqhct9Y4njywM84=
github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A=
github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20150106224455-eb0af217e5e9 h1:j0r1R47jEcPk5M3GY3tFbv7q5J6j0Ppler3q4Guh6C0=
github.com/armon/go-metrics v0.0.0-20150106224455-eb0af217e5e9/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -78,6 +83,7 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.31.6 h1:nKjQbpXhdImctBh1e0iLg9iQW/X297LPPuY/9f92R2k=
github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
@@ -98,6 +104,9 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0Bsq
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -117,7 +126,16 @@ github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2u
github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5 h1:PqZ3bA4yzwywivzk7PBQWngJp2/PAS0bWRZerKteicY=
github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo=
github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
@@ -222,20 +240,16 @@ github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.27+incompatible h1:QIudLb9KeBsE5zyYxd1mjzRSkzLg9Wf9QlRwFgd6oTA=
github.com/coreos/etcd v3.3.27+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5tgDm3YN7+9dYrpK96E5wFilTFWIDZOM=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@@ -289,10 +303,13 @@ github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNE
github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 h1:k8TfKGeAcDQFFQOGCQMRN04N4a9YrPlRMMKnzAuvM9Q=
github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
- github.com/docker/swarmkit v1.12.1-0.20210726173615-3629f50980f6 h1:mFQcXSzzNXVKAnl0KltjSQ7rbgipTYcXJns4sucurKA=
- github.com/docker/swarmkit v1.12.1-0.20210726173615-3629f50980f6/go.mod h1:n3Z4lIEl7g261ptkGDBcYi/3qBMDl9csaAhwi2MPejs=
+ github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0 h1:YehAv2BPLfTm58HW04wRnNy8Oo/CAzWji7mjJ6UJWgM=
+ github.com/docker/swarmkit v1.12.1-0.20220307221335-616e8db4c3b0/go.mod h1:n3Z4lIEl7g261ptkGDBcYi/3qBMDl9csaAhwi2MPejs=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+ github.com/dperny/gocsi v1.2.3-pre h1:GRTvl8G6yEXYPyul1h6YAqtyxzUHTrQHo6G3xZpb9oM=
+ github.com/dperny/gocsi v1.2.3-pre/go.mod h1:qQw5mIunz1RqMUfZcGJ9/Lt9EDaL0N3wPNYxFTuyLQo=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
@@ -306,6 +323,7 @@ github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e/go.mod h1:2H9hjfb
github.com/fluent/fluent-logger-golang v1.9.0 h1:zUdY44CHX2oIUc7VTNZc+4m+ORuO/mldQDA7czhWXEg=
github.com/fluent/fluent-logger-golang v1.9.0/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -314,6 +332,8 @@ github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWp
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -423,6 +443,7 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok=
@@ -475,10 +496,12 @@ github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -580,6 +603,7 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -590,6 +614,7 @@ github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
@@ -665,6 +690,7 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -679,6 +705,7 @@ github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -689,9 +716,11 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
@@ -699,6 +728,7 @@ github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -726,10 +756,12 @@ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/thecodeteam/gosync v0.1.0/go.mod h1:43QHsngcnWc8GE1aCmi7PEypslflHjCzXFleuWKEb00=
github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf/go.mod h1:lJAxK//iyZ3yGbQswdrPTxugZIDM7sd4bEsD0x3XMHk=
github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274 h1:wbyZxD6IPFp0sl5uscMOJRsz5UKGFiNiD16e+MVfKZY=
github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274/go.mod h1:oPAfvw32vlUJSjyDcQ3Bu0nb2ON2B+G0dtVN/SZNJiA=
@@ -776,16 +808,46 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
go.etcd.io/etcd/client/pkg/v3 v3.5.2 h1:4hzqQ6hIb3blLyQ8usCU4h3NghkqcsohEQ3o3VetYxE=
go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI=
go.etcd.io/etcd/client/v3 v3.5.2/go.mod h1:kOOaWFFgHygyT0WlSmL8TJiXmMysO/nNUlEsSsN6W4o=
go.etcd.io/etcd/pkg/v3 v3.5.2 h1:YZUojdoPhOyl5QILYnR8LTUbbNefu/sV4ma+ZMr2tto=
go.etcd.io/etcd/pkg/v3 v3.5.2/go.mod h1:zsXz+9D/kijzRiG/UnFGDTyHKcVp0orwiO8iMLAi+k0=
go.etcd.io/etcd/raft/v3 v3.5.2 h1:uCC37qOXqBvKqTGHGyhASsaCsnTuJugl1GvneJNwHWo=
go.etcd.io/etcd/raft/v3 v3.5.2/go.mod h1:G6pCP1sFgbjod7/KnEHY0vHUViqxjkdt6AiKsD0GRr8=
go.etcd.io/etcd/server/v3 v3.5.2 h1:B6ytJvS4Fmt8nkjzS2/8POf4tuPhFMluE0lWd4dx/7U=
go.etcd.io/etcd/server/v3 v3.5.2/go.mod h1:mlG8znIEz4N/28GABrohZCBM11FqgGVQcpbcyJgh0j0=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -847,6 +909,7 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
@@ -874,6 +937,7 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -956,6 +1020,7 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M=
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -972,6 +1037,7 @@ golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -1035,14 +1101,16 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
- gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=


@@ -1,4 +1,4 @@
Apache License
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@@ -199,4 +199,3 @@ Apache License
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

File diff suppressed because it is too large.


@@ -1,5 +0,0 @@
CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).


@@ -1,284 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raft
import "fmt"
const (
ProgressStateProbe ProgressStateType = iota
ProgressStateReplicate
ProgressStateSnapshot
)
type ProgressStateType uint64
var prstmap = [...]string{
"ProgressStateProbe",
"ProgressStateReplicate",
"ProgressStateSnapshot",
}
func (st ProgressStateType) String() string { return prstmap[uint64(st)] }
// Progress represents a follower's progress in the view of the leader. Leader maintains
// progresses of all followers, and sends entries to the follower based on its progress.
type Progress struct {
Match, Next uint64
// State defines how the leader should interact with the follower.
//
// When in ProgressStateProbe, leader sends at most one replication message
// per heartbeat interval. It also probes actual progress of the follower.
//
// When in ProgressStateReplicate, leader optimistically increases next
// to the latest entry sent after sending replication message. This is
// an optimized state for fast replicating log entries to the follower.
//
// When in ProgressStateSnapshot, leader should have sent out snapshot
// before and stops sending any replication message.
State ProgressStateType
// Paused is used in ProgressStateProbe.
// When Paused is true, raft should pause sending replication message to this peer.
Paused bool
// PendingSnapshot is used in ProgressStateSnapshot.
// If there is a pending snapshot, the pendingSnapshot will be set to the
// index of the snapshot. If pendingSnapshot is set, the replication process of
// this Progress will be paused. raft will not resend snapshot until the pending one
// is reported to be failed.
PendingSnapshot uint64
// RecentActive is true if the progress is recently active. Receiving any messages
// from the corresponding follower indicates the progress is active.
// RecentActive can be reset to false after an election timeout.
RecentActive bool
// inflights is a sliding window for the inflight messages.
// Each inflight message contains one or more log entries.
// The max number of entries per message is defined in raft config as MaxSizePerMsg.
// Thus inflight effectively limits both the number of inflight messages
// and the bandwidth each Progress can use.
// When inflights is full, no more message should be sent.
// When a leader sends out a message, the index of the last
// entry should be added to inflights. The index MUST be added
// into inflights in order.
// When a leader receives a reply, the previous inflights should
// be freed by calling inflights.freeTo with the index of the last
// received entry.
ins *inflights
// IsLearner is true if this progress is tracked for a learner.
IsLearner bool
}
func (pr *Progress) resetState(state ProgressStateType) {
pr.Paused = false
pr.PendingSnapshot = 0
pr.State = state
pr.ins.reset()
}
func (pr *Progress) becomeProbe() {
// If the original state is ProgressStateSnapshot, progress knows that
// the pending snapshot has been sent to this peer successfully, then
// probes from pendingSnapshot + 1.
if pr.State == ProgressStateSnapshot {
pendingSnapshot := pr.PendingSnapshot
pr.resetState(ProgressStateProbe)
pr.Next = max(pr.Match+1, pendingSnapshot+1)
} else {
pr.resetState(ProgressStateProbe)
pr.Next = pr.Match + 1
}
}
func (pr *Progress) becomeReplicate() {
pr.resetState(ProgressStateReplicate)
pr.Next = pr.Match + 1
}
func (pr *Progress) becomeSnapshot(snapshoti uint64) {
pr.resetState(ProgressStateSnapshot)
pr.PendingSnapshot = snapshoti
}
// maybeUpdate returns false if the given n index comes from an outdated message.
// Otherwise it updates the progress and returns true.
func (pr *Progress) maybeUpdate(n uint64) bool {
var updated bool
if pr.Match < n {
pr.Match = n
updated = true
pr.resume()
}
if pr.Next < n+1 {
pr.Next = n + 1
}
return updated
}
func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 }
// maybeDecrTo returns false if the given to index comes from an out of order message.
// Otherwise it decreases the progress next index to min(rejected, last) and returns true.
func (pr *Progress) maybeDecrTo(rejected, last uint64) bool {
if pr.State == ProgressStateReplicate {
// the rejection must be stale if the progress has matched and "rejected"
// is smaller than "match".
if rejected <= pr.Match {
return false
}
// directly decrease next to match + 1
pr.Next = pr.Match + 1
return true
}
// the rejection must be stale if "rejected" does not match next - 1
if pr.Next-1 != rejected {
return false
}
if pr.Next = min(rejected, last+1); pr.Next < 1 {
pr.Next = 1
}
pr.resume()
return true
}
func (pr *Progress) pause() { pr.Paused = true }
func (pr *Progress) resume() { pr.Paused = false }
// IsPaused returns whether sending log entries to this node has been
// paused. A node may be paused because it has rejected recent
// MsgApps, is currently waiting for a snapshot, or has reached the
// MaxInflightMsgs limit.
func (pr *Progress) IsPaused() bool {
switch pr.State {
case ProgressStateProbe:
return pr.Paused
case ProgressStateReplicate:
return pr.ins.full()
case ProgressStateSnapshot:
return true
default:
panic("unexpected state")
}
}
func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 }
// needSnapshotAbort returns true if snapshot progress's Match
// is equal or higher than the pendingSnapshot.
func (pr *Progress) needSnapshotAbort() bool {
return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot
}
func (pr *Progress) String() string {
return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot)
}
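// The three states above form a small machine driven by append results
// (a sketch, not part of the original file): a successful MsgAppResp
// moves a probing peer to ProgressStateReplicate via becomeReplicate; a
// rejected append drops a replicating peer back to ProgressStateProbe;
// and a follower whose needed entries have already been compacted away
// is parked in ProgressStateSnapshot until the snapshot applies or fails.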
type inflights struct {
// the starting index in the buffer
start int
// number of inflights in the buffer
count int
// the size of the buffer
size int
// buffer contains the index of the last entry
// inside one message.
buffer []uint64
}
func newInflights(size int) *inflights {
return &inflights{
size: size,
}
}
// add adds an inflight into inflights
func (in *inflights) add(inflight uint64) {
if in.full() {
panic("cannot add into a full inflights")
}
next := in.start + in.count
size := in.size
if next >= size {
next -= size
}
if next >= len(in.buffer) {
in.growBuf()
}
in.buffer[next] = inflight
in.count++
}
// grow the inflight buffer by doubling up to inflights.size. We grow on demand
// instead of preallocating to inflights.size to handle systems which have
// thousands of Raft groups per process.
func (in *inflights) growBuf() {
newSize := len(in.buffer) * 2
if newSize == 0 {
newSize = 1
} else if newSize > in.size {
newSize = in.size
}
newBuffer := make([]uint64, newSize)
copy(newBuffer, in.buffer)
in.buffer = newBuffer
}
// freeTo frees the inflights smaller or equal to the given `to` flight.
func (in *inflights) freeTo(to uint64) {
if in.count == 0 || to < in.buffer[in.start] {
// out of the left side of the window
return
}
idx := in.start
var i int
for i = 0; i < in.count; i++ {
if to < in.buffer[idx] { // found the first large inflight
break
}
// increase index and maybe rotate
size := in.size
if idx++; idx >= size {
idx -= size
}
}
// free i inflights and set new start index
in.count -= i
in.start = idx
if in.count == 0 {
// inflights is empty, reset the start index so that we don't grow the
// buffer unnecessarily.
in.start = 0
}
}
func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) }
// full returns true if the inflights is full.
func (in *inflights) full() bool {
return in.count == in.size
}
// reset frees all inflights.
func (in *inflights) reset() {
in.count = 0
in.start = 0
}
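
The inflights type above is a fixed-capacity ring buffer of last-entry indexes. A self-contained sketch of the same bookkeeping outside package raft (all names here are illustrative, not etcd's):

package main

import "fmt"

// window mirrors the inflights bookkeeping: record monotonically
// increasing last-entry indexes as messages go out, then free
// everything up to an acknowledged index.
type window struct {
	start, count, size int
	buffer             []int
}

func (w *window) add(i int) {
	if w.count == w.size {
		panic("window full: pause sending until freeTo")
	}
	w.buffer[(w.start+w.count)%w.size] = i
	w.count++
}

func (w *window) freeTo(to int) {
	for w.count > 0 && w.buffer[w.start] <= to {
		w.start = (w.start + 1) % w.size
		w.count--
	}
}

func main() {
	w := &window{size: 3, buffer: make([]int, 3)}
	w.add(5)  // sent entries ending at index 5
	w.add(9)  // ...then at index 9
	w.add(12) // ...then at index 12; the window is now full
	w.freeTo(9)          // follower acknowledged through index 9
	fmt.Println(w.count) // 1: only the message ending at 12 is in flight
}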

File diff suppressed because it is too large.


@@ -1,95 +0,0 @@
syntax = "proto2";
package raftpb;
import "gogoproto/gogo.proto";
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;
option (gogoproto.goproto_enum_prefix_all) = false;
enum EntryType {
EntryNormal = 0;
EntryConfChange = 1;
}
message Entry {
optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
optional EntryType Type = 1 [(gogoproto.nullable) = false];
optional bytes Data = 4;
}
message SnapshotMetadata {
optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
optional uint64 index = 2 [(gogoproto.nullable) = false];
optional uint64 term = 3 [(gogoproto.nullable) = false];
}
message Snapshot {
optional bytes data = 1;
optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
}
enum MessageType {
MsgHup = 0;
MsgBeat = 1;
MsgProp = 2;
MsgApp = 3;
MsgAppResp = 4;
MsgVote = 5;
MsgVoteResp = 6;
MsgSnap = 7;
MsgHeartbeat = 8;
MsgHeartbeatResp = 9;
MsgUnreachable = 10;
MsgSnapStatus = 11;
MsgCheckQuorum = 12;
MsgTransferLeader = 13;
MsgTimeoutNow = 14;
MsgReadIndex = 15;
MsgReadIndexResp = 16;
MsgPreVote = 17;
MsgPreVoteResp = 18;
}
message Message {
optional MessageType type = 1 [(gogoproto.nullable) = false];
optional uint64 to = 2 [(gogoproto.nullable) = false];
optional uint64 from = 3 [(gogoproto.nullable) = false];
optional uint64 term = 4 [(gogoproto.nullable) = false];
optional uint64 logTerm = 5 [(gogoproto.nullable) = false];
optional uint64 index = 6 [(gogoproto.nullable) = false];
repeated Entry entries = 7 [(gogoproto.nullable) = false];
optional uint64 commit = 8 [(gogoproto.nullable) = false];
optional Snapshot snapshot = 9 [(gogoproto.nullable) = false];
optional bool reject = 10 [(gogoproto.nullable) = false];
optional uint64 rejectHint = 11 [(gogoproto.nullable) = false];
optional bytes context = 12;
}
message HardState {
optional uint64 term = 1 [(gogoproto.nullable) = false];
optional uint64 vote = 2 [(gogoproto.nullable) = false];
optional uint64 commit = 3 [(gogoproto.nullable) = false];
}
message ConfState {
repeated uint64 nodes = 1;
repeated uint64 learners = 2;
}
enum ConfChangeType {
ConfChangeAddNode = 0;
ConfChangeRemoveNode = 1;
ConfChangeUpdateNode = 2;
ConfChangeAddLearnerNode = 3;
}
message ConfChange {
optional uint64 ID = 1 [(gogoproto.nullable) = false];
optional ConfChangeType Type = 2 [(gogoproto.nullable) = false];
optional uint64 NodeID = 3 [(gogoproto.nullable) = false];
optional bytes Context = 4;
}
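
These generated types are what the vendored raft code puts on the wire. A short sketch of building a configuration-change entry with the generated Go bindings (import path as vendored here; the ID and index values are illustrative):

package main

import pb "github.com/coreos/etcd/raft/raftpb"

func confChangeEntry() (pb.Entry, error) {
	// A ConfChange proposing that node 2 join the group; it is
	// serialized into the Data of an EntryConfChange entry.
	cc := pb.ConfChange{ID: 1, Type: pb.ConfChangeAddNode, NodeID: 2}
	data, err := cc.Marshal() // gogoproto-generated marshaler
	if err != nil {
		return pb.Entry{}, err
	}
	return pb.Entry{Term: 1, Index: 1, Type: pb.EntryConfChange, Data: data}, nil
}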


@@ -1,129 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raft
import (
"bytes"
"fmt"
pb "github.com/coreos/etcd/raft/raftpb"
)
func (st StateType) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("%q", st.String())), nil
}
// uint64Slice implements sort interface
type uint64Slice []uint64
func (p uint64Slice) Len() int { return len(p) }
func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func min(a, b uint64) uint64 {
if a > b {
return b
}
return a
}
func max(a, b uint64) uint64 {
if a > b {
return a
}
return b
}
func IsLocalMsg(msgt pb.MessageType) bool {
return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable ||
msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum
}
func IsResponseMsg(msgt pb.MessageType) bool {
return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp
}
// voteRespMsgType maps vote and prevote message types to their corresponding responses.
func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
switch msgt {
case pb.MsgVote:
return pb.MsgVoteResp
case pb.MsgPreVote:
return pb.MsgPreVoteResp
default:
panic(fmt.Sprintf("not a vote message: %s", msgt))
}
}
// EntryFormatter can be implemented by the application to provide human-readable formatting
// of entry data. Nil is a valid EntryFormatter and will use a default format.
type EntryFormatter func([]byte) string
// DescribeMessage returns a concise human-readable description of a
// Message for debugging.
func DescribeMessage(m pb.Message, f EntryFormatter) string {
var buf bytes.Buffer
fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
if m.Reject {
fmt.Fprintf(&buf, " Rejected")
if m.RejectHint != 0 {
fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint)
}
}
if m.Commit != 0 {
fmt.Fprintf(&buf, " Commit:%d", m.Commit)
}
if len(m.Entries) > 0 {
fmt.Fprintf(&buf, " Entries:[")
for i, e := range m.Entries {
if i != 0 {
buf.WriteString(", ")
}
buf.WriteString(DescribeEntry(e, f))
}
fmt.Fprintf(&buf, "]")
}
if !IsEmptySnap(m.Snapshot) {
fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot)
}
return buf.String()
}
// DescribeEntry returns a concise human-readable description of an
// Entry for debugging.
func DescribeEntry(e pb.Entry, f EntryFormatter) string {
var formatted string
if e.Type == pb.EntryNormal && f != nil {
formatted = f(e.Data)
} else {
formatted = fmt.Sprintf("%q", e.Data)
}
return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted)
}
func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry {
if len(ents) == 0 {
return ents
}
size := ents[0].Size()
var limit int
for limit = 1; limit < len(ents); limit++ {
size += ents[limit].Size()
if uint64(size) > maxSize {
break
}
}
return ents[:limit]
}
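
DescribeMessage and DescribeEntry are exported debugging helpers. A short usage sketch (import paths as vendored here; the entry contents are illustrative):

package main

import (
	"fmt"

	"github.com/coreos/etcd/raft"
	pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
	e := pb.Entry{Term: 3, Index: 7, Type: pb.EntryNormal, Data: []byte("set x=1")}
	// A nil EntryFormatter falls back to %q on the raw data, printing:
	// 3/7 EntryNormal "set x=1"
	fmt.Println(raft.DescribeEntry(e, nil))
}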


@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@@ -1,5 +0,0 @@
CoreOS Project
Copyright 2018 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).

View file

@@ -1,182 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package journal provides write bindings to the local systemd journal.
// It is implemented in pure Go and connects to the journal directly over its
// unix socket.
//
// To read from the journal, see the "sdjournal" package, which wraps the
// sd-journal C API.
//
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
package journal
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"strconv"
"strings"
"syscall"
)
// Priority of a journal message
type Priority int
const (
PriEmerg Priority = iota
PriAlert
PriCrit
PriErr
PriWarning
PriNotice
PriInfo
PriDebug
)
var conn net.Conn
func init() {
var err error
conn, err = net.Dial("unixgram", "/run/systemd/journal/socket")
if err != nil {
conn = nil
}
}
// Enabled returns true if the local systemd journal is available for logging
func Enabled() bool {
return conn != nil
}
// Send a message to the local systemd journal. vars is a map of journald
// fields to values. Fields must be composed of uppercase letters, numbers,
// and underscores, but must not start with an underscore. Within these
// restrictions, any arbitrary field name may be used. Some names have special
// significance: see the journalctl documentation
// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
// for more details. vars may be nil.
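//
// A minimal usage sketch (the custom field name below is illustrative, not
// prescribed by this package):
//
//	if journal.Enabled() {
//		vars := map[string]string{"EXAMPLE_FIELD": "value"}
//		if err := journal.Send("service started", journal.PriInfo, vars); err != nil {
//			// fall back to another logger
//		}
//	}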
func Send(message string, priority Priority, vars map[string]string) error {
if conn == nil {
return journalError("could not connect to journald socket")
}
data := new(bytes.Buffer)
appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
appendVariable(data, "MESSAGE", message)
for k, v := range vars {
appendVariable(data, k, v)
}
_, err := io.Copy(conn, data)
if err != nil && isSocketSpaceError(err) {
file, err := tempFd()
if err != nil {
return journalError(err.Error())
}
defer file.Close()
_, err = io.Copy(file, data)
if err != nil {
return journalError(err.Error())
}
rights := syscall.UnixRights(int(file.Fd()))
/* this connection should always be a UnixConn, but better safe than sorry */
unixConn, ok := conn.(*net.UnixConn)
if !ok {
return journalError("can't send file through non-Unix connection")
}
_, _, err = unixConn.WriteMsgUnix([]byte{}, rights, nil)
if err != nil {
return journalError(err.Error())
}
} else if err != nil {
return journalError(err.Error())
}
return nil
}
// Print prints a message to the local systemd journal using Send().
func Print(priority Priority, format string, a ...interface{}) error {
return Send(fmt.Sprintf(format, a...), priority, nil)
}
func appendVariable(w io.Writer, name, value string) {
if !validVarName(name) {
journalError("variable name contains invalid character, ignoring")
}
if strings.ContainsRune(value, '\n') {
/* When the value contains a newline, we write:
* - the variable name, followed by a newline
* - the size (in 64bit little endian format)
* - the data, followed by a newline
*/
fmt.Fprintln(w, name)
binary.Write(w, binary.LittleEndian, uint64(len(value)))
fmt.Fprintln(w, value)
} else {
/* just write the variable and value all on one line */
fmt.Fprintf(w, "%s=%s\n", name, value)
}
}
func validVarName(name string) bool {
/* The variable name must be in uppercase and consist only of characters,
* numbers and underscores, and may not begin with an underscore. (from the docs)
*/
valid := name[0] != '_'
for _, c := range name {
valid = valid && (('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_')
}
return valid
}
func isSocketSpaceError(err error) bool {
opErr, ok := err.(*net.OpError)
if !ok {
return false
}
sysErr, ok := opErr.Err.(syscall.Errno)
if !ok {
return false
}
return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
}
func tempFd() (*os.File, error) {
file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
if err != nil {
return nil, err
}
err = syscall.Unlink(file.Name())
if err != nil {
return nil, err
}
return file, nil
}
func journalError(s string) error {
s = "journal error: " + s
fmt.Fprintln(os.Stderr, s)
return errors.New(s)
}

View file

@@ -1,5 +0,0 @@
CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).

View file

@@ -1,39 +0,0 @@
# capnslog, the CoreOS logging package
There are far too many logging packages out there, with varying licenses, far too many features (colorization, all sorts of log frameworks), or APIs that are just a pain to use (lack of `Fatalln()`?).
capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
### Design Principles
##### `package main` is the place where logging gets turned on and routed
A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
##### All log options are runtime-configurable.
It is still the job of `main` to expose these configurations; `main` may delegate this to, say, a configuration webhook, but does so explicitly.
##### There is one log object per package. It is registered under its repository and package name.
`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
##### Log objects are an interface
An object knows best how to print itself. Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
##### Log levels have specific meanings:
* Critical: Unrecoverable. Must fail.
* Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
* Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
* Notice: Normal, but important (uncommon) log information.
* Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
* Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices.
* Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
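As a minimal usage sketch of these principles (the repository and package names below are placeholders, not part of capnslog):

	import "github.com/coreos/pkg/capnslog"

	var plog = capnslog.NewPackageLogger("github.com/example/project", "server")

	func main() {
		capnslog.SetGlobalLogLevel(capnslog.INFO)
		plog.Infof("listening on %s", ":8080")
	}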

View file

@@ -1,157 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package capnslog
import (
"bufio"
"fmt"
"io"
"log"
"runtime"
"strings"
"time"
)
type Formatter interface {
Format(pkg string, level LogLevel, depth int, entries ...interface{})
Flush()
}
func NewStringFormatter(w io.Writer) Formatter {
return &StringFormatter{
w: bufio.NewWriter(w),
}
}
type StringFormatter struct {
w *bufio.Writer
}
func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
now := time.Now().UTC()
s.w.WriteString(now.Format(time.RFC3339))
s.w.WriteByte(' ')
writeEntries(s.w, pkg, l, i, entries...)
s.Flush()
}
func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
if pkg != "" {
w.WriteString(pkg + ": ")
}
str := fmt.Sprint(entries...)
endsInNL := strings.HasSuffix(str, "\n")
w.WriteString(str)
if !endsInNL {
w.WriteString("\n")
}
}
func (s *StringFormatter) Flush() {
s.w.Flush()
}
func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
return &PrettyFormatter{
w: bufio.NewWriter(w),
debug: debug,
}
}
type PrettyFormatter struct {
w *bufio.Writer
debug bool
}
func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
now := time.Now()
ts := now.Format("2006-01-02 15:04:05")
c.w.WriteString(ts)
ms := now.Nanosecond() / 1000
c.w.WriteString(fmt.Sprintf(".%06d", ms))
if c.debug {
_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
if !ok {
file = "???"
line = 1
} else {
slash := strings.LastIndex(file, "/")
if slash >= 0 {
file = file[slash+1:]
}
}
if line < 0 {
line = 0 // not a real line number
}
c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
}
c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
writeEntries(c.w, pkg, l, depth, entries...)
c.Flush()
}
func (c *PrettyFormatter) Flush() {
c.w.Flush()
}
// LogFormatter emulates the form of the traditional built-in logger.
type LogFormatter struct {
logger *log.Logger
prefix string
}
// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the
// golang log package to actually do the logging work so that logs look similar.
func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter {
return &LogFormatter{
logger: log.New(w, "", flag), // don't use prefix here
prefix: prefix, // save it instead
}
}
// Format builds a log message for the LogFormatter. The LogLevel is ignored.
func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) {
str := fmt.Sprint(entries...)
prefix := lf.prefix
if pkg != "" {
prefix = fmt.Sprintf("%s%s: ", prefix, pkg)
}
lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5
}
// Flush is included so that the interface is complete, but is a no-op.
func (lf *LogFormatter) Flush() {
// noop
}
// NilFormatter is a no-op log formatter that does nothing.
type NilFormatter struct {
}
// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no
// messages so that you can cause part of your logging to be silent.
func NewNilFormatter() Formatter {
return &NilFormatter{}
}
// Format does nothing.
func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) {
// noop
}
// Flush is included so that the interface is complete, but is a no-op.
func (_ *NilFormatter) Flush() {
// noop
}

View file

@@ -1,96 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package capnslog
import (
"bufio"
"bytes"
"io"
"os"
"runtime"
"strconv"
"strings"
"time"
)
var pid = os.Getpid()
type GlogFormatter struct {
StringFormatter
}
func NewGlogFormatter(w io.Writer) *GlogFormatter {
g := &GlogFormatter{}
g.w = bufio.NewWriter(w)
return g
}
func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
g.w.Write(GlogHeader(level, depth+1))
g.StringFormatter.Format(pkg, level, depth+1, entries...)
}
func GlogHeader(level LogLevel, depth int) []byte {
// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
now := time.Now().UTC()
_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
if !ok {
file = "???"
line = 1
} else {
slash := strings.LastIndex(file, "/")
if slash >= 0 {
file = file[slash+1:]
}
}
if line < 0 {
line = 0 // not a real line number
}
buf := &bytes.Buffer{}
buf.Grow(30)
_, month, day := now.Date()
hour, minute, second := now.Clock()
buf.WriteString(level.Char())
twoDigits(buf, int(month))
twoDigits(buf, day)
buf.WriteByte(' ')
twoDigits(buf, hour)
buf.WriteByte(':')
twoDigits(buf, minute)
buf.WriteByte(':')
twoDigits(buf, second)
buf.WriteByte('.')
buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
buf.WriteByte('Z')
buf.WriteByte(' ')
buf.WriteString(strconv.Itoa(pid))
buf.WriteByte(' ')
buf.WriteString(file)
buf.WriteByte(':')
buf.WriteString(strconv.Itoa(line))
buf.WriteByte(']')
buf.WriteByte(' ')
return buf.Bytes()
}
const digits = "0123456789"
func twoDigits(b *bytes.Buffer, d int) {
c2 := digits[d%10]
d /= 10
c1 := digits[d%10]
b.WriteByte(c1)
b.WriteByte(c2)
}

View file

@@ -1,49 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build !windows
package capnslog
import (
"io"
"os"
"syscall"
)
// Here's where the opinionation comes in. We need some sensible defaults,
// especially after taking over the log package. Your project (whatever it may
// be) may see things differently. That's okay; there should be no defaults in
// the main package that cannot be controlled or overridden programmatically;
// otherwise it's a bug. The way to do so is to create your own init_log.go
// file much like this one.
func init() {
initHijack()
// The Go `log` package uses os.Stderr.
SetFormatter(NewDefaultFormatter(os.Stderr))
SetGlobalLogLevel(INFO)
}
func NewDefaultFormatter(out io.Writer) Formatter {
if syscall.Getppid() == 1 {
// We're running under init, which may be systemd.
f, err := NewJournaldFormatter()
if err == nil {
return f
}
}
return NewPrettyFormatter(out, false)
}

View file

@@ -1,68 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build !windows
package capnslog
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/coreos/go-systemd/journal"
)
func NewJournaldFormatter() (Formatter, error) {
if !journal.Enabled() {
return nil, errors.New("No systemd detected")
}
return &journaldFormatter{}, nil
}
type journaldFormatter struct{}
func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
var pri journal.Priority
switch l {
case CRITICAL:
pri = journal.PriCrit
case ERROR:
pri = journal.PriErr
case WARNING:
pri = journal.PriWarning
case NOTICE:
pri = journal.PriNotice
case INFO:
pri = journal.PriInfo
case DEBUG:
pri = journal.PriDebug
case TRACE:
pri = journal.PriDebug
default:
panic("Unhandled loglevel")
}
msg := fmt.Sprint(entries...)
tags := map[string]string{
"PACKAGE": pkg,
"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
}
err := journal.Send(msg, pri, tags)
if err != nil {
fmt.Fprintln(os.Stderr, err)
}
}
func (j *journaldFormatter) Flush() {}

View file

@@ -1,39 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package capnslog
import (
"log"
)
func initHijack() {
pkg := NewPackageLogger("log", "")
w := packageWriter{pkg}
log.SetFlags(0)
log.SetPrefix("")
log.SetOutput(w)
}
type packageWriter struct {
pl *PackageLogger
}
func (p packageWriter) Write(b []byte) (int, error) {
if p.pl.level < INFO {
return 0, nil
}
p.pl.internalLog(calldepth+2, INFO, string(b))
return len(b), nil
}

View file

@@ -1,245 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package capnslog
import (
"errors"
"strings"
"sync"
)
// LogLevel is the set of all log levels.
type LogLevel int8
const (
// CRITICAL is the lowest log level; only errors which will end the program will be propagated.
CRITICAL LogLevel = iota - 1
// ERROR is for errors that are not fatal but lead to troubling behavior.
ERROR
// WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations.
WARNING
// NOTICE is for normal but significant conditions.
NOTICE
// INFO is a log level for common, everyday log updates.
INFO
// DEBUG is the default hidden level for more verbose updates about internal processes.
DEBUG
// TRACE is for (potentially) call by call tracing of programs.
TRACE
)
// Char returns a single-character representation of the log level.
func (l LogLevel) Char() string {
switch l {
case CRITICAL:
return "C"
case ERROR:
return "E"
case WARNING:
return "W"
case NOTICE:
return "N"
case INFO:
return "I"
case DEBUG:
return "D"
case TRACE:
return "T"
default:
panic("Unhandled loglevel")
}
}
// String returns a multi-character representation of the log level.
func (l LogLevel) String() string {
switch l {
case CRITICAL:
return "CRITICAL"
case ERROR:
return "ERROR"
case WARNING:
return "WARNING"
case NOTICE:
return "NOTICE"
case INFO:
return "INFO"
case DEBUG:
return "DEBUG"
case TRACE:
return "TRACE"
default:
panic("Unhandled loglevel")
}
}
// Set updates the log level using the given string value. Fulfills the flag.Value interface.
func (l *LogLevel) Set(s string) error {
value, err := ParseLevel(s)
if err != nil {
return err
}
*l = value
return nil
}
// Type returns an empty string; it exists only to fulfill the pflag.Value interface.
func (l *LogLevel) Type() string {
return ""
}
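// A minimal sketch of binding a LogLevel to a command-line flag (the flag
// name is an assumption):
//
//	lvl := capnslog.INFO
//	flag.Var(&lvl, "loglevel", "one of CRITICAL, ERROR, WARNING, NOTICE, INFO, DEBUG, TRACE")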
// ParseLevel translates some potential loglevel strings into their corresponding levels.
func ParseLevel(s string) (LogLevel, error) {
switch s {
case "CRITICAL", "C":
return CRITICAL, nil
case "ERROR", "0", "E":
return ERROR, nil
case "WARNING", "1", "W":
return WARNING, nil
case "NOTICE", "2", "N":
return NOTICE, nil
case "INFO", "3", "I":
return INFO, nil
case "DEBUG", "4", "D":
return DEBUG, nil
case "TRACE", "5", "T":
return TRACE, nil
}
return CRITICAL, errors.New("couldn't parse log level " + s)
}
type RepoLogger map[string]*PackageLogger
type loggerStruct struct {
sync.Mutex
repoMap map[string]RepoLogger
formatter Formatter
}
// logger is the global logger
var logger = new(loggerStruct)
// SetGlobalLogLevel sets the log level for all packages in all repositories
// registered with capnslog.
func SetGlobalLogLevel(l LogLevel) {
logger.Lock()
defer logger.Unlock()
for _, r := range logger.repoMap {
r.setRepoLogLevelInternal(l)
}
}
// GetRepoLogger may return the handle to the repository's set of packages' loggers.
func GetRepoLogger(repo string) (RepoLogger, error) {
logger.Lock()
defer logger.Unlock()
r, ok := logger.repoMap[repo]
if !ok {
return nil, errors.New("no packages registered for repo " + repo)
}
return r, nil
}
// MustRepoLogger returns the handle to the repository's packages' loggers.
func MustRepoLogger(repo string) RepoLogger {
r, err := GetRepoLogger(repo)
if err != nil {
panic(err)
}
return r
}
// SetRepoLogLevel sets the log level for all packages in the repository.
func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
logger.Lock()
defer logger.Unlock()
r.setRepoLogLevelInternal(l)
}
func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
for _, v := range r {
v.level = l
}
}
// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
// order, and returns a map of the results, for use in SetLogLevel.
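//
// For example (a sketch; the package names are placeholders):
//
//	levels, err := repoLogger.ParseLogLevelConfig("api=DEBUG,raft=TRACE,*=INFO")
//	if err == nil {
//		repoLogger.SetLogLevel(levels)
//	}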
func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
setlist := strings.Split(conf, ",")
out := make(map[string]LogLevel)
for _, setstring := range setlist {
setting := strings.Split(setstring, "=")
if len(setting) != 2 {
return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
}
l, err := ParseLevel(setting[1])
if err != nil {
return nil, err
}
out[setting[0]] = l
}
return out, nil
}
// SetLogLevel takes a map of package names within a repository to their desired
// loglevel, and sets the levels appropriately. Unknown packages are ignored.
// "*" is a special package name that corresponds to all packages, and will be
// processed first.
func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
logger.Lock()
defer logger.Unlock()
if l, ok := m["*"]; ok {
r.setRepoLogLevelInternal(l)
}
for k, v := range m {
l, ok := r[k]
if !ok {
continue
}
l.level = v
}
}
// SetFormatter sets the formatting function for all logs.
func SetFormatter(f Formatter) {
logger.Lock()
defer logger.Unlock()
logger.formatter = f
}
// NewPackageLogger creates a package logger object.
// This should be defined as a global var in your package, referencing your repo.
func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
logger.Lock()
defer logger.Unlock()
if logger.repoMap == nil {
logger.repoMap = make(map[string]RepoLogger)
}
r, rok := logger.repoMap[repo]
if !rok {
logger.repoMap[repo] = make(RepoLogger)
r = logger.repoMap[repo]
}
p, pok := r[pkg]
if !pok {
r[pkg] = &PackageLogger{
pkg: pkg,
level: INFO,
}
p = r[pkg]
}
return
}

View file

@@ -1,191 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package capnslog
import (
"fmt"
"os"
)
type PackageLogger struct {
pkg string
level LogLevel
}
const calldepth = 2
func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
logger.Lock()
defer logger.Unlock()
if inLevel != CRITICAL && p.level < inLevel {
return
}
if logger.formatter != nil {
logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
}
}
// SetLevel allows users to change the current logging level.
func (p *PackageLogger) SetLevel(l LogLevel) {
logger.Lock()
defer logger.Unlock()
p.level = l
}
// LevelAt checks whether the given log level will be output under the current setting.
func (p *PackageLogger) LevelAt(l LogLevel) bool {
logger.Lock()
defer logger.Unlock()
return p.level >= l
}
// Log a formatted string at any level between ERROR and TRACE
func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
}
// Log a message at any level between ERROR and TRACE
func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
p.internalLog(calldepth, l, fmt.Sprint(args...))
}
// log stdlib compatibility
func (p *PackageLogger) Println(args ...interface{}) {
p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
}
func (p *PackageLogger) Printf(format string, args ...interface{}) {
p.Logf(INFO, format, args...)
}
func (p *PackageLogger) Print(args ...interface{}) {
p.internalLog(calldepth, INFO, fmt.Sprint(args...))
}
// Panic and fatal
func (p *PackageLogger) Panicf(format string, args ...interface{}) {
s := fmt.Sprintf(format, args...)
p.internalLog(calldepth, CRITICAL, s)
panic(s)
}
func (p *PackageLogger) Panic(args ...interface{}) {
s := fmt.Sprint(args...)
p.internalLog(calldepth, CRITICAL, s)
panic(s)
}
func (p *PackageLogger) Panicln(args ...interface{}) {
s := fmt.Sprintln(args...)
p.internalLog(calldepth, CRITICAL, s)
panic(s)
}
func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
p.Logf(CRITICAL, format, args...)
os.Exit(1)
}
func (p *PackageLogger) Fatal(args ...interface{}) {
s := fmt.Sprint(args...)
p.internalLog(calldepth, CRITICAL, s)
os.Exit(1)
}
func (p *PackageLogger) Fatalln(args ...interface{}) {
s := fmt.Sprintln(args...)
p.internalLog(calldepth, CRITICAL, s)
os.Exit(1)
}
// Error Functions
func (p *PackageLogger) Errorf(format string, args ...interface{}) {
p.Logf(ERROR, format, args...)
}
func (p *PackageLogger) Error(entries ...interface{}) {
p.internalLog(calldepth, ERROR, entries...)
}
// Warning Functions
func (p *PackageLogger) Warningf(format string, args ...interface{}) {
p.Logf(WARNING, format, args...)
}
func (p *PackageLogger) Warning(entries ...interface{}) {
p.internalLog(calldepth, WARNING, entries...)
}
// Notice Functions
func (p *PackageLogger) Noticef(format string, args ...interface{}) {
p.Logf(NOTICE, format, args...)
}
func (p *PackageLogger) Notice(entries ...interface{}) {
p.internalLog(calldepth, NOTICE, entries...)
}
// Info Functions
func (p *PackageLogger) Infof(format string, args ...interface{}) {
p.Logf(INFO, format, args...)
}
func (p *PackageLogger) Info(entries ...interface{}) {
p.internalLog(calldepth, INFO, entries...)
}
// Debug Functions
func (p *PackageLogger) Debugf(format string, args ...interface{}) {
if p.level < DEBUG {
return
}
p.Logf(DEBUG, format, args...)
}
func (p *PackageLogger) Debug(entries ...interface{}) {
if p.level < DEBUG {
return
}
p.internalLog(calldepth, DEBUG, entries...)
}
// Trace Functions
func (p *PackageLogger) Tracef(format string, args ...interface{}) {
if p.level < TRACE {
return
}
p.Logf(TRACE, format, args...)
}
func (p *PackageLogger) Trace(entries ...interface{}) {
if p.level < TRACE {
return
}
p.internalLog(calldepth, TRACE, entries...)
}
func (p *PackageLogger) Flush() {
logger.Lock()
defer logger.Unlock()
logger.formatter.Flush()
}

View file

@@ -1,65 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build !windows
package capnslog
import (
"fmt"
"log/syslog"
)
func NewSyslogFormatter(w *syslog.Writer) Formatter {
return &syslogFormatter{w}
}
func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
w, err := syslog.New(syslog.LOG_DEBUG, tag)
if err != nil {
return nil, err
}
return NewSyslogFormatter(w), nil
}
type syslogFormatter struct {
w *syslog.Writer
}
func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
for _, entry := range entries {
str := fmt.Sprint(entry)
switch l {
case CRITICAL:
s.w.Crit(str)
case ERROR:
s.w.Err(str)
case WARNING:
s.w.Warning(str)
case NOTICE:
s.w.Notice(str)
case INFO:
s.w.Info(str)
case DEBUG:
s.w.Debug(str)
case TRACE:
s.w.Debug(str)
default:
panic("Unhandled loglevel")
}
}
}
func (s *syslogFormatter) Flush() {
}

View file

@@ -543,6 +543,40 @@ func (a *Agent) UpdateTaskStatus(ctx context.Context, taskID string, status *api
}
}
// ReportVolumeUnpublished sends a Volume status update to the manager
// indicating that the provided volume has been successfully unpublished.
func (a *Agent) ReportVolumeUnpublished(ctx context.Context, volumeID string) error {
l := log.G(ctx).WithField("volume.ID", volumeID)
l.Debug("(*Agent).ReportVolumeUnpublished")
ctx, cancel := context.WithCancel(ctx)
defer cancel()
errs := make(chan error, 1)
if err := a.withSession(ctx, func(session *session) error {
go func() {
err := session.reportVolumeUnpublished(ctx, []string{volumeID})
if err != nil {
l.WithError(err).Error("error reporting volume unpublished")
} else {
l.Debug("reported volume unpublished")
}
errs <- err
}()
return nil
}); err != nil {
return err
}
select {
case err := <-errs:
return err
case <-ctx.Done():
return ctx.Err()
}
}
// Publisher returns a LogPublisher for the given subscription
// as well as a cancel function that should be called when the log stream
// is completed.
@@ -597,8 +631,8 @@ func (a *Agent) Publisher(ctx context.Context, subscriptionID string) (exec.LogP
func (a *Agent) nodeDescriptionWithHostname(ctx context.Context, tlsInfo *api.NodeTLSInfo) (*api.NodeDescription, error) {
desc, err := a.config.Executor.Describe(ctx)
// Override hostname and TLS info
if desc != nil {
// Override hostname and TLS info
if a.config.Hostname != "" {
desc.Hostname = a.config.Hostname
}

View file

@@ -0,0 +1,120 @@
package plugin
import (
"context"
"fmt"
"sync"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/swarmkit/api"
)
const (
// DockerCSIPluginCap is the capability name of the plugins we use with the
// PluginGetter to get only the plugins we need. The full name of the
// plugin interface is "docker.csinode/1.0". This gets only plugins with
// Node capabilities.
DockerCSIPluginCap = "csinode"
)
// PluginManager manages the multiple CSI plugins that may be in use on the
// node. PluginManager should be thread-safe.
type PluginManager interface {
// Get gets the plugin with the given name
Get(name string) (NodePlugin, error)
// NodeInfo returns the NodeCSIInfo for every active plugin.
NodeInfo(ctx context.Context) ([]*api.NodeCSIInfo, error)
}
type pluginManager struct {
plugins map[string]NodePlugin
pluginsMu sync.Mutex
// newNodePluginFunc usually points to NewNodePlugin. However, for testing,
// NewNodePlugin can be swapped out with a function that creates fake node
// plugins
newNodePluginFunc func(string, plugingetter.CompatPlugin, plugingetter.PluginAddr, SecretGetter) NodePlugin
// secrets is a SecretGetter for use by node plugins.
secrets SecretGetter
pg plugingetter.PluginGetter
}
func NewPluginManager(pg plugingetter.PluginGetter, secrets SecretGetter) PluginManager {
return &pluginManager{
plugins: map[string]NodePlugin{},
newNodePluginFunc: NewNodePlugin,
secrets: secrets,
pg: pg,
}
}
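// A minimal usage sketch (the plugin name is an assumption; pg and secrets
// are supplied by the caller):
//
//	pm := NewPluginManager(pg, secrets)
//	np, err := pm.Get("my-csi-driver")
//	if err == nil {
//		info, _ := np.NodeGetInfo(ctx)
//		_ = info
//	}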
func (pm *pluginManager) Get(name string) (NodePlugin, error) {
pm.pluginsMu.Lock()
defer pm.pluginsMu.Unlock()
plugin, err := pm.getPlugin(name)
if err != nil {
return nil, fmt.Errorf("cannot get plugin %v: %v", name, err)
}
return plugin, nil
}
func (pm *pluginManager) NodeInfo(ctx context.Context) ([]*api.NodeCSIInfo, error) {
// TODO(dperny): do not acquire this lock for the duration of the
// function call. That's too long and too blocking.
pm.pluginsMu.Lock()
defer pm.pluginsMu.Unlock()
// first, we should make sure all of the plugins are initialized. do this
// by looking up all the current plugins with DockerCSIPluginCap.
plugins := pm.pg.GetAllManagedPluginsByCap(DockerCSIPluginCap)
for _, plugin := range plugins {
// TODO(dperny): use this opportunity to drop plugins that we're
// tracking but which no longer exist.
// we don't actually need the plugin returned, we just need it loaded
// as a side effect.
pm.getPlugin(plugin.Name())
}
nodeInfo := []*api.NodeCSIInfo{}
for _, plugin := range pm.plugins {
info, err := plugin.NodeGetInfo(ctx)
if err != nil {
// skip any plugin that returns an error
continue
}
nodeInfo = append(nodeInfo, info)
}
return nodeInfo, nil
}
// getPlugin looks up the plugin with the specified name. Loads the plugin if
// not yet loaded.
//
// pm.pluginsMu must be obtained before calling this method.
func (pm *pluginManager) getPlugin(name string) (NodePlugin, error) {
if p, ok := pm.plugins[name]; ok {
return p, nil
}
pc, err := pm.pg.Get(name, DockerCSIPluginCap, plugingetter.Lookup)
if err != nil {
return nil, err
}
pa, ok := pc.(plugingetter.PluginAddr)
if !ok {
return nil, fmt.Errorf("plugin does not implement PluginAddr interface")
}
p := pm.newNodePluginFunc(name, pc, pa, pm.secrets)
pm.plugins[name] = p
return p, nil
}

View file

@@ -0,0 +1,459 @@
package plugin
import (
"context"
"fmt"
"path/filepath"
"sync"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
)
// SecretGetter is a reimplementation of the exec.SecretGetter interface in the
// scope of the plugin package. This avoids needing to import exec into the
// plugin package.
type SecretGetter interface {
Get(secretID string) (*api.Secret, error)
}
type NodePlugin interface {
GetPublishedPath(volumeID string) string
NodeGetInfo(ctx context.Context) (*api.NodeCSIInfo, error)
NodeStageVolume(ctx context.Context, req *api.VolumeAssignment) error
NodeUnstageVolume(ctx context.Context, req *api.VolumeAssignment) error
NodePublishVolume(ctx context.Context, req *api.VolumeAssignment) error
NodeUnpublishVolume(ctx context.Context, req *api.VolumeAssignment) error
}
type volumePublishStatus struct {
// stagingPath is the staging path of the volume
stagingPath string
// isPublished keeps track if the volume is published.
isPublished bool
// publishedPath is the published path of the volume
publishedPath string
}
type nodePlugin struct {
// name is the name of the plugin, which is used in the Driver.Name field.
name string
// socket is the path of the unix socket to connect to this plugin at
socket string
// scopePath gets the provided path relative to the plugin directory.
scopePath func(s string) string
// secrets is the SecretGetter to get volume secret data
secrets SecretGetter
// volumeMap is the map from volume ID to Volume. Will place a volume once it is staged,
// remove it from the map for unstage.
// TODO: Make this map persistent if the swarm node goes down
volumeMap map[string]*volumePublishStatus
// mu for volumeMap
mu sync.RWMutex
// staging indicates that the plugin has staging capabilities.
staging bool
// cc is the gRPC client connection
cc *grpc.ClientConn
// idClient is the CSI Identity Service client
idClient csi.IdentityClient
// nodeClient is the CSI Node Service client
nodeClient csi.NodeClient
}
const (
// TargetStagePath is the path within the plugin's scope at which the volume
// is to be staged. This does not need to be accessible or propagated outside
// of the plugin rootfs.
TargetStagePath string = "/data/staged"
// TargetPublishPath is the path within the plugin's scope at which the
// volume is to be published. This needs to be the plugin's PropagatedMount.
TargetPublishPath string = "/data/published"
)
func NewNodePlugin(name string, pc plugingetter.CompatPlugin, pa plugingetter.PluginAddr, secrets SecretGetter) NodePlugin {
return newNodePlugin(name, pc, pa, secrets)
}
// newNodePlugin returns a raw nodePlugin object, not behind an interface. this
// is useful for testing.
func newNodePlugin(name string, pc plugingetter.CompatPlugin, pa plugingetter.PluginAddr, secrets SecretGetter) *nodePlugin {
return &nodePlugin{
name: name,
socket: fmt.Sprintf("%s://%s", pa.Addr().Network(), pa.Addr().String()),
scopePath: pc.ScopedPath,
secrets: secrets,
volumeMap: map[string]*volumePublishStatus{},
}
}
// connect is a private method that sets up the identity client and node
// client from a grpc client. it exists separately so that testing code can
// substitute in fake clients without a grpc connection
func (np *nodePlugin) connect(ctx context.Context) error {
// even though this is a unix socket, we must set WithInsecure or the
// connection will not be allowed.
cc, err := grpc.DialContext(ctx, np.socket, grpc.WithInsecure())
if err != nil {
return err
}
np.cc = cc
// first, probe the plugin, to ensure that it exists and is ready to go
idc := csi.NewIdentityClient(cc)
np.idClient = idc
np.nodeClient = csi.NewNodeClient(cc)
return np.init(ctx)
}
func (np *nodePlugin) Client(ctx context.Context) (csi.NodeClient, error) {
if np.nodeClient == nil {
if err := np.connect(ctx); err != nil {
return nil, err
}
}
return np.nodeClient, nil
}
func (np *nodePlugin) init(ctx context.Context) error {
probe, err := np.idClient.Probe(ctx, &csi.ProbeRequest{})
if err != nil {
return err
}
if probe.Ready != nil && !probe.Ready.Value {
return status.Error(codes.FailedPrecondition, "Plugin is not Ready")
}
c, err := np.Client(ctx)
if err != nil {
return err
}
resp, err := c.NodeGetCapabilities(ctx, &csi.NodeGetCapabilitiesRequest{})
if err != nil {
// TODO(ameyag): handle
return err
}
if resp == nil {
return nil
}
log.G(ctx).Debugf("plugin advertises %d capabilities", len(resp.Capabilities))
for _, c := range resp.Capabilities {
if rpc := c.GetRpc(); rpc != nil {
log.G(ctx).Debugf("plugin has capability %s", rpc)
switch rpc.Type {
case csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME:
np.staging = true
}
}
}
return nil
}
// GetPublishedPath returns the path at which the provided volume ID is
// published. This path is provided in terms of absolute location on the host,
// not the location in the plugins' scope.
//
// Returns an empty string if the volume does not exist.
func (np *nodePlugin) GetPublishedPath(volumeID string) string {
np.mu.RLock()
defer np.mu.RUnlock()
if volInfo, ok := np.volumeMap[volumeID]; ok {
if volInfo.isPublished {
return np.scopePath(volInfo.publishedPath)
}
}
return ""
}
func (np *nodePlugin) NodeGetInfo(ctx context.Context) (*api.NodeCSIInfo, error) {
c, err := np.Client(ctx)
if err != nil {
return nil, err
}
resp, err := c.NodeGetInfo(ctx, &csi.NodeGetInfoRequest{})
if err != nil {
return nil, err
}
i := makeNodeInfo(resp)
i.PluginName = np.name
return i, nil
}
func (np *nodePlugin) NodeStageVolume(ctx context.Context, req *api.VolumeAssignment) error {
np.mu.Lock()
defer np.mu.Unlock()
if !np.staging {
return nil
}
stagingTarget := stagePath(req)
// Check arguments
if len(req.VolumeID) == 0 {
return status.Error(codes.InvalidArgument, "VolumeID missing in request")
}
c, err := np.Client(ctx)
if err != nil {
return err
}
_, err = c.NodeStageVolume(ctx, &csi.NodeStageVolumeRequest{
VolumeId: req.VolumeID,
StagingTargetPath: stagingTarget,
Secrets: np.makeSecrets(req),
VolumeCapability: makeCapability(req.AccessMode),
VolumeContext: req.VolumeContext,
PublishContext: req.PublishContext,
})
if err != nil {
return err
}
v := &volumePublishStatus{
stagingPath: stagingTarget,
}
np.volumeMap[req.ID] = v
log.G(ctx).Infof("volume staged to path %s", stagingTarget)
return nil
}
func (np *nodePlugin) NodeUnstageVolume(ctx context.Context, req *api.VolumeAssignment) error {
np.mu.Lock()
defer np.mu.Unlock()
if !np.staging {
return nil
}
stagingTarget := stagePath(req)
// Check arguments
if len(req.VolumeID) == 0 {
return status.Error(codes.FailedPrecondition, "VolumeID missing in request")
}
c, err := np.Client(ctx)
if err != nil {
return err
}
// we must unpublish before we unstage. verify here that the volume is not
// published.
if v, ok := np.volumeMap[req.ID]; ok {
if v.isPublished {
return status.Errorf(codes.FailedPrecondition, "Volume %s is not unpublished", req.ID)
}
return nil
}
_, err = c.NodeUnstageVolume(ctx, &csi.NodeUnstageVolumeRequest{
VolumeId: req.VolumeID,
StagingTargetPath: stagingTarget,
})
if err != nil {
return err
}
// if the volume doesn't exist in the volumeMap, deleting has no effect.
delete(np.volumeMap, req.ID)
log.G(ctx).Info("volume unstaged")
return nil
}
func (np *nodePlugin) NodePublishVolume(ctx context.Context, req *api.VolumeAssignment) error {
// Check arguments
if len(req.VolumeID) == 0 {
return status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
np.mu.Lock()
defer np.mu.Unlock()
publishTarget := publishPath(req)
// some volumes do not require staging. we can check this by checking the
// staging variable, or we can just see if there is a staging path in the
// map.
var stagingPath string
if vs, ok := np.volumeMap[req.ID]; ok {
stagingPath = vs.stagingPath
} else {
return status.Error(codes.FailedPrecondition, "volume not staged")
}
c, err := np.Client(ctx)
if err != nil {
return err
}
_, err = c.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{
VolumeId: req.VolumeID,
TargetPath: publishTarget,
StagingTargetPath: stagingPath,
VolumeCapability: makeCapability(req.AccessMode),
Secrets: np.makeSecrets(req),
VolumeContext: req.VolumeContext,
PublishContext: req.PublishContext,
})
if err != nil {
return err
}
status, ok := np.volumeMap[req.ID]
if !ok {
status = &volumePublishStatus{}
np.volumeMap[req.ID] = status
}
status.isPublished = true
status.publishedPath = publishTarget
log.G(ctx).Infof("volume published to path %s", publishTarget)
return nil
}
func (np *nodePlugin) NodeUnpublishVolume(ctx context.Context, req *api.VolumeAssignment) error {
// Check arguments
if len(req.VolumeID) == 0 {
return status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
np.mu.Lock()
defer np.mu.Unlock()
publishTarget := publishPath(req)
c, err := np.Client(ctx)
if err != nil {
return err
}
_, err = c.NodeUnpublishVolume(ctx, &csi.NodeUnpublishVolumeRequest{
VolumeId: req.VolumeID,
TargetPath: publishTarget,
})
if err != nil {
return err
}
if v, ok := np.volumeMap[req.ID]; ok {
v.publishedPath = ""
v.isPublished = false
return nil
}
log.G(ctx).Info("volume unpublished")
return nil
}
func (np *nodePlugin) makeSecrets(v *api.VolumeAssignment) map[string]string {
// this should never happen, but program defensively.
if v == nil {
return nil
}
secrets := make(map[string]string, len(v.Secrets))
for _, secret := range v.Secrets {
// TODO(dperny): handle error from Get
value, _ := np.secrets.Get(secret.Secret)
if value != nil {
secrets[secret.Key] = string(value.Spec.Data)
}
}
return secrets
}
// makeNodeInfo converts a csi.NodeGetInfoResponse object into a swarmkit NodeCSIInfo
// object.
func makeNodeInfo(csiNodeInfo *csi.NodeGetInfoResponse) *api.NodeCSIInfo {
return &api.NodeCSIInfo{
NodeID: csiNodeInfo.NodeId,
MaxVolumesPerNode: csiNodeInfo.MaxVolumesPerNode,
}
}
func makeCapability(am *api.VolumeAccessMode) *csi.VolumeCapability {
var mode csi.VolumeCapability_AccessMode_Mode
switch am.Scope {
case api.VolumeScopeSingleNode:
switch am.Sharing {
case api.VolumeSharingNone, api.VolumeSharingOneWriter, api.VolumeSharingAll:
mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
case api.VolumeSharingReadOnly:
mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY
}
case api.VolumeScopeMultiNode:
switch am.Sharing {
case api.VolumeSharingReadOnly:
mode = csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
case api.VolumeSharingOneWriter:
mode = csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER
case api.VolumeSharingAll:
mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
}
}
capability := &csi.VolumeCapability{
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: mode,
},
}
if block := am.GetBlock(); block != nil {
capability.AccessType = &csi.VolumeCapability_Block{
// Block type is empty.
Block: &csi.VolumeCapability_BlockVolume{},
}
}
if mount := am.GetMount(); mount != nil {
capability.AccessType = &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{
FsType: mount.FsType,
MountFlags: mount.MountFlags,
},
}
}
return capability
}
// stagePath returns the staging path for a given volume assignment
func stagePath(v *api.VolumeAssignment) string {
// this exists so that the staging path is determined the same way across
// multiple methods, and cannot be changed in one place but forgotten in
// another
return filepath.Join(TargetStagePath, v.ID)
}
// publishPath returns the publishing path for a given volume assignment
func publishPath(v *api.VolumeAssignment) string {
// ditto as stagePath
return filepath.Join(TargetPublishPath, v.ID)
}
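// For example, a VolumeAssignment with ID "volume-1" (a hypothetical ID)
// would be staged at /data/staged/volume-1 and published at
// /data/published/volume-1, both within the plugin's scope.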

vendor/github.com/docker/swarmkit/agent/csi/volumes.go
View file

@@ -0,0 +1,227 @@
package csi
import (
"context"
"fmt"
"sync"
"github.com/sirupsen/logrus"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/swarmkit/agent/csi/plugin"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/volumequeue"
)
// volumeState keeps track of the state of a volume on this node.
type volumeState struct {
// volume is the actual VolumeAssignment for this volume
volume *api.VolumeAssignment
// remove is true if the volume is to be removed, or false if it should be
// active.
remove bool
// removeCallback is called when the volume is successfully removed.
removeCallback func(id string)
}
// volumes is a map that keeps all the currently available volumes to the agent
// mapped by volume ID.
type volumes struct {
// mu guards access to the volumes map.
mu sync.RWMutex
// volumes is a mapping of volume ID to volumeState
volumes map[string]volumeState
// plugins is the PluginManager, which provides translation to the CSI RPCs
plugins plugin.PluginManager
// pendingVolumes is a VolumeQueue which manages which volumes are
// processed and when.
pendingVolumes *volumequeue.VolumeQueue
}
// NewManager returns a place to store volumes.
func NewManager(pg plugingetter.PluginGetter, secrets exec.SecretGetter) exec.VolumesManager {
r := &volumes{
volumes: map[string]volumeState{},
plugins: plugin.NewPluginManager(pg, secrets),
pendingVolumes: volumequeue.NewVolumeQueue(),
}
go r.retryVolumes()
return r
}
// retryVolumes runs in a goroutine to retry failing volumes.
func (r *volumes) retryVolumes() {
ctx := log.WithModule(context.Background(), "node/agent/csi")
for {
vid, attempt := r.pendingVolumes.Wait()
dctx := log.WithFields(ctx, logrus.Fields{
"volume.id": vid,
"attempt": fmt.Sprintf("%d", attempt),
})
// this case occurs when the Stop method has been called on
// pendingVolumes, and means that we should pack up and exit.
if vid == "" && attempt == 0 {
break
}
r.tryVolume(dctx, vid, attempt)
}
}
// tryVolume synchronously tries one volume. it puts the volume back into the
// queue if the attempt fails.
func (r *volumes) tryVolume(ctx context.Context, id string, attempt uint) {
r.mu.RLock()
vs, ok := r.volumes[id]
r.mu.RUnlock()
if !ok {
return
}
if !vs.remove {
if err := r.publishVolume(ctx, vs.volume); err != nil {
log.G(ctx).WithError(err).Info("publishing volume failed")
r.pendingVolumes.Enqueue(id, attempt+1)
}
} else {
if err := r.unpublishVolume(ctx, vs.volume); err != nil {
log.G(ctx).WithError(err).Info("unpublishing volume failed")
r.pendingVolumes.Enqueue(id, attempt+1)
} else {
// if unpublishing was successful, then call the callback
vs.removeCallback(id)
}
}
}
// Get returns the published path for the provided volume ID. If the published path is unavailable, it returns an empty string and an error.
func (r *volumes) Get(volumeID string) (string, error) {
r.mu.Lock()
defer r.mu.Unlock()
if vs, ok := r.volumes[volumeID]; ok {
if vs.remove {
// TODO(dperny): use a structured error
return "", fmt.Errorf("volume being removed")
}
if p, err := r.plugins.Get(vs.volume.Driver.Name); err == nil {
path := p.GetPublishedPath(volumeID)
if path != "" {
return path, nil
}
// don't put this line here, it spams like crazy.
// log.L.WithField("method", "(*volumes).Get").Debugf("Path not published for volume:%v", volumeID)
} else {
return "", err
}
}
return "", fmt.Errorf("%w: published path is unavailable", exec.ErrDependencyNotReady)
}
// Add adds one or more volumes to the volume map.
func (r *volumes) Add(volumes ...api.VolumeAssignment) {
r.mu.Lock()
defer r.mu.Unlock()
for _, volume := range volumes {
// if we get an Add operation, then we will always restart the retries.
v := volume.Copy()
r.volumes[volume.ID] = volumeState{
volume: v,
}
// enqueue the volume so that we process it
r.pendingVolumes.Enqueue(volume.ID, 0)
log.L.WithField("method", "(*volumes).Add").Debugf("Add Volume: %v", volume.VolumeID)
}
}
// Remove removes one or more volumes from this manager. callback is called
// whenever the removal is successful.
func (r *volumes) Remove(volumes []api.VolumeAssignment, callback func(id string)) {
r.mu.Lock()
defer r.mu.Unlock()
for _, volume := range volumes {
// if we get a Remove call, then we always restart the retries and
// attempt removal.
v := volume.Copy()
r.volumes[volume.ID] = volumeState{
volume: v,
remove: true,
removeCallback: callback,
}
r.pendingVolumes.Enqueue(volume.ID, 0)
}
}
func (r *volumes) publishVolume(ctx context.Context, assignment *api.VolumeAssignment) error {
log.G(ctx).Info("attempting to publish volume")
p, err := r.plugins.Get(assignment.Driver.Name)
if err != nil {
return err
}
// even though this may have succeeded already, the call to NodeStageVolume
// is idempotent, so we can retry it every time.
if err := p.NodeStageVolume(ctx, assignment); err != nil {
return err
}
log.G(ctx).Debug("staging volume succeeded, attempting to publish volume")
return p.NodePublishVolume(ctx, assignment)
}
func (r *volumes) unpublishVolume(ctx context.Context, assignment *api.VolumeAssignment) error {
log.G(ctx).Info("attempting to unpublish volume")
p, err := r.plugins.Get(assignment.Driver.Name)
if err != nil {
return err
}
if err := p.NodeUnpublishVolume(ctx, assignment); err != nil {
return err
}
return p.NodeUnstageVolume(ctx, assignment)
}
func (r *volumes) Plugins() exec.VolumePluginManager {
return r.plugins
}
// taskRestrictedVolumesProvider restricts volume IDs to those referenced by the task.
type taskRestrictedVolumesProvider struct {
volumes exec.VolumeGetter
volumeIDs map[string]struct{}
}
func (sp *taskRestrictedVolumesProvider) Get(volumeID string) (string, error) {
if _, ok := sp.volumeIDs[volumeID]; !ok {
return "", fmt.Errorf("task not authorized to access volume %s", volumeID)
}
return sp.volumes.Get(volumeID)
}
// Restrict provides a getter that only allows access to the volumes
// referenced by the task.
func Restrict(volumes exec.VolumeGetter, t *api.Task) exec.VolumeGetter {
vids := map[string]struct{}{}
for _, v := range t.Volumes {
vids[v.ID] = struct{}{}
}
return &taskRestrictedVolumesProvider{volumes: volumes, volumeIDs: vids}
}

View file

@@ -1,7 +1,10 @@
package agent
import (
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/swarmkit/agent/configs"
"github.com/docker/swarmkit/agent/csi"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/agent/secrets"
"github.com/docker/swarmkit/api"
@@ -10,15 +13,18 @@ import (
type dependencyManager struct {
secrets exec.SecretsManager
configs exec.ConfigsManager
volumes exec.VolumesManager
}
// NewDependencyManager creates a dependency manager object that wraps
// objects which provide access to various dependency types.
func NewDependencyManager() exec.DependencyManager {
return &dependencyManager{
func NewDependencyManager(pg plugingetter.PluginGetter) exec.DependencyManager {
d := &dependencyManager{
secrets: secrets.NewManager(),
configs: configs.NewManager(),
}
d.volumes = csi.NewManager(pg, d.secrets)
return d
}
func (d *dependencyManager) Secrets() exec.SecretsManager {
@@ -29,9 +35,14 @@ func (d *dependencyManager) Configs() exec.ConfigsManager {
return d.configs
}
func (d *dependencyManager) Volumes() exec.VolumesManager {
return d.volumes
}
type dependencyGetter struct {
secrets exec.SecretGetter
configs exec.ConfigGetter
volumes exec.VolumeGetter
}
func (d *dependencyGetter) Secrets() exec.SecretGetter {
@@ -42,11 +53,16 @@ func (d *dependencyGetter) Configs() exec.ConfigGetter {
return d.configs
}
func (d *dependencyGetter) Volumes() exec.VolumeGetter {
return d.volumes
}
// Restrict provides getters that only allows access to the dependencies
// referenced by the task.
func Restrict(dependencies exec.DependencyManager, t *api.Task) exec.DependencyGetter {
return &dependencyGetter{
secrets: secrets.Restrict(dependencies.Secrets(), t),
configs: configs.Restrict(dependencies.Configs(), t),
volumes: csi.Restrict(dependencies.Volumes(), t),
}
}
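// A minimal usage sketch (the variable names are assumptions):
//
//	deps := NewDependencyManager(pluginGetter)
//	getter := Restrict(deps, task)
//	secret, err := getter.Secrets().Get("my-secret-id")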

View file

@@ -29,6 +29,11 @@ var (
// ErrTaskNoop is returned when a subsequent call to Do will not result in
// advancing the task. Callers should avoid calling Do until the task has been updated.
ErrTaskNoop = errors.New("exec: task noop")
// ErrDependencyNotReady is returned if a given dependency can be accessed
// through the Getter, but is not yet ready to be used. This is most
// relevant for Volumes, which must be staged and published on the node.
ErrDependencyNotReady error = errors.New("dependency not ready")
)
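// Getters are expected to wrap ErrDependencyNotReady (e.g. via fmt.Errorf
// with %w) so that callers can detect it; a minimal sketch (the variable
// names are assumptions):
//
//	if _, err := deps.Volumes().Get(volumeID); errors.Is(err, ErrDependencyNotReady) {
//		// the volume is known but not yet staged/published; retry later
//	}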
// ExitCoder is implemented by errors that have an exit code.
@@ -65,17 +70,14 @@ func (t temporary) Temporary() bool { return true }
// IsTemporary returns true if the error or a recursive cause returns true for
// temporary.
func IsTemporary(err error) bool {
for err != nil {
if tmp, ok := err.(Temporary); ok && tmp.Temporary() {
return true
}
if tmp, ok := err.(Temporary); ok && tmp.Temporary() {
return true
}
cause := errors.Cause(err)
if cause == err {
break
}
cause := errors.Cause(err)
err = cause
if tmp, ok := cause.(Temporary); ok && tmp.Temporary() {
return true
}
return false

View file

@@ -35,11 +35,18 @@ type ConfigsProvider interface {
Configs() ConfigsManager
}
// VolumesProvider is implemented by objects that can store volumes,
// typically an executor.
type VolumesProvider interface {
Volumes() VolumesManager
}
// DependencyManager is a meta-object that can keep track of typed objects
// such as secrets and configs.
type DependencyManager interface {
SecretsProvider
ConfigsProvider
VolumesProvider
}
// DependencyGetter is a meta-object that can provide access to typed objects
@@ -47,6 +54,7 @@ type DependencyManager interface {
type DependencyGetter interface {
Secrets() SecretGetter
Configs() ConfigGetter
Volumes() VolumeGetter
}
// SecretGetter contains secret data necessary for the Controller.
@@ -80,3 +88,39 @@ type ConfigsManager interface {
Remove(configs []string) // remove the configs by ID
Reset() // remove all configs
}
// VolumeGetter contains volume data necessary for the Controller.
type VolumeGetter interface {
// Get returns the published path of the volume with the given volume ID, if
// available. When the volume is not available, an error is returned.
Get(volumeID string) (string, error)
}
// VolumesManager is the interface for volume storage and updates.
type VolumesManager interface {
VolumeGetter
// Add adds one or more volumes
Add(volumes ...api.VolumeAssignment)
// Remove removes one or more volumes. The callback is called each time a
// volume is successfully removed with the ID of the volume removed.
//
// Remove takes a full VolumeAssignment because we may be instructed by the
// swarm manager to attempt removal of a Volume we don't know we have.
Remove(volumes []api.VolumeAssignment, callback func(string))
// Plugins returns the VolumePluginManager for this VolumesManager
Plugins() VolumePluginManager
}
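For illustration, a hedged sketch of removal with the callback, where vm is a VolumesManager and removedVolumes was built from the dispatcher's removed set:

	vm.Remove(removedVolumes, func(id string) {
		// invoked once per volume that was verifiably unpublished and removed
		log.G(ctx).Debugf("volume %s removed", id)
	})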
// PluginManager is the interface for accessing the volume plugin manager from
// the executor. This is identical to
// github.com/docker/swarmkit/agent/csi/plugin.PluginManager, except the former
// also includes a Get method for the VolumesManager to use. This does not
// contain that Get method, to avoid having to import the Plugin type, and
// because in this context, it is not needed.
type VolumePluginManager interface {
// NodeInfo returns the NodeCSIInfo for each active plugin. Plugins which
// are added through Set but to which no connection has yet been
// successfully established will not be included.
NodeInfo(ctx context.Context) ([]*api.NodeCSIInfo, error)
}

View file

@ -15,28 +15,48 @@ type StatusReporter interface {
UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error
}
// Reporter receives updates to both task and volume status.
type Reporter interface {
StatusReporter
ReportVolumeUnpublished(ctx context.Context, volumeID string) error
}
type statusReporterFunc func(ctx context.Context, taskID string, status *api.TaskStatus) error
func (fn statusReporterFunc) UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error {
return fn(ctx, taskID, status)
}
type volumeReporterFunc func(ctx context.Context, volumeID string) error
func (fn volumeReporterFunc) ReportVolumeUnpublished(ctx context.Context, volumeID string) error {
return fn(ctx, volumeID)
}
type statusReporterCombined struct {
statusReporterFunc
volumeReporterFunc
}
// statusReporter creates a reliable StatusReporter that will always succeed.
// It handles several tasks at once, ensuring all statuses are reported.
//
// The reporter will continue reporting the current status until it succeeds.
type statusReporter struct {
	reporter Reporter
	statuses map[string]*api.TaskStatus
	// volumes is a set of volumes which are to be reported unpublished.
	volumes map[string]struct{}

	mu     sync.Mutex
	cond   sync.Cond
	closed bool
}
func newStatusReporter(ctx context.Context, upstream Reporter) *statusReporter {
r := &statusReporter{
reporter: upstream,
statuses: make(map[string]*api.TaskStatus),
volumes: make(map[string]struct{}),
}
r.cond.L = &r.mu
@ -65,6 +85,16 @@ func (sr *statusReporter) UpdateTaskStatus(ctx context.Context, taskID string, s
return nil
}
func (sr *statusReporter) ReportVolumeUnpublished(ctx context.Context, volumeID string) error {
sr.mu.Lock()
defer sr.mu.Unlock()
sr.volumes[volumeID] = struct{}{}
sr.cond.Signal()
return nil
}
func (sr *statusReporter) Close() error {
sr.mu.Lock()
defer sr.mu.Unlock()
@ -92,7 +122,7 @@ func (sr *statusReporter) run(ctx context.Context) {
}()
for {
if len(sr.statuses) == 0 && len(sr.volumes) == 0 {
sr.cond.Wait()
}
@ -125,5 +155,23 @@ func (sr *statusReporter) run(ctx context.Context) {
}
}
}
for volumeID := range sr.volumes {
delete(sr.volumes, volumeID)
sr.mu.Unlock()
err := sr.reporter.ReportVolumeUnpublished(ctx, volumeID)
sr.mu.Lock()
// reporter might be closed during ReportVolumeUnpublished call
if sr.closed {
return
}
if err != nil {
log.G(ctx).WithError(err).Error("status reporter failed to report volume status to agent")
sr.volumes[volumeID] = struct{}{}
}
}
}
}

View file

@ -428,6 +428,24 @@ func (s *session) sendTaskStatuses(ctx context.Context, updates ...*api.UpdateTa
return updates[n:], nil
}
// reportVolumeUnpublished sends a status update to the manager reporting that
// all volumes in the slice are unpublished.
func (s *session) reportVolumeUnpublished(ctx context.Context, volumes []string) error {
updates := []*api.UpdateVolumeStatusRequest_VolumeStatusUpdate{}
for _, volume := range volumes {
updates = append(updates, &api.UpdateVolumeStatusRequest_VolumeStatusUpdate{
ID: volume,
Unpublished: true,
})
}
client := api.NewDispatcherClient(s.conn.ClientConn)
_, err := client.UpdateVolumeStatus(ctx, &api.UpdateVolumeStatusRequest{
SessionID: s.sessionID,
Updates: updates,
})
return err
}
// sendError is used to send errors to errs channel and trigger session recreation
func (s *session) sendError(err error) {
select {

View file

@ -23,11 +23,11 @@ type Worker interface {
// It is not safe to call any worker function after that.
Close()
// Assign assigns a complete set of tasks and configs/secrets/volumes to a
// worker. Any items not included in this set will be removed.
Assign(ctx context.Context, assignments []*api.AssignmentChange) error
// Updates updates an incremental set of tasks or configs/secrets/volumes of
// the worker. Any items not included either in added or removed will
// remain untouched.
Update(ctx context.Context, assignments []*api.AssignmentChange) error
@ -37,7 +37,7 @@ type Worker interface {
// by the worker.
//
// The listener will be removed if the context is cancelled.
Listen(ctx context.Context, reporter Reporter)
// Report resends the status of all tasks controlled by this worker.
Report(ctx context.Context, reporter StatusReporter)
@ -51,7 +51,7 @@ type Worker interface {
// statusReporterKey protects removal map from panic.
type statusReporterKey struct {
Reporter
}
type worker struct {
@ -152,7 +152,12 @@ func (w *worker) Assign(ctx context.Context, assignments []*api.AssignmentChange
return err
}
err = reconcileTaskState(ctx, w, assignments, true)
if err != nil {
return err
}
return reconcileVolumes(ctx, w, assignments)
}
// Update updates the set of tasks, configs, and secrets for the worker.
@ -184,7 +189,12 @@ func (w *worker) Update(ctx context.Context, assignments []*api.AssignmentChange
return err
}
err = reconcileTaskState(ctx, w, assignments, false)
if err != nil {
return err
}
return reconcileVolumes(ctx, w, assignments)
}
func reconcileTaskState(ctx context.Context, w *worker, assignments []*api.AssignmentChange, fullSnapshot bool) error {
@ -409,7 +419,57 @@ func reconcileConfigs(ctx context.Context, w *worker, assignments []*api.Assignm
return nil
}
// reconcileVolumes reconciles the CSI volumes on this node. It does not need
// fullSnapshot like other reconcile functions because volumes are non-trivial
// and are never reset.
func reconcileVolumes(ctx context.Context, w *worker, assignments []*api.AssignmentChange) error {
var (
updatedVolumes []api.VolumeAssignment
removedVolumes []api.VolumeAssignment
)
for _, a := range assignments {
if r := a.Assignment.GetVolume(); r != nil {
switch a.Action {
case api.AssignmentChange_AssignmentActionUpdate:
updatedVolumes = append(updatedVolumes, *r)
case api.AssignmentChange_AssignmentActionRemove:
removedVolumes = append(removedVolumes, *r)
}
}
}
volumesProvider, ok := w.executor.(exec.VolumesProvider)
if !ok {
if len(updatedVolumes) != 0 || len(removedVolumes) != 0 {
log.G(ctx).Warn("volumes update ignored; executor does not support volumes")
}
return nil
}
volumes := volumesProvider.Volumes()
log.G(ctx).WithFields(logrus.Fields{
"len(updatedVolumes)": len(updatedVolumes),
"len(removedVolumes)": len(removedVolumes),
}).Debug("(*worker).reconcileVolumes")
volumes.Remove(removedVolumes, func(id string) {
w.mu.RLock()
defer w.mu.RUnlock()
for key := range w.listeners {
if err := key.Reporter.ReportVolumeUnpublished(ctx, id); err != nil {
log.G(ctx).WithError(err).Errorf("failed reporting volume unpublished for reporter %v", key.Reporter)
}
}
})
volumes.Add(updatedVolumes...)
return nil
}
func (w *worker) Listen(ctx context.Context, reporter Reporter) {
w.mu.Lock()
defer w.mu.Unlock()
@ -526,8 +586,8 @@ func (w *worker) updateTaskStatus(ctx context.Context, tx *bolt.Tx, taskID strin
// broadcast the task status out.
for key := range w.listeners {
if err := key.Reporter.UpdateTaskStatus(ctx, taskID, status); err != nil {
log.G(ctx).WithError(err).Errorf("failed updating status for reporter %v", key.Reporter)
}
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -246,6 +246,34 @@ service Control {
rpc RemoveResource(RemoveResourceRequest) returns (RemoveResourceResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// --- volumes APIs ---
// CreateVolume returns a `CreateVolumeResponse` with a `Volume` based on the
// provided `CreateVolumeRequest.VolumeSpec`.
// - Returns `InvalidArgument` if the `CreateVolumeRequest.VolumeSpec` is
// malformed.
rpc CreateVolume(CreateVolumeRequest) returns (CreateVolumeResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
// GetVolume returns a `GetVolumeResponse` with a Volume with the same ID
// as `GetVolumeRequest.ID`
rpc GetVolume(GetVolumeRequest) returns (GetVolumeResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
rpc UpdateVolume(UpdateVolumeRequest) returns (UpdateVolumeResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
rpc ListVolumes(ListVolumesRequest) returns (ListVolumesResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
rpc RemoveVolume(RemoveVolumeRequest) returns (RemoveVolumeResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
}
}
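As a hedged client-side sketch, the generated Go bindings can drive these RPCs roughly as follows; connection setup is elided, and conn is assumed to be an authenticated *grpc.ClientConn with the swarm-manager role:

	client := api.NewControlClient(conn)
	resp, err := client.CreateVolume(ctx, &api.CreateVolumeRequest{
		Spec: &api.VolumeSpec{ /* see VolumeSpec in specs.proto */ },
	})
	if err != nil {
		return err
	}
	// resp.Volume.ID is the swarmkit ID, distinct from the CSI VolumeID.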
message GetNodeRequest {
@ -787,3 +815,57 @@ message ListResourcesRequest {
message ListResourcesResponse {
repeated Resource resources = 1;
}
message CreateVolumeRequest {
VolumeSpec spec = 1;
}
message CreateVolumeResponse {
Volume volume = 1;
}
message GetVolumeRequest {
string volume_id = 1;
}
message GetVolumeResponse {
Volume volume = 1;
}
message UpdateVolumeRequest {
string volume_id = 1;
Version volume_version = 2;
VolumeSpec spec = 3;
}
message UpdateVolumeResponse {
Volume volume = 1;
}
message ListVolumesRequest {
message Filters {
repeated string names = 1;
repeated string id_prefixes = 2;
map<string, string> labels = 3;
repeated string name_prefixes = 4;
repeated string groups = 5;
repeated string drivers = 6;
}
Filters filters = 1;
}
message ListVolumesResponse {
repeated Volume volumes = 1;
}
message RemoveVolumeRequest {
string volume_id = 1;
// Force forces the volume to be deleted from swarmkit, regardless of
// whether its current state would permit such an action.
bool force = 2;
}
message RemoveVolumeResponse {}

File diff suppressed because it is too large Load diff

View file

@ -42,6 +42,13 @@ service Dispatcher { // maybe dispatch, al likes this
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
};
// UpdateVolumeStatus updates the status of a Volume. Like
// UpdateTaskStatus, the node should send such updates on every status
// change of its volumes.
rpc UpdateVolumeStatus(UpdateVolumeStatusRequest) returns (UpdateVolumeStatusResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
};
// Tasks is a stream of tasks state for node. Each message contains full list
// of tasks which should be run on node, if task is not present in that list,
// it should be terminated.
@ -155,10 +162,35 @@ message UpdateTaskStatusRequest {
repeated TaskStatusUpdate updates = 3;
}
message UpdateTaskStatusResponse{
// void
}
message UpdateVolumeStatusRequest {
string session_id = 1;
message VolumeStatusUpdate {
// ID is the ID of the volume being updated. This is the Swarmkit ID,
// not the CSI VolumeID.
string id = 1;
// Unpublished is set to true when the volume is affirmatively
// unpublished on the Node side. We don't need to report that a Volume
		// is published on the node; as soon as the Volume is assigned to
// the Node, we must assume that it has been published until informed
// otherwise.
//
// Further, the Node must not send unpublished = true unless it will
// definitely no longer attempt to call NodePublishVolume.
bool unpublished = 2;
}
repeated VolumeStatusUpdate updates = 2;
}
message UpdateVolumeStatusResponse {
// empty on purpose
}
message TasksRequest {
string session_id = 1;
}
@ -178,6 +210,7 @@ message Assignment {
Task task = 1;
Secret secret = 2;
Config config = 3;
VolumeAssignment volume = 4;
}
}

File diff suppressed because it is too large Load diff

View file

@ -269,6 +269,10 @@ message Task {
// JobIteration is the iteration number of the Job-mode Service that this
// task belongs to.
Version job_iteration = 16;
// Volumes is a list of VolumeAttachments for this task. It specifies which
// volumes this task is allocated.
repeated VolumeAttachment volumes = 17;
}
// NetworkAttachment specifies the network parameters of attachment to
@ -510,3 +514,43 @@ message Extension {
// // Indices, with values expressed as Go templates.
//repeated IndexEntry index_templates = 6;
}
// Volume is the top-level object describing a volume usable by Swarmkit. The
// Volume contains the user's VolumeSpec, the Volume's status, and the Volume
// object that was returned by the CSI Plugin when the volume was created.
message Volume {
option (docker.protobuf.plugin.store_object) = {
watch_selectors: {
id: true
id_prefix: true
name: true
name_prefix: true
custom: true
custom_prefix: true
}
};
// ID is the swarmkit-internal ID for this volume object. This has no
// relation to the CSI volume identifier provided by the CSI Plugin.
string id = 1;
Meta meta = 2 [(gogoproto.nullable) = false];
// Spec is the desired state of the Volume, as provided by the user.
VolumeSpec spec = 3 [(gogoproto.nullable) = false];
// PublishStatus is the status of the volume as it pertains to the various
// nodes it is in use on.
repeated VolumePublishStatus publish_status = 4;
// VolumeInfo contains information about the volume originating from the
// CSI plugin when the volume is created.
VolumeInfo volume_info = 5;
// PendingDelete indicates that this Volume is being removed from Swarm.
// Before a Volume can be removed, we must call the DeleteVolume on the
// Controller. Because of this, we cannot immediately remove the Volume
// when a user wishes to delete it. Instead, we will mark a Volume with
// PendingDelete = true, which instructs Swarm to go through the work of
// removing the volume and then delete it when finished.
bool pending_delete = 6;
}
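A hedged sketch of the two-phase removal this enables; plugin and deleteFromStore are stand-ins for the manager's CSI and store plumbing, not real swarmkit helpers:

	vol.PendingDelete = true // phase 1: persist the intent before calling the plugin
	// ... later, a reconciliation loop observes PendingDelete:
	if err := plugin.DeleteVolume(ctx, vol); err == nil {
		deleteFromStore(vol.ID) // phase 2: drop the object only after the plugin confirms
	}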

View file

@ -6,10 +6,10 @@ package api
import (
context "context"
fmt "fmt"
raftpb "github.com/coreos/etcd/raft/raftpb"
github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
raftselector "github.com/docker/swarmkit/manager/raftselector"
proto "github.com/gogo/protobuf/proto"
raftpb "go.etcd.io/etcd/raft/v3/raftpb"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
metadata "google.golang.org/grpc/metadata"
@ -532,6 +532,7 @@ type StoreAction struct {
// *StoreAction_Resource
// *StoreAction_Extension
// *StoreAction_Config
// *StoreAction_Volume
Target isStoreAction_Target `protobuf_oneof:"target"`
}
@ -600,6 +601,9 @@ type StoreAction_Extension struct {
type StoreAction_Config struct {
Config *Config `protobuf:"bytes,10,opt,name=config,proto3,oneof" json:"config,omitempty"`
}
type StoreAction_Volume struct {
Volume *Volume `protobuf:"bytes,11,opt,name=volume,proto3,oneof" json:"volume,omitempty"`
}
func (*StoreAction_Node) isStoreAction_Target() {}
func (*StoreAction_Service) isStoreAction_Target() {}
@ -610,6 +614,7 @@ func (*StoreAction_Secret) isStoreAction_Target() {}
func (*StoreAction_Resource) isStoreAction_Target() {}
func (*StoreAction_Extension) isStoreAction_Target() {}
func (*StoreAction_Config) isStoreAction_Target() {}
func (*StoreAction_Volume) isStoreAction_Target() {}
func (m *StoreAction) GetTarget() isStoreAction_Target {
if m != nil {
@ -681,6 +686,13 @@ func (m *StoreAction) GetConfig() *Config {
return nil
}
func (m *StoreAction) GetVolume() *Volume {
if x, ok := m.GetTarget().(*StoreAction_Volume); ok {
return x.Volume
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*StoreAction) XXX_OneofWrappers() []interface{} {
return []interface{}{
@ -693,6 +705,7 @@ func (*StoreAction) XXX_OneofWrappers() []interface{} {
(*StoreAction_Resource)(nil),
(*StoreAction_Extension)(nil),
(*StoreAction_Config)(nil),
(*StoreAction_Volume)(nil),
}
}
@ -718,72 +731,73 @@ func init() {
}
var fileDescriptor_d2c32e1e3c930c15 = []byte{
// 1046 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0x41, 0x73, 0xdb, 0x44,
0x14, 0xc7, 0x25, 0x5b, 0x75, 0x92, 0xe7, 0x36, 0x09, 0x5b, 0x12, 0x14, 0xb5, 0x28, 0xae, 0xdb,
0x19, 0x9c, 0x90, 0xc8, 0x83, 0xcb, 0x4c, 0x99, 0x42, 0x0f, 0x71, 0xe2, 0x99, 0x98, 0xb4, 0x4e,
0x47, 0x49, 0x4a, 0x6f, 0x41, 0x96, 0x36, 0xae, 0xb0, 0xad, 0x35, 0xbb, 0x6b, 0x07, 0x2e, 0x4c,
0x8f, 0x90, 0x13, 0x37, 0x18, 0x66, 0x3a, 0x1c, 0xe0, 0xdc, 0x0f, 0xc0, 0x27, 0xc8, 0x70, 0xea,
0x8d, 0x9e, 0x32, 0xd4, 0xb9, 0xc3, 0x17, 0xe0, 0xc0, 0xec, 0x4a, 0x4a, 0x42, 0x22, 0x3b, 0x3e,
0x70, 0xd2, 0x8e, 0xf7, 0xf7, 0x7f, 0xff, 0xb7, 0xbb, 0x6f, 0xdf, 0x1a, 0x16, 0x1a, 0x3e, 0x7f,
0xd6, 0xad, 0x5b, 0x2e, 0x69, 0x17, 0x3d, 0xe2, 0x36, 0x31, 0x2d, 0xb2, 0x7d, 0x87, 0xb6, 0x9b,
0x3e, 0x2f, 0x3a, 0x1d, 0xbf, 0x48, 0x9d, 0x3d, 0x6e, 0x75, 0x28, 0xe1, 0x04, 0xa1, 0x70, 0xde,
0x8a, 0xe7, 0xad, 0xde, 0x07, 0xc6, 0xd2, 0x25, 0x72, 0x52, 0xff, 0x02, 0xbb, 0x9c, 0x85, 0x11,
0x8c, 0xc5, 0x4b, 0x68, 0xfe, 0x75, 0x07, 0xc7, 0xec, 0x42, 0x83, 0x58, 0x98, 0xbb, 0x9e, 0xe5,
0x93, 0xa2, 0xf8, 0xca, 0x4c, 0x8a, 0xbd, 0xbb, 0xf2, 0xdb, 0xa9, 0x9f, 0x49, 0xcc, 0x78, 0xbb,
0x41, 0x1a, 0x44, 0x0e, 0x8b, 0x62, 0x14, 0xfd, 0x7a, 0x6f, 0x88, 0x99, 0x24, 0xea, 0xdd, 0xbd,
0x62, 0xa7, 0xd5, 0x6d, 0xf8, 0x41, 0xf4, 0x09, 0x85, 0xf9, 0x97, 0x2a, 0x80, 0xed, 0xec, 0xf1,
0x47, 0xb8, 0x5d, 0xc7, 0x14, 0xdd, 0x86, 0x31, 0xe1, 0xb5, 0xeb, 0x7b, 0xba, 0x9a, 0x53, 0x0b,
0x5a, 0x19, 0xfa, 0x47, 0xf3, 0x19, 0x01, 0x54, 0xd7, 0xec, 0x8c, 0x98, 0xaa, 0x7a, 0x02, 0x0a,
0x88, 0x87, 0x05, 0x94, 0xca, 0xa9, 0x85, 0x89, 0x10, 0xaa, 0x11, 0x0f, 0x0b, 0x48, 0x4c, 0x55,
0x3d, 0x84, 0x40, 0x73, 0x3c, 0x8f, 0xea, 0x69, 0x41, 0xd8, 0x72, 0x8c, 0xca, 0x90, 0x61, 0xdc,
0xe1, 0x5d, 0xa6, 0x6b, 0x39, 0xb5, 0x90, 0x2d, 0xdd, 0xb1, 0x2e, 0xee, 0xb2, 0x75, 0x9a, 0xcd,
0x96, 0x64, 0xcb, 0xda, 0xe1, 0xd1, 0xbc, 0x62, 0x47, 0xca, 0xfc, 0x2d, 0xc8, 0x7e, 0x4a, 0xfc,
0xc0, 0xc6, 0x5f, 0x76, 0x31, 0xe3, 0x27, 0x36, 0xea, 0xa9, 0x4d, 0xfe, 0x27, 0x15, 0xae, 0x86,
0x0c, 0xeb, 0x90, 0x80, 0xe1, 0xd1, 0x56, 0xf5, 0x11, 0x8c, 0xb5, 0xa5, 0x2d, 0xd3, 0x53, 0xb9,
0x74, 0x21, 0x5b, 0x32, 0x87, 0x67, 0x67, 0xc7, 0x38, 0x7a, 0x1f, 0xa6, 0x28, 0x6e, 0x93, 0x1e,
0xf6, 0x76, 0xe3, 0x08, 0xe9, 0x5c, 0xba, 0xa0, 0x95, 0x53, 0xd3, 0x8a, 0x3d, 0x19, 0x4d, 0x85,
0x22, 0x96, 0x2f, 0xc3, 0xd5, 0x87, 0xd8, 0xe9, 0xe1, 0x78, 0x01, 0x25, 0xd0, 0xc4, 0x8e, 0xc9,
0xc4, 0x2e, 0xf7, 0x94, 0x6c, 0x7e, 0x0a, 0xae, 0x45, 0x31, 0xc2, 0x05, 0xe6, 0x1f, 0xc2, 0xdc,
0x63, 0x4a, 0x5c, 0xcc, 0x58, 0xc8, 0x32, 0xe6, 0x34, 0x4e, 0x1c, 0x16, 0xc4, 0xc2, 0xe4, 0x2f,
0x91, 0xc9, 0x94, 0x15, 0x96, 0x95, 0x15, 0x83, 0xf1, 0xfc, 0x7d, 0xed, 0xf9, 0x0f, 0x79, 0x25,
0x7f, 0x13, 0x8c, 0xa4, 0x68, 0x91, 0xd7, 0x06, 0xe8, 0x5b, 0x9c, 0x62, 0xa7, 0xfd, 0x7f, 0x58,
0xdd, 0x80, 0xb9, 0x84, 0x60, 0x91, 0xd3, 0x27, 0x30, 0x63, 0x63, 0x46, 0x5a, 0x3d, 0xbc, 0xe2,
0x79, 0x54, 0xa4, 0x13, 0xd9, 0x8c, 0x72, 0x9e, 0xf9, 0x25, 0x98, 0x3d, 0xaf, 0x8e, 0xca, 0x21,
0xa9, 0x66, 0x5a, 0x70, 0xbd, 0x1a, 0x70, 0x4c, 0x03, 0xa7, 0x25, 0xe2, 0xc4, 0x4e, 0xb3, 0x90,
0x3a, 0x31, 0xc9, 0xf4, 0x8f, 0xe6, 0x53, 0xd5, 0x35, 0x3b, 0xe5, 0x7b, 0xe8, 0x01, 0x64, 0x1c,
0x97, 0xfb, 0x24, 0x88, 0x6a, 0x65, 0x3e, 0xe9, 0xdc, 0xb6, 0x38, 0xa1, 0x78, 0x45, 0x62, 0x71,
0x11, 0x87, 0xa2, 0xfc, 0x3f, 0x1a, 0x64, 0xcf, 0xcc, 0xa2, 0x8f, 0x4f, 0xc2, 0x09, 0xab, 0xc9,
0xd2, 0xed, 0x4b, 0xc2, 0x6d, 0xf8, 0x81, 0x17, 0x07, 0x43, 0x56, 0x54, 0x41, 0x29, 0xb9, 0xe3,
0x7a, 0x92, 0x54, 0xdc, 0xcd, 0x75, 0x25, 0xac, 0x1e, 0x74, 0x0f, 0xc6, 0x18, 0xa6, 0x3d, 0xdf,
0xc5, 0xf2, 0x72, 0x66, 0x4b, 0x37, 0x12, 0xdd, 0x42, 0x64, 0x5d, 0xb1, 0x63, 0x5a, 0x18, 0x71,
0x87, 0x35, 0xa3, 0xcb, 0x9b, 0x68, 0xb4, 0xed, 0xb0, 0xa6, 0x30, 0x12, 0x9c, 0x30, 0x0a, 0x30,
0xdf, 0x27, 0xb4, 0xa9, 0x5f, 0x19, 0x6c, 0x54, 0x0b, 0x11, 0x61, 0x14, 0xd1, 0x42, 0xe8, 0xb6,
0xba, 0x8c, 0x63, 0xaa, 0x67, 0x06, 0x0b, 0x57, 0x43, 0x44, 0x08, 0x23, 0x1a, 0x7d, 0x08, 0x19,
0x86, 0x5d, 0x8a, 0xb9, 0x3e, 0x26, 0x75, 0x46, 0xf2, 0xca, 0x04, 0xb1, 0x2e, 0x5a, 0x8a, 0x1c,
0xa1, 0xfb, 0x30, 0x4e, 0x31, 0x23, 0x5d, 0xea, 0x62, 0x7d, 0x5c, 0xea, 0x6e, 0x26, 0x5e, 0xc3,
0x88, 0x59, 0x57, 0xec, 0x13, 0x1e, 0x3d, 0x80, 0x09, 0xfc, 0x15, 0xc7, 0x01, 0x13, 0x87, 0x37,
0x21, 0xc5, 0xef, 0x26, 0x89, 0x2b, 0x31, 0xb4, 0xae, 0xd8, 0xa7, 0x0a, 0x91, 0xb0, 0x4b, 0x82,
0x3d, 0xbf, 0xa1, 0xc3, 0xe0, 0x84, 0x57, 0x25, 0x21, 0x12, 0x0e, 0x59, 0xa1, 0xea, 0x91, 0x56,
0xb7, 0x8d, 0xf5, 0xec, 0x60, 0xd5, 0x13, 0x49, 0x08, 0x55, 0xc8, 0x96, 0xc7, 0x21, 0xc3, 0x1d,
0xda, 0xc0, 0x7c, 0xf1, 0x6f, 0x15, 0xa6, 0xce, 0x55, 0x13, 0x7a, 0x0f, 0xc6, 0x76, 0x6a, 0x1b,
0xb5, 0xcd, 0xcf, 0x6a, 0xd3, 0x8a, 0x61, 0x1c, 0xbc, 0xc8, 0xcd, 0x9e, 0x23, 0x76, 0x82, 0x66,
0x40, 0xf6, 0x03, 0x54, 0x82, 0xeb, 0x5b, 0xdb, 0x9b, 0x76, 0x65, 0x77, 0x65, 0x75, 0xbb, 0xba,
0x59, 0xdb, 0x5d, 0xb5, 0x2b, 0x2b, 0xdb, 0x95, 0x69, 0xd5, 0x98, 0x3b, 0x78, 0x91, 0x9b, 0x39,
0x27, 0x5a, 0xa5, 0xd8, 0xe1, 0xf8, 0x82, 0x66, 0xe7, 0xf1, 0x9a, 0xd0, 0xa4, 0x12, 0x35, 0x3b,
0x1d, 0x2f, 0x49, 0x63, 0x57, 0x1e, 0x6d, 0x3e, 0xa9, 0x4c, 0xa7, 0x13, 0x35, 0xb6, 0x6c, 0xb2,
0xc6, 0x3b, 0xdf, 0xfe, 0x62, 0x2a, 0xbf, 0xfd, 0x6a, 0x9e, 0x5f, 0x5d, 0xe9, 0xe7, 0x34, 0x68,
0xe2, 0x5e, 0xa3, 0x03, 0x15, 0xd0, 0xc5, 0xe6, 0x86, 0x96, 0x93, 0x76, 0x70, 0x60, 0x4b, 0x35,
0xac, 0x51, 0xf1, 0xa8, 0x93, 0xcd, 0xfc, 0xfe, 0xf2, 0xaf, 0x1f, 0x53, 0x53, 0x70, 0x4d, 0xf2,
0xcb, 0x6d, 0x27, 0x70, 0x1a, 0x98, 0xa2, 0xef, 0x54, 0x78, 0xeb, 0x42, 0xfb, 0x43, 0x4b, 0xc9,
0x97, 0x3f, 0xb9, 0xe5, 0x1a, 0xcb, 0x23, 0xd2, 0x43, 0x33, 0x29, 0xa8, 0xe8, 0x1b, 0x98, 0xfc,
0x6f, 0xbb, 0x44, 0x0b, 0x83, 0x2e, 0xc1, 0x85, 0x86, 0x6c, 0x2c, 0x8e, 0x82, 0x0e, 0xcd, 0xa0,
0xf4, 0x87, 0x0a, 0x93, 0xa7, 0x0f, 0x1d, 0x7b, 0xe6, 0x77, 0xd0, 0xe7, 0xa0, 0x89, 0x67, 0x1c,
0x25, 0x36, 0xd7, 0x33, 0x7f, 0x02, 0x8c, 0xdc, 0x60, 0x60, 0xf8, 0x01, 0xb8, 0x70, 0x45, 0x3e,
0xa4, 0x28, 0x31, 0xc2, 0xd9, 0x77, 0xda, 0xb8, 0x35, 0x84, 0x18, 0x6a, 0x52, 0xbe, 0x73, 0xf8,
0xc6, 0x54, 0x5e, 0xbf, 0x31, 0x95, 0xe7, 0x7d, 0x53, 0x3d, 0xec, 0x9b, 0xea, 0xab, 0xbe, 0xa9,
0xfe, 0xd9, 0x37, 0xd5, 0xef, 0x8f, 0x4d, 0xe5, 0xd5, 0xb1, 0xa9, 0xbc, 0x3e, 0x36, 0x95, 0xa7,
0xe9, 0xa7, 0x5a, 0x3d, 0x23, 0xff, 0x93, 0xdd, 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0x4a, 0x56,
0x23, 0xf6, 0xa8, 0x0a, 0x00, 0x00,
}
type authenticatedWrapperRaftServer struct {
@ -1079,6 +1093,12 @@ func (m *StoreAction) CopyFrom(src interface{}) {
}
github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig())
m.Target = &v
case *StoreAction_Volume:
v := StoreAction_Volume{
Volume: &Volume{},
}
github_com_docker_swarmkit_api_deepcopy.Copy(v.Volume, o.GetVolume())
m.Target = &v
}
}
@ -2030,6 +2050,27 @@ func (m *StoreAction_Config) MarshalToSizedBuffer(dAtA []byte) (int, error) {
}
return len(dAtA) - i, nil
}
func (m *StoreAction_Volume) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *StoreAction_Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if m.Volume != nil {
{
size, err := m.Volume.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintRaft(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x5a
}
return len(dAtA) - i, nil
}
func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
offset -= sovRaft(v)
base := offset
@ -2673,6 +2714,18 @@ func (m *StoreAction_Config) Size() (n int) {
}
return n
}
func (m *StoreAction_Volume) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Volume != nil {
l = m.Volume.Size()
n += 1 + l + sovRaft(uint64(l))
}
return n
}
func sovRaft(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
@ -2914,6 +2967,16 @@ func (this *StoreAction_Config) String() string {
}, "")
return s
}
func (this *StoreAction_Volume) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&StoreAction_Volume{`,
`Volume:` + strings.Replace(fmt.Sprintf("%v", this.Volume), "Volume", "Volume", 1) + `,`,
`}`,
}, "")
return s
}
func valueToStringRaft(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@ -4374,6 +4437,41 @@ func (m *StoreAction) Unmarshal(dAtA []byte) error {
}
m.Target = &StoreAction_Config{v}
iNdEx = postIndex
case 11:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthRaft
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthRaft
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
v := &Volume{}
if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
m.Target = &StoreAction_Volume{v}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipRaft(dAtA[iNdEx:])

View file

@ -4,7 +4,7 @@ package docker.swarmkit.v1;
import "github.com/docker/swarmkit/api/objects.proto";
import "github.com/docker/swarmkit/api/types.proto";
import "github.com/coreos/etcd/raft/raftpb/raft.proto";
import "go.etcd.io/etcd/raft/v3/raftpb/raft.proto";
import weak "gogoproto/gogo.proto";
import weak "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
@ -146,5 +146,6 @@ message StoreAction {
Resource resource = 8;
Extension extension = 9;
Config config = 10;
Volume volume = 11;
}
}

View file

@ -59,6 +59,7 @@ type StoreSnapshot struct {
Resources []*Resource `protobuf:"bytes,7,rep,name=resources,proto3" json:"resources,omitempty"`
Extensions []*Extension `protobuf:"bytes,8,rep,name=extensions,proto3" json:"extensions,omitempty"`
Configs []*Config `protobuf:"bytes,9,rep,name=configs,proto3" json:"configs,omitempty"`
Volumes []*Volume `protobuf:"bytes,10,rep,name=volumes,proto3" json:"volumes,omitempty"`
}
func (m *StoreSnapshot) Reset() { *m = StoreSnapshot{} }
@ -181,39 +182,40 @@ func init() {
}
var fileDescriptor_c5cad0b62cecd9af = []byte{
// 523 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0x41, 0x8f, 0x12, 0x31,
0x14, 0xc7, 0x67, 0x58, 0x60, 0xd8, 0x6e, 0xd4, 0xb5, 0xf1, 0xd0, 0xe0, 0x5a, 0x11, 0xf7, 0x80,
0x89, 0x0e, 0x8a, 0x26, 0x1a, 0x93, 0xf5, 0xc0, 0xc6, 0x83, 0x07, 0xf7, 0x50, 0x0c, 0xf1, 0x3a,
0x0c, 0x05, 0x46, 0x76, 0xa6, 0xa4, 0xaf, 0xb0, 0x1e, 0xfd, 0x08, 0x7e, 0x1b, 0xbf, 0x02, 0xc7,
0x3d, 0xee, 0xc9, 0xb8, 0x70, 0xf0, 0x6b, 0x98, 0xb6, 0x53, 0x24, 0x71, 0x70, 0x6f, 0x93, 0xe6,
0xf7, 0x7b, 0xef, 0xdf, 0xce, 0x7b, 0xe8, 0xd9, 0x38, 0x51, 0x93, 0xf9, 0x20, 0x8c, 0x45, 0xda,
0x1e, 0x8a, 0x78, 0xca, 0x65, 0x1b, 0x2e, 0x22, 0x99, 0x4e, 0x13, 0xd5, 0x8e, 0x66, 0x49, 0x1b,
0xb2, 0x68, 0x06, 0x13, 0xa1, 0xc2, 0x99, 0x14, 0x4a, 0x60, 0x6c, 0x99, 0xd0, 0x31, 0xe1, 0xe2,
0x45, 0xfd, 0xe9, 0x0d, 0x25, 0xc4, 0xe0, 0x0b, 0x8f, 0x15, 0xd8, 0x0a, 0xf5, 0x27, 0x37, 0xd0,
0x32, 0x1a, 0xe5, 0xcd, 0xea, 0xf7, 0xc6, 0x62, 0x2c, 0xcc, 0x67, 0x5b, 0x7f, 0xd9, 0xd3, 0xe6,
0x8f, 0x32, 0xba, 0xd5, 0x53, 0x42, 0xf2, 0x5e, 0x1e, 0x0d, 0x87, 0xa8, 0x92, 0x89, 0x21, 0x07,
0xe2, 0x37, 0xf6, 0x5a, 0x07, 0x1d, 0x12, 0xfe, 0x1b, 0x32, 0x3c, 0x13, 0x43, 0xce, 0x2c, 0x86,
0x5f, 0xa3, 0x1a, 0x70, 0xb9, 0x48, 0x62, 0x0e, 0xa4, 0x64, 0x94, 0xfb, 0x45, 0x4a, 0xcf, 0x32,
0x6c, 0x03, 0x6b, 0x31, 0xe3, 0xea, 0x42, 0xc8, 0x29, 0x90, 0xbd, 0xdd, 0xe2, 0x99, 0x65, 0xd8,
0x06, 0xd6, 0x09, 0x55, 0x04, 0x53, 0x20, 0xe5, 0xdd, 0x09, 0x3f, 0x45, 0x30, 0x65, 0x16, 0xd3,
0x8d, 0xe2, 0xf3, 0x39, 0x28, 0x2e, 0x81, 0x54, 0x76, 0x37, 0x3a, 0xb5, 0x0c, 0xdb, 0xc0, 0xf8,
0x15, 0x0a, 0x80, 0xc7, 0x92, 0x2b, 0x20, 0x55, 0xe3, 0xd5, 0x8b, 0x6f, 0xa6, 0x11, 0xe6, 0x50,
0xfc, 0x16, 0xed, 0x4b, 0x0e, 0x62, 0x2e, 0xf5, 0x8b, 0x04, 0xc6, 0x3b, 0x2a, 0xf2, 0x58, 0x0e,
0xb1, 0xbf, 0x38, 0x3e, 0x41, 0x88, 0x7f, 0x55, 0x3c, 0x83, 0x44, 0x64, 0x40, 0x6a, 0x46, 0x7e,
0x50, 0x24, 0xbf, 0x77, 0x14, 0xdb, 0x12, 0x74, 0xe0, 0x58, 0x64, 0xa3, 0x64, 0x0c, 0x64, 0x7f,
0x77, 0xe0, 0x53, 0x83, 0x30, 0x87, 0x6a, 0x6b, 0x21, 0xce, 0xe7, 0x29, 0x07, 0x82, 0x76, 0x5b,
0x7d, 0x83, 0x30, 0x87, 0x36, 0x13, 0x74, 0x27, 0x7f, 0xb1, 0xcd, 0xe8, 0xbc, 0x41, 0x41, 0xca,
0xd3, 0x81, 0x7e, 0x67, 0x3b, 0x3c, 0xb4, 0xf0, 0xde, 0xd1, 0x48, 0x7d, 0x34, 0x18, 0x73, 0x38,
0x3e, 0x42, 0x81, 0xe4, 0xa9, 0x58, 0xf0, 0xa1, 0x99, 0xa1, 0x72, 0xb7, 0x74, 0xe8, 0x31, 0x77,
0xd4, 0xfc, 0xed, 0xa3, 0xda, 0xa6, 0xc9, 0x3b, 0x14, 0x2c, 0xb8, 0xd4, 0xf7, 0x25, 0x7e, 0xc3,
0x6f, 0xdd, 0xee, 0x1c, 0x17, 0xfe, 0x14, 0xb7, 0x69, 0x7d, 0xcb, 0x32, 0x27, 0xe1, 0x0f, 0x08,
0xe5, 0x5d, 0x27, 0xc9, 0x8c, 0x94, 0x1a, 0x7e, 0xeb, 0xa0, 0xf3, 0xf8, 0x3f, 0xf3, 0xe0, 0x2a,
0x75, 0xcb, 0xcb, 0x9f, 0x0f, 0x3d, 0xb6, 0x25, 0xe3, 0x13, 0x54, 0x01, 0xbd, 0x3b, 0x64, 0xcf,
0x54, 0x79, 0x54, 0x18, 0x64, 0x7b, 0xb9, 0xf2, 0x1a, 0xd6, 0x6a, 0xde, 0x45, 0x41, 0x9e, 0x0e,
0x57, 0x51, 0xa9, 0xff, 0xfc, 0xd0, 0xeb, 0x1e, 0x2f, 0xaf, 0xa9, 0x77, 0x75, 0x4d, 0xbd, 0x6f,
0x2b, 0xea, 0x2f, 0x57, 0xd4, 0xbf, 0x5c, 0x51, 0xff, 0xd7, 0x8a, 0xfa, 0xdf, 0xd7, 0xd4, 0xbb,
0x5c, 0x53, 0xef, 0x6a, 0x4d, 0xbd, 0xcf, 0xa5, 0x41, 0xd5, 0x6c, 0xef, 0xcb, 0x3f, 0x01, 0x00,
0x00, 0xff, 0xff, 0x27, 0xb3, 0xad, 0x75, 0x71, 0x04, 0x00, 0x00,
}
func (m *StoreSnapshot) Copy() *StoreSnapshot {
@ -301,6 +303,14 @@ func (m *StoreSnapshot) CopyFrom(src interface{}) {
}
}
if o.Volumes != nil {
m.Volumes = make([]*Volume, len(o.Volumes))
for i := range m.Volumes {
m.Volumes[i] = &Volume{}
github_com_docker_swarmkit_api_deepcopy.Copy(m.Volumes[i], o.Volumes[i])
}
}
}
func (m *ClusterSnapshot) Copy() *ClusterSnapshot {
@ -368,6 +378,20 @@ func (m *StoreSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.Volumes) > 0 {
for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintSnapshot(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x52
}
}
if len(m.Configs) > 0 {
for iNdEx := len(m.Configs) - 1; iNdEx >= 0; iNdEx-- {
{
@ -660,6 +684,12 @@ func (m *StoreSnapshot) Size() (n int) {
n += 1 + l + sovSnapshot(uint64(l))
}
}
if len(m.Volumes) > 0 {
for _, e := range m.Volumes {
l = e.Size()
n += 1 + l + sovSnapshot(uint64(l))
}
}
return n
}
@ -754,6 +784,11 @@ func (this *StoreSnapshot) String() string {
repeatedStringForConfigs += strings.Replace(fmt.Sprintf("%v", f), "Config", "Config", 1) + ","
}
repeatedStringForConfigs += "}"
repeatedStringForVolumes := "[]*Volume{"
for _, f := range this.Volumes {
repeatedStringForVolumes += strings.Replace(fmt.Sprintf("%v", f), "Volume", "Volume", 1) + ","
}
repeatedStringForVolumes += "}"
s := strings.Join([]string{`&StoreSnapshot{`,
`Nodes:` + repeatedStringForNodes + `,`,
`Services:` + repeatedStringForServices + `,`,
@ -764,6 +799,7 @@ func (this *StoreSnapshot) String() string {
`Resources:` + repeatedStringForResources + `,`,
`Extensions:` + repeatedStringForExtensions + `,`,
`Configs:` + repeatedStringForConfigs + `,`,
`Volumes:` + repeatedStringForVolumes + `,`,
`}`,
}, "")
return s
@ -1139,6 +1175,40 @@ func (m *StoreSnapshot) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 10:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSnapshot
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthSnapshot
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthSnapshot
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Volumes = append(m.Volumes, &Volume{})
if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSnapshot(dAtA[iNdEx:])

View file

@ -23,6 +23,7 @@ message StoreSnapshot {
repeated Resource resources = 7;
repeated Extension extensions = 8;
repeated Config configs = 9;
repeated Volume volumes = 10;
}
// ClusterSnapshot stores cluster membership information in snapshots.

File diff suppressed because it is too large Load diff

View file

@ -506,3 +506,71 @@ message ConfigSpec {
// - golang: Go templating
Driver templating = 3;
}
message VolumeSpec {
// Annotations includes the name and labels of a volume. The name used in the
// spec's Annotations will be passed to the Plugin as the "Name" in the
// CreateVolume request.
Annotations annotations = 1 [(gogoproto.nullable) = false];
// Group defines the volume group this particular volume belongs to. When
// requesting volumes for a workload, the group name can be used instead of
// the volume's name, which tells swarmkit to pick one from the many volumes
// belonging to that group.
string group = 2;
// Driver represents the CSI Plugin object and its configuration parameters.
// The "options" field of the Driver object is passed in the CSI
// CreateVolumeRequest as the "parameters" field. The Driver must be
// specified; there is no default CSI Plugin.
Driver driver = 3;
// AccessMode is similar to, and used to determine, the volume access mode as
// defined in the CSI spec, as well as the volume type (block vs mount). In
// this way, it is more similar to the VolumeCapability message in the CSI
// spec.
VolumeAccessMode access_mode = 4;
// Secrets represents a set of key/value pairs to pass to the CSI plugin. The
// keys of the secrets can be anything, but the values refer to swarmkit
// Secret objects. See the "Secrets Requirements" section of the CSI Plugin
// Spec for more information.
repeated VolumeSecret secrets = 5;
// AccessibilityRequirements specifies where a volume must be accessible
// from.
//
// This field must be empty if the plugin does not support
// VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the
	// plugin does not support it, the volume will not be created.
//
// If AccessibilityRequirements is empty, but the plugin does support
// VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire
// cluster is a valid target for the volume.
TopologyRequirement AccessibilityRequirements = 6;
// CapacityRange is the capacity this volume should be created with. If nil,
// the plugin will decide the capacity.
CapacityRange capacity_range = 7;
enum VolumeAvailability {
option (gogoproto.goproto_enum_prefix) = false;
// Active allows a volume to be used and scheduled to. This is the
// default state.
ACTIVE = 0 [(gogoproto.enumvalue_customname) = "VolumeAvailabilityActive"];
// Pause prevents volumes from having new workloads scheduled to use
// them, even if they're already published on a Node.
PAUSE = 1 [(gogoproto.enumvalue_customname) = "VolumeAvailabilityPause"];
// Drain causes existing workloads using this volume to be rescheduled,
// causing the volume to be unpublished and removed from nodes.
DRAIN = 2 [(gogoproto.enumvalue_customname) = "VolumeAvailabilityDrain"];
}
// Availability is the Volume's desired availability. Analogous to Node
// Availability, this allows the user to take volumes offline in order to
// update or delete them.
VolumeAvailability availability = 8;
}
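Populating the generated Go type might look like the following minimal sketch; the field and constant names assume the usual gogoproto output for the messages in this file (VolumeAccessMode is defined in types.proto, below), and every value is an example:

	spec := &api.VolumeSpec{
		Annotations: api.Annotations{Name: "my-volume"},
		Group:       "my-group", // a Mount source of "group:my-group" matches this volume
		Driver:      &api.Driver{Name: "my.csi.plugin"},
		AccessMode: &api.VolumeAccessMode{
			Scope:   api.VolumeScopeSingleNode,
			Sharing: api.VolumeSharingNone,
			AccessType: &api.VolumeAccessMode_Mount{
				Mount: &api.VolumeAccessMode_MountVolume{FsType: "ext4"},
			},
		},
	}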

File diff suppressed because it is too large Load diff

View file

@ -62,6 +62,7 @@ enum ResourceType {
TASK = 0;
SECRET = 1;
CONFIG = 2;
VOLUME = 3;
}
message Resources {
@ -140,6 +141,8 @@ message NodeDescription {
// FIPS indicates whether the node has FIPS-enabled
bool fips = 6 [(gogoproto.customname) = "FIPS"];
repeated NodeCSIInfo csi_info = 7 [(gogoproto.customname) = "CSIInfo"];
}
message NodeTLSInfo {
@ -151,6 +154,27 @@ message NodeTLSInfo {
bytes cert_issuer_public_key = 3;
}
// NodeCSIInfo represents information about a Node returned by calling the
// NodeGetInfo RPC on the CSI plugin present on the node. There is a separate
// NodeCSIInfo object for each CSI plugin present.
message NodeCSIInfo {
// PluginName is the name of the CSI plugin.
string plugin_name = 1;
// NodeID is the ID of the node as reported by the CSI plugin. This will be
// different from the swarmkit node ID.
string node_id = 2;
// MaxVolumesPerNode is the maximum number of volumes that may be published
// to this node.
int64 max_volumes_per_node = 3;
// AccessibleTopology indicates the location of this node in the CSI plugin's
// topology
Topology accessible_topology = 4;
}
message RaftMemberStatus {
bool leader = 1;
@ -215,6 +239,7 @@ message Mount {
VOLUME = 1 [(gogoproto.enumvalue_customname) = "MountTypeVolume"]; // Remote storage volumes
TMPFS = 2 [(gogoproto.enumvalue_customname) = "MountTypeTmpfs"]; // Mount a tmpfs
NPIPE = 3 [(gogoproto.enumvalue_customname) = "MountTypeNamedPipe"]; // Windows named pipes
CSI = 4 [(gogoproto.enumvalue_customname) = "MountTypeCSI"]; // CSI volume
}
// Type defines the nature of the mount.
@ -222,6 +247,10 @@ message Mount {
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
//
// For CSI type mounts, the source is either the name of the volume or the
// name of the volume group. To specify a volume group, the source should be
// prefixed with "group:", as in "group:groupname"
string source = 2;
// Target path in container
@ -1131,3 +1160,481 @@ message JobStatus {
// newly added nodes executing long-forgotten jobs.
google.protobuf.Timestamp last_execution = 2;
}
// VolumeAccessMode is the access mode of the volume, and is used to determine
// the CSI AccessMode value, as well as the volume access type (block vs
// mount). In this way, it is more similar to the CSI VolumeCapability message.
//
// This defines how and where a volume can be accessed by more than
// one Task, but does not imply anything about the accessible topology of the
// volume.
//
// For analogy, a flash drive can be used on many computers, but only one of
// them at a time, and so would have a scope of "Single". But, it can be used
// by any number of programs simultaneously, so would have a sharing of "All".
message VolumeAccessMode {
// Scope enumerates the possible volume access scopes.
enum Scope {
option (gogoproto.goproto_enum_prefix) = false;
// VolumeScopeSingleNode indicates that only one node at a time may have
// access to the volume.
SINGLE_NODE = 0 [(gogoproto.enumvalue_customname) = "VolumeScopeSingleNode"];
// VolumeScopeMultiNode indicates that multiple nodes may access the volume
// at the same time.
MULTI_NODE = 1 [(gogoproto.enumvalue_customname) = "VolumeScopeMultiNode"];
}
// Sharing enumerates the possible volume sharing modes.
enum Sharing {
option (gogoproto.goproto_enum_prefix) = false;
// VolumeSharingNone indicates that the volume may only be used by a single
// Task at any given time.
NONE = 0 [(gogoproto.enumvalue_customname) = "VolumeSharingNone"];
// VolumeSharingReadOnly indicates that the volume may be accessed by
    // multiple Tasks, but all Tasks have only read access.
READ_ONLY = 1 [(gogoproto.enumvalue_customname) = "VolumeSharingReadOnly"];
// VolumeSharingOneWriter indicates that the Volume may be accessed by
// multiple Tasks, but only the one Task may have write permission for the
// Volume.
ONE_WRITER = 2 [(gogoproto.enumvalue_customname) = "VolumeSharingOneWriter"];
// VolumeSharingAll indicates that any number of Tasks may have read and
// write access to the volume.
ALL = 3 [(gogoproto.enumvalue_customname) = "VolumeSharingAll"];
}
// BlockVolume indicates the volume will be accessed with the block device
// API.
message BlockVolume {
// intentionally empty
}
  // MountVolume indicates the volume will be accessed with the filesystem API.
message MountVolume {
// FsType is the filesystem type. This field is optional, and an empty
// string is equal to an unspecified value.
string fs_type = 1;
// MountFlags indicates mount options to be used for the volume. This
// field is optional, and may contain sensitive data.
repeated string mount_flags = 2;
}
// Scope defines on how many nodes this volume can be accessed
// simultaneously. If unset, will default to the zero-value of SINGLE_NODE.
Scope scope = 1;
// Sharing defines how many tasks can use this volume at the same time, and
// in what way. If unset, will default to the zero-value of NONE.
Sharing sharing = 2;
// AccessType defines the access type of the volume. Unlike Sharing and
  // Scope, Swarmkit itself doesn't define either of these as a default, but
  // the upstream is free to do so. However, one of these MUST be set.
oneof access_type {
BlockVolume block = 3;
MountVolume mount = 4;
}
}
// VolumeSecret indicates a secret value that must be passed to CSI plugin
// operations.
message VolumeSecret {
// Key represents the key that will be passed as a controller secret to the
// CSI plugin.
string key = 1;
// Secret represents the swarmkit Secret object from which to read data to
// use as the value to pass to the CSI plugin. This can be either a secret
// name or ID.
//
// TODO(dperny): should this be a SecretReference instead?
string secret = 2;
}
// VolumePublishStatus contains information about the volume's publishing to a
// specific node.
//
// Publishing or unpublishing a volume to a node is a two-step process.
//
// When a Volume is needed on a Node, a VolumePublishStatus with state
// PendingPublish is added. This indicates that the volume should be published,
// but the RPCs have not been executed.
//
// Then, afterward, ControllerPublishVolume is called for the Volume, and the
// State is changed to Published, indicating that the call was a success.
//
// When a Volume is no longer needed, the process is similar, with the State
// being changed to PendingUnpublish. When ControllerUnpublishVolume succeeds,
// the PublishStatus for that Node is simply removed.
//
// Without this two-step process, the following could happen:
//
// 1. ControllerPublishVolume is called and the Volume is successfully
// published.
// 2. A crash or leadership change disrupts the cluster before
// the Volume with the updated VolumePublishStatus can be added to the
// store.
// 3. The Task that required the Volume to be published is deleted.
//
// In this case, the Volume would be published to the Node, but Swarm would be
// unaware of this, and would additionally be unaware that the Volume _should_
// be published to the Node.
//
// By first committing our intention to publish a Volume, we guarantee that the
// Volume itself is sufficient to know which Nodes it may have been published
// to.
message VolumePublishStatus {
// State is the state of the volume in the publish/unpublish
// lifecycle, on a particular node.
enum State {
// PendingPublish indicates that the volume should be published on this
// node, but the call to ControllerPublishVolume has not been
// successfully completed yet and the result recorded by swarmkit.
PENDING_PUBLISH = 0;
// Published means the volume is published successfully to the node.
PUBLISHED = 1;
// PendingNodeUnpublish indicates that the Volume should be unpublished
// on the Node, and we're waiting for confirmation that it has done so.
// After the Node has confirmed that the Volume has been unpublished,
// the state will move to PendingUnpublish.
PENDING_NODE_UNPUBLISH = 2;
// PendingUnpublish means the volume is published to the node, and
// needs to not be, but the call to ControllerUnpublishVolume has not
// verifiably succeeded yet. There is no Unpublished state, because
// after the volume has been verifiably unpublished, the
// VolumePublishStatus for the node is removed.
PENDING_UNPUBLISH = 3;
}
// NodeID is the swarm (not CSI plugin) node ID that this volume is
// published to.
string node_id = 1;
// State is the publish state of the volume.
State state = 2;
// PublishContext is the same PublishContext returned by a call to
// ControllerPublishVolume.
map<string, string> publish_context = 3;
// Message is a human-readable message explaining the state of the volume.
// It exists to convey the current situation with the volume to the user,
// allowing, for example, the user to see error messages why a volume might
// not be published yet.
string message = 5;
}
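For orientation, an observer of these states might branch as in this comment-only sketch; the handling described paraphrases the lifecycle above rather than adding behavior:

	switch status.State {
	case api.VolumePublishStatus_PENDING_PUBLISH:
		// ControllerPublishVolume has not verifiably succeeded yet.
	case api.VolumePublishStatus_PUBLISHED:
		// The volume is usable on status.NodeID.
	case api.VolumePublishStatus_PENDING_NODE_UNPUBLISH:
		// Waiting for the node to confirm it has unpublished the volume.
	case api.VolumePublishStatus_PENDING_UNPUBLISH:
		// Once ControllerUnpublishVolume succeeds, this entry is removed.
	}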
// VolumeInfo contains information about the volume originating from the CSI
// plugin.
message VolumeInfo {
// CapacityBytes is the capacity of this volume in bytes. A value of 0
  // indicates that the capacity is unknown.
int64 capacity_bytes = 1;
// VolumeContext includes fields that are opaque to Swarmkit.
map<string, string> volume_context = 2;
// VolumeID is the ID of the volume as reported by the CSI plugin.
// Information about the volume is not cached in swarmkit's object store;
// instead, it is retrieved on-demand as needed. If the VolumeID field is an
// empty string, and the plugin advertises CREATE_DELETE_VOLUME capability,
// then Swarmkit has not yet called CreateVolume.
string volume_id = 3;
// AccessibleTopology is the topology this volume is actually accessible
// from.
repeated Topology accessible_topology = 4;
}
// CapacityRange describes the minimum and maximum capacity a volume should be
// created with.
message CapacityRange {
// RequiredBytes specifies that a volume must be at least this big. The value
// of 0 indicates an unspecified minimum. Must not be negative.
int64 required_bytes = 1;
// LimitBytes specifies that a volume must not be bigger than this. The value
// of 0 indicates an unspecified maximum. Must not be negative.
int64 limit_bytes = 2;
}
// VolumeAssignment contains the information needed by a Node to use a CSI
// volume. This includes the information need to Stage and Publish the volume
// on the node, but never the full Volume object.
message VolumeAssignment {
// ID is the swarmkit ID for the volume. This is used by swarmkit components
// to identify the volume.
string id = 1;
// VolumeID is the CSI volume ID as returned from CreateVolume. This is used
// by the CSI driver to identify the volume.
string volume_id = 2;
// Driver is the CSI Driver that this volume is managed by.
Driver driver = 3;
// VolumeContext is a map returned from the CSI Controller service when a
// Volume is created. It is optional for the driver to provide, but if it is
// provided, it must be passed to subsequent calls.
map<string,string> volume_context = 4;
// PublishContext is a map returned from the Controller service when
// ControllerPublishVolume is called. Again, it is optional, but if provided,
// must be passed.
map<string,string> publish_context = 5;
// AccessMode specifies the access mode of the volume.
VolumeAccessMode access_mode = 6;
// Secrets is the set of secrets required by the CSI plugin. These refer to
// swarmkit Secrets that will be distributed separately to the node.
repeated VolumeSecret secrets = 7;
}
// VolumeAttachment is the information associating a Volume with a Task.
message VolumeAttachment {
// ID is the swarmkit ID of the volume assigned to this task, not the CSI
// volume ID.
string id = 1;
// Source indicates the Mount source that this volume is assigned for.
string source = 2;
// Target indicates the Mount target that this volume is assigned for.
string target = 3;
}
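// As an illustration, a task mounting the volume named "my-data" at
// /srv/data would carry (a hypothetical construction; field names from the
// message above):
//
//	va := &api.VolumeAttachment{
//		ID:     "abcdef123456", // swarmkit volume ID, not the CSI VolumeID
//		Source: "my-data",      // matches the mount's Source
//		Target: "/srv/data",    // matches the mount's Target
//	}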
// These types are copied from the CSI spec. They are copied because of
// compatibility difficulties between the CSI protos and the swarmkit protos,
// which make importing them directly impractical.
// TopologyRequirement expresses the user's requirements for a volume's
// accessible topology.
message TopologyRequirement {
// Specifies the list of topologies the provisioned volume MUST be
// accessible from.
// This field is OPTIONAL. If TopologyRequirement is specified either
// requisite or preferred or both MUST be specified.
//
// If requisite is specified, the provisioned volume MUST be
// accessible from at least one of the requisite topologies.
//
// Given
// x = number of topologies provisioned volume is accessible from
// n = number of requisite topologies
// The CO MUST ensure n >= 1. The SP MUST ensure x >= 1
// If x==n, then the SP MUST make the provisioned volume available to
// all topologies from the list of requisite topologies. If it is
// unable to do so, the SP MUST fail the CreateVolume call.
// For example, if a volume should be accessible from a single zone,
// and requisite =
// {"region": "R1", "zone": "Z2"}
// then the provisioned volume MUST be accessible from the "region"
// "R1" and the "zone" "Z2".
// Similarly, if a volume should be accessible from two zones, and
// requisite =
// {"region": "R1", "zone": "Z2"},
// {"region": "R1", "zone": "Z3"}
// then the provisioned volume MUST be accessible from the "region"
// "R1" and both "zone" "Z2" and "zone" "Z3".
//
// If x<n, then the SP SHALL choose x unique topologies from the list
// of requisite topologies. If it is unable to do so, the SP MUST fail
// the CreateVolume call.
// For example, if a volume should be accessible from a single zone,
// and requisite =
// {"region": "R1", "zone": "Z2"},
// {"region": "R1", "zone": "Z3"}
// then the SP may choose to make the provisioned volume available in
// either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1".
// Similarly, if a volume should be accessible from two zones, and
// requisite =
// {"region": "R1", "zone": "Z2"},
// {"region": "R1", "zone": "Z3"},
// {"region": "R1", "zone": "Z4"}
// then the provisioned volume MUST be accessible from any combination
// of two unique topologies: e.g. "R1/Z2" and "R1/Z3", or "R1/Z2" and
// "R1/Z4", or "R1/Z3" and "R1/Z4".
//
// If x>n, then the SP MUST make the provisioned volume available from
// all topologies from the list of requisite topologies and MAY choose
// the remaining x-n unique topologies from the list of all possible
// topologies. If it is unable to do so, the SP MUST fail the
// CreateVolume call.
// For example, if a volume should be accessible from two zones, and
// requisite =
// {"region": "R1", "zone": "Z2"}
// then the provisioned volume MUST be accessible from the "region"
// "R1" and the "zone" "Z2" and the SP may select the second zone
// independently, e.g. "R1/Z4".
repeated Topology requisite = 1;
// Specifies the list of topologies the CO would prefer the volume to
// be provisioned in.
//
// This field is OPTIONAL. If TopologyRequirement is specified either
// requisite or preferred or both MUST be specified.
//
// An SP MUST attempt to make the provisioned volume available using
// the preferred topologies in order from first to last.
//
// If requisite is specified, all topologies in preferred list MUST
// also be present in the list of requisite topologies.
//
// If the SP is unable to make the provisioned volume available
// from any of the preferred topologies, the SP MAY choose a topology
// from the list of requisite topologies.
// If the list of requisite topologies is not specified, then the SP
// MAY choose from the list of all possible topologies.
// If the list of requisite topologies is specified and the SP is
// unable to make the provisioned volume available from any of the
// requisite topologies it MUST fail the CreateVolume call.
//
// Example 1:
// Given a volume should be accessible from a single zone, and
// requisite =
// {"region": "R1", "zone": "Z2"},
// {"region": "R1", "zone": "Z3"}
// preferred =
// {"region": "R1", "zone": "Z3"}
// then the SP SHOULD first attempt to make the provisioned volume
// available from "zone" "Z3" in the "region" "R1" and fall back to
// "zone" "Z2" in the "region" "R1" if that is not possible.
//
// Example 2:
// Given a volume should be accessible from a single zone, and
// requisite =
// {"region": "R1", "zone": "Z2"},
// {"region": "R1", "zone": "Z3"},
// {"region": "R1", "zone": "Z4"},
// {"region": "R1", "zone": "Z5"}
// preferred =
// {"region": "R1", "zone": "Z4"},
// {"region": "R1", "zone": "Z2"}
// then the SP SHOULD first attempt to make the provisioned volume
// accessible from "zone" "Z4" in the "region" "R1" and fall back to
// "zone" "Z2" in the "region" "R1" if that is not possible. If that
// is not possible, the SP may choose between either the "zone"
// "Z3" or "Z5" in the "region" "R1".
//
// Example 3:
// Given a volume should be accessible from TWO zones (because an
// opaque parameter in CreateVolumeRequest, for example, specifies
// the volume is accessible from two zones, aka synchronously
// replicated), and
// requisite =
// {"region": "R1", "zone": "Z2"},
// {"region": "R1", "zone": "Z3"},
// {"region": "R1", "zone": "Z4"},
// {"region": "R1", "zone": "Z5"}
// preferred =
// {"region": "R1", "zone": "Z5"},
// {"region": "R1", "zone": "Z3"}
// then the SP SHOULD first attempt to make the provisioned volume
// accessible from the combination of the two "zones" "Z5" and "Z3" in
// the "region" "R1". If that's not possible, it should fall back to
// a combination of "Z5" and other possibilities from the list of
// requisite. If that's not possible, it should fall back to a
// combination of "Z3" and other possibilities from the list of
// requisite. If that's not possible, it should fall back to a
// combination of other possibilities from the list of requisite.
repeated Topology preferred = 2;
}
// Topology is a map of topological domains to topological segments.
// A topological domain is a sub-division of a cluster, like "region",
// "zone", "rack", etc.
// A topological segment is a specific instance of a topological domain,
// like "zone3", "rack3", etc.
// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
// Valid keys have two segments: an OPTIONAL prefix and name, separated
// by a slash (/), for example: "com.company.example/zone".
// The key name segment is REQUIRED. The prefix is OPTIONAL.
// The key name MUST be 63 characters or less, begin and end with an
// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
// underscores (_), dots (.), or alphanumerics in between, for example
// "zone".
// The key prefix MUST be 63 characters or less, begin and end with a
// lower-case alphanumeric character ([a-z0-9]), contain only
// dashes (-), dots (.), or lower-case alphanumerics in between, and
// follow domain name notation format
// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
// The key prefix SHOULD include the plugin's host company name and/or
// the plugin name, to minimize the possibility of collisions with keys
// from other plugins.
// If a key prefix is specified, it MUST be identical across all
// topology keys returned by the SP (across all RPCs).
// Keys MUST be case-insensitive, meaning the keys "Zone" and "zone"
// MUST NOT both exist.
// Each value (topological segment) MUST contain 1 or more strings.
// Each string MUST be 63 characters or less and begin and end with an
// alphanumeric character with '-', '_', '.', or alphanumerics in
// between.
message Topology {
map<string, string> segments = 1;
}
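// For instance, the zone/rack example from the comment above would be
// expressed as (a hypothetical Go construction against the generated api
// package):
//
//	t := &api.Topology{
//		Segments: map[string]string{
//			"com.company/zone": "Z1",
//			"com.company/rack": "R3",
//		},
//	}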
// VolumeCapability specifies a capability of a volume.
message VolumeCapability {
// Indicate that the volume will be accessed via the block device API.
message BlockVolume {
// Intentionally empty, for now.
}
// Indicate that the volume will be accessed via the filesystem API.
message MountVolume {
// The filesystem type. This field is OPTIONAL.
// An empty string is equal to an unspecified field value.
string fs_type = 1;
// The mount options that can be used for the volume. This field is
// OPTIONAL. `mount_flags` MAY contain sensitive information.
// Therefore, the CO and the Plugin MUST NOT leak this information
// to untrusted entities. The total size of this repeated field
// SHALL NOT exceed 4 KiB.
repeated string mount_flags = 2;
}
// Specify how a volume can be accessed.
message AccessMode {
enum Mode {
UNKNOWN = 0;
// Can only be published once as read/write on a single node, at
// any given time.
SINGLE_NODE_WRITER = 1;
// Can only be published once as readonly on a single node, at
// any given time.
SINGLE_NODE_READER_ONLY = 2;
// Can be published as readonly at multiple nodes simultaneously.
MULTI_NODE_READER_ONLY = 3;
// Can be published at multiple nodes simultaneously. Only one of
// the nodes can be used as read/write. The rest will be readonly.
MULTI_NODE_SINGLE_WRITER = 4;
// Can be published as read/write at multiple nodes
// simultaneously.
MULTI_NODE_MULTI_WRITER = 5;
}
// This field is REQUIRED.
Mode mode = 1;
}
// Specifies what API the volume will be accessed using. One of the
// following fields MUST be specified.
oneof access_type {
BlockVolume block = 1;
MountVolume mount = 2;
}
// This is a REQUIRED field.
AccessMode access_mode = 3;
}

View file

@ -80,6 +80,7 @@ type Object struct {
// *Object_Resource
// *Object_Extension
// *Object_Config
// *Object_Volume
Object isObject_Object `protobuf_oneof:"Object"`
}
@ -148,6 +149,9 @@ type Object_Extension struct {
type Object_Config struct {
Config *Config `protobuf:"bytes,9,opt,name=config,proto3,oneof" json:"config,omitempty"`
}
type Object_Volume struct {
Volume *Volume `protobuf:"bytes,10,opt,name=volume,proto3,oneof" json:"volume,omitempty"`
}
func (*Object_Node) isObject_Object() {}
func (*Object_Service) isObject_Object() {}
@ -158,6 +162,7 @@ func (*Object_Secret) isObject_Object() {}
func (*Object_Resource) isObject_Object() {}
func (*Object_Extension) isObject_Object() {}
func (*Object_Config) isObject_Object() {}
func (*Object_Volume) isObject_Object() {}
func (m *Object) GetObject() isObject_Object {
if m != nil {
@ -229,6 +234,13 @@ func (m *Object) GetConfig() *Config {
return nil
}
func (m *Object) GetVolume() *Volume {
if x, ok := m.GetObject().(*Object_Volume); ok {
return x.Volume
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Object) XXX_OneofWrappers() []interface{} {
return []interface{}{
@ -241,6 +253,7 @@ func (*Object) XXX_OneofWrappers() []interface{} {
(*Object_Resource)(nil),
(*Object_Extension)(nil),
(*Object_Config)(nil),
(*Object_Volume)(nil),
}
}
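// With the new case wired into the oneof, a watch consumer can select on
// volume events like any other object type. A minimal sketch (assuming
// `obj` is an *Object received from the watch stream):
//
//	switch o := obj.Object.(type) {
//	case *Object_Volume:
//		fmt.Println("volume event:", o.Volume.ID)
//	}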
@ -789,82 +802,83 @@ func init() {
}
var fileDescriptor_da25266013800cd9 = []byte{
// 1199 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xbd, 0x73, 0x1b, 0xc5,
0x1b, 0xc7, 0xef, 0x14, 0xf9, 0x24, 0x3d, 0xb6, 0x13, 0xcf, 0xc6, 0x49, 0xee, 0xa7, 0x5f, 0x90,
0x85, 0x78, 0xcb, 0x24, 0x41, 0x06, 0x13, 0x92, 0x01, 0x02, 0x33, 0x96, 0x2c, 0x46, 0x22, 0xe3,
0x97, 0x59, 0xdb, 0x49, 0xa9, 0x39, 0xdf, 0x3d, 0x56, 0x0e, 0xdf, 0xdd, 0x8a, 0xbd, 0x93, 0x1d,
0x77, 0x14, 0x14, 0x4c, 0x2a, 0x1a, 0x66, 0x68, 0x52, 0x41, 0x4d, 0x43, 0x07, 0xff, 0x40, 0x86,
0x2a, 0x65, 0x68, 0x3c, 0x44, 0xe9, 0x28, 0xf8, 0x0b, 0x28, 0x98, 0x7d, 0x39, 0xdb, 0x51, 0x4e,
0x36, 0xa9, 0xb4, 0xb7, 0xf7, 0xf9, 0x3e, 0xfb, 0xec, 0xf3, 0x76, 0x82, 0xab, 0x3d, 0x3f, 0xb9,
0x3f, 0xd8, 0xaa, 0xbb, 0x2c, 0x9c, 0xf7, 0x98, 0xbb, 0x83, 0x7c, 0x3e, 0xde, 0x73, 0x78, 0xb8,
0xe3, 0x27, 0xf3, 0x4e, 0xdf, 0x9f, 0xdf, 0x73, 0x12, 0xf7, 0x7e, 0xbd, 0xcf, 0x59, 0xc2, 0x08,
0x51, 0x40, 0x3d, 0x05, 0xea, 0xbb, 0xef, 0x97, 0x4f, 0xd3, 0xc7, 0x7d, 0x74, 0x63, 0xa5, 0x2f,
0x5f, 0x3f, 0x85, 0x65, 0x5b, 0x5f, 0xa2, 0x9b, 0xa4, 0xf4, 0x69, 0x96, 0x93, 0xfd, 0x3e, 0xa6,
0xec, 0x6c, 0x8f, 0xf5, 0x98, 0x5c, 0xce, 0x8b, 0x95, 0xde, 0xbd, 0x75, 0x82, 0x05, 0x49, 0x6c,
0x0d, 0xb6, 0xe7, 0xfb, 0xc1, 0xa0, 0xe7, 0x47, 0xfa, 0x47, 0x09, 0x6b, 0xdf, 0xe4, 0xc1, 0x5a,
0x95, 0xce, 0x90, 0x3a, 0xe4, 0x23, 0xe6, 0xa1, 0x6d, 0x56, 0xcd, 0x2b, 0x93, 0x0b, 0x76, 0xfd,
0xe5, 0x10, 0xd4, 0x57, 0x98, 0x87, 0x6d, 0x83, 0x4a, 0x8e, 0xdc, 0x82, 0x42, 0x8c, 0x7c, 0xd7,
0x77, 0xd1, 0xce, 0x49, 0xc9, 0xff, 0xb3, 0x24, 0xeb, 0x0a, 0x69, 0x1b, 0x34, 0xa5, 0x85, 0x30,
0xc2, 0x64, 0x8f, 0xf1, 0x1d, 0xfb, 0xcc, 0x78, 0xe1, 0x8a, 0x42, 0x84, 0x50, 0xd3, 0xc2, 0xc3,
0xc4, 0x89, 0x77, 0xec, 0xfc, 0x78, 0x0f, 0x37, 0x9c, 0x58, 0x48, 0x24, 0x27, 0x0e, 0x72, 0x83,
0x41, 0x9c, 0x20, 0xb7, 0x27, 0xc6, 0x1f, 0xd4, 0x54, 0x88, 0x38, 0x48, 0xd3, 0xe4, 0x06, 0x58,
0x31, 0xba, 0x1c, 0x13, 0xdb, 0x92, 0xba, 0x72, 0xf6, 0xcd, 0x04, 0xd1, 0x36, 0xa8, 0x66, 0xc9,
0xc7, 0x50, 0xe4, 0x18, 0xb3, 0x01, 0x77, 0xd1, 0x2e, 0x48, 0xdd, 0xe5, 0x2c, 0x1d, 0xd5, 0x4c,
0xdb, 0xa0, 0x87, 0x3c, 0xf9, 0x14, 0x4a, 0xf8, 0x20, 0xc1, 0x28, 0xf6, 0x59, 0x64, 0x17, 0xa5,
0xf8, 0xb5, 0x2c, 0x71, 0x2b, 0x85, 0xda, 0x06, 0x3d, 0x52, 0x08, 0x87, 0x5d, 0x16, 0x6d, 0xfb,
0x3d, 0xbb, 0x34, 0xde, 0xe1, 0xa6, 0x24, 0x84, 0xc3, 0x8a, 0x6d, 0x14, 0xd3, 0xdc, 0xd7, 0xd6,
0x60, 0x6a, 0x1d, 0x03, 0x74, 0x93, 0xc6, 0xfe, 0x7a, 0xc0, 0x12, 0x72, 0x1d, 0x40, 0x67, 0xab,
0xeb, 0x7b, 0xb2, 0x22, 0x4a, 0x8d, 0xe9, 0xe1, 0xc1, 0x5c, 0x49, 0xa7, 0xb3, 0xb3, 0x44, 0x4b,
0x1a, 0xe8, 0x78, 0x84, 0x40, 0x3e, 0x0e, 0x58, 0x22, 0xcb, 0x20, 0x4f, 0xe5, 0xba, 0xb6, 0x06,
0x67, 0x53, 0x8b, 0xcd, 0x41, 0x9c, 0xb0, 0x50, 0x50, 0x3b, 0x7e, 0xa4, 0xad, 0x51, 0xb9, 0x26,
0xb3, 0x30, 0xe1, 0x47, 0x1e, 0x3e, 0x90, 0xd2, 0x12, 0x55, 0x0f, 0x62, 0x77, 0xd7, 0x09, 0x06,
0x28, 0xcb, 0xa3, 0x44, 0xd5, 0x43, 0xed, 0x2f, 0x0b, 0x8a, 0xa9, 0x49, 0x62, 0x43, 0xee, 0xd0,
0x31, 0x6b, 0x78, 0x30, 0x97, 0xeb, 0x2c, 0xb5, 0x0d, 0x9a, 0xf3, 0x3d, 0x72, 0x0d, 0x4a, 0xbe,
0xd7, 0xed, 0x73, 0xdc, 0xf6, 0xb5, 0xd9, 0xc6, 0xd4, 0xf0, 0x60, 0xae, 0xd8, 0x59, 0x5a, 0x93,
0x7b, 0x22, 0xec, 0xbe, 0xa7, 0xd6, 0x64, 0x16, 0xf2, 0x91, 0x13, 0xea, 0x83, 0x64, 0x65, 0x3b,
0x21, 0x92, 0xd7, 0x61, 0x52, 0xfc, 0xa6, 0x46, 0xf2, 0xfa, 0x25, 0x88, 0x4d, 0x2d, 0xbc, 0x0d,
0x96, 0x2b, 0xaf, 0xa5, 0x2b, 0xab, 0x96, 0x5d, 0x21, 0xc7, 0x03, 0x20, 0x03, 0xaf, 0x42, 0xd1,
0x81, 0x69, 0xb5, 0x4a, 0x8f, 0xb0, 0x5e, 0xc1, 0xc8, 0x94, 0x92, 0x6a, 0x47, 0xea, 0x2f, 0x64,
0xaa, 0x90, 0x91, 0x29, 0x51, 0x29, 0x47, 0xb9, 0x7a, 0x0b, 0x0a, 0xa2, 0x7b, 0x05, 0x5c, 0x94,
0x30, 0x0c, 0x0f, 0xe6, 0x2c, 0xd1, 0xd8, 0x92, 0xb4, 0xc4, 0xcb, 0x8e, 0x47, 0x6e, 0xea, 0x94,
0xaa, 0x72, 0xaa, 0x9e, 0xe4, 0x98, 0x28, 0x18, 0x11, 0x3a, 0xc1, 0x93, 0x25, 0x98, 0xf6, 0x30,
0xf6, 0x39, 0x7a, 0xdd, 0x38, 0x71, 0x12, 0xb4, 0xa1, 0x6a, 0x5e, 0x39, 0x9b, 0x5d, 0xcb, 0xa2,
0x57, 0xd7, 0x05, 0x24, 0x2e, 0xa5, 0x55, 0xf2, 0x99, 0x2c, 0x40, 0x9e, 0xb3, 0x00, 0xed, 0x49,
0x29, 0xbe, 0x3c, 0x6e, 0x14, 0x51, 0x16, 0xc8, 0x71, 0x24, 0x58, 0xd2, 0x01, 0x08, 0x31, 0xdc,
0x42, 0x1e, 0xdf, 0xf7, 0xfb, 0xf6, 0x94, 0x54, 0xbe, 0x33, 0x4e, 0xb9, 0xde, 0x47, 0xb7, 0xbe,
0x7c, 0x88, 0x8b, 0xe4, 0x1e, 0x89, 0xc9, 0x32, 0x5c, 0xe0, 0xb8, 0x8d, 0x1c, 0x23, 0x17, 0xbd,
0xae, 0x9e, 0x3e, 0x22, 0x62, 0xd3, 0x32, 0x62, 0x97, 0x86, 0x07, 0x73, 0xe7, 0xe9, 0x21, 0xa0,
0x07, 0x95, 0x0c, 0xdf, 0x79, 0xfe, 0xd2, 0xb6, 0x47, 0xbe, 0x80, 0xd9, 0x63, 0xe6, 0xd4, 0xb0,
0x10, 0xd6, 0xce, 0x4a, 0x6b, 0x17, 0x87, 0x07, 0x73, 0xe4, 0xc8, 0x9a, 0x9a, 0x2a, 0xd2, 0x18,
0xe1, 0xa3, 0xbb, 0xa3, 0xb6, 0x54, 0x1f, 0x0b, 0x5b, 0x33, 0x59, 0xb6, 0x54, 0xc3, 0x8f, 0xda,
0xd2, 0xbb, 0xa2, 0xf9, 0x54, 0x43, 0x9e, 0x4b, 0x8b, 0x5f, 0x3c, 0x35, 0xf2, 0x90, 0x6b, 0xec,
0xd7, 0xfe, 0xc8, 0xc1, 0xd4, 0x3d, 0xf1, 0x41, 0xa4, 0xf8, 0xd5, 0x00, 0xe3, 0x84, 0xb4, 0xa0,
0x80, 0x51, 0xc2, 0x7d, 0x8c, 0x6d, 0xb3, 0x7a, 0xe6, 0xca, 0xe4, 0xc2, 0xb5, 0xac, 0xd8, 0x1e,
0x97, 0xa8, 0x87, 0x56, 0x94, 0xf0, 0x7d, 0x9a, 0x6a, 0xc9, 0x6d, 0x98, 0xe4, 0x18, 0x0f, 0x42,
0xec, 0x6e, 0x73, 0x16, 0x9e, 0xf4, 0xe1, 0xb8, 0x8b, 0x5c, 0x8c, 0x36, 0x0a, 0x8a, 0xff, 0x9c,
0xb3, 0x90, 0x5c, 0x07, 0xe2, 0x47, 0x6e, 0x30, 0xf0, 0xb0, 0xcb, 0x02, 0xaf, 0xab, 0xbe, 0xa2,
0xb2, 0x79, 0x8b, 0x74, 0x46, 0xbf, 0x59, 0x0d, 0x3c, 0x35, 0xd4, 0xca, 0xdf, 0x9b, 0x00, 0x47,
0x3e, 0x64, 0xce, 0x9f, 0x4f, 0xc0, 0x72, 0xdc, 0x44, 0xcc, 0xdc, 0x9c, 0x2c, 0x98, 0x37, 0xc6,
0x5e, 0x6a, 0x51, 0x62, 0x77, 0xfc, 0xc8, 0xa3, 0x5a, 0x42, 0x6e, 0x42, 0x61, 0xdb, 0x0f, 0x12,
0xe4, 0xb1, 0x7d, 0x46, 0x86, 0xe4, 0xf2, 0x49, 0x6d, 0x42, 0x53, 0xb8, 0xf6, 0x5b, 0x1a, 0xdb,
0x65, 0x8c, 0x63, 0xa7, 0x87, 0xe4, 0x33, 0xb0, 0x70, 0x17, 0xa3, 0x24, 0x0d, 0xed, 0xdb, 0x63,
0xbd, 0xd0, 0x8a, 0x7a, 0x4b, 0xe0, 0x54, 0xab, 0xc8, 0x87, 0x50, 0xd8, 0x55, 0xd1, 0xfa, 0x2f,
0x01, 0x4d, 0xd9, 0xf2, 0x2f, 0x26, 0x4c, 0x48, 0x43, 0xc7, 0xc2, 0x60, 0xbe, 0x7a, 0x18, 0x16,
0xc0, 0xd2, 0x89, 0xc8, 0x8d, 0xff, 0xf6, 0xa8, 0x94, 0x50, 0x4d, 0x92, 0x8f, 0x00, 0x46, 0x12,
0x78, 0xb2, 0xae, 0xc4, 0xd2, 0xac, 0x5e, 0xfd, 0xc7, 0x84, 0x73, 0x23, 0xae, 0x90, 0x1b, 0x30,
0x7b, 0x6f, 0x71, 0xa3, 0xd9, 0xee, 0x2e, 0x36, 0x37, 0x3a, 0xab, 0x2b, 0xdd, 0xcd, 0x95, 0x3b,
0x2b, 0xab, 0xf7, 0x56, 0x66, 0x8c, 0x72, 0xf9, 0xe1, 0xa3, 0xea, 0xc5, 0x11, 0x7c, 0x33, 0xda,
0x89, 0xd8, 0x9e, 0x70, 0xfc, 0xfc, 0x0b, 0xaa, 0x26, 0x6d, 0x2d, 0x6e, 0xb4, 0x66, 0xcc, 0xf2,
0xff, 0x1e, 0x3e, 0xaa, 0x5e, 0x18, 0x11, 0x35, 0x39, 0xaa, 0xc9, 0xf4, 0xa2, 0x66, 0x73, 0x6d,
0x49, 0x68, 0x72, 0x99, 0x9a, 0xcd, 0xbe, 0x97, 0xa5, 0xa1, 0xad, 0xe5, 0xd5, 0xbb, 0xad, 0x99,
0x7c, 0xa6, 0x86, 0x62, 0xc8, 0x76, 0xb1, 0x7c, 0xe9, 0xdb, 0x1f, 0x2b, 0xc6, 0xaf, 0x3f, 0x55,
0x46, 0xaf, 0xba, 0x10, 0xc2, 0x84, 0xdc, 0x22, 0x5e, 0xba, 0xa8, 0x9e, 0xd6, 0x88, 0xe5, 0xea,
0x69, 0xf5, 0x54, 0xbb, 0xf0, 0xfb, 0xcf, 0x7f, 0xff, 0x90, 0x3b, 0x07, 0xd3, 0x92, 0x78, 0x37,
0x74, 0x22, 0xa7, 0x87, 0xfc, 0x3d, 0xb3, 0xf1, 0xe6, 0xe3, 0x67, 0x15, 0xe3, 0xe9, 0xb3, 0x8a,
0xf1, 0xf5, 0xb0, 0x62, 0x3e, 0x1e, 0x56, 0xcc, 0x27, 0xc3, 0x8a, 0xf9, 0xe7, 0xb0, 0x62, 0x7e,
0xf7, 0xbc, 0x62, 0x3c, 0x79, 0x5e, 0x31, 0x9e, 0x3e, 0xaf, 0x18, 0x5b, 0x96, 0xfc, 0x33, 0xf9,
0xc1, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x96, 0x4e, 0x58, 0x61, 0x63, 0x0b, 0x00, 0x00,
// 1210 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xcd, 0x73, 0xdb, 0xc4,
0x1b, 0xc7, 0x25, 0xd7, 0x51, 0xec, 0x27, 0x49, 0x9b, 0xd9, 0xa6, 0xad, 0x7e, 0xfe, 0x15, 0xc7,
0x98, 0xb7, 0x4e, 0x5b, 0x1c, 0x08, 0xa5, 0x1d, 0xa0, 0x30, 0x13, 0x3b, 0x66, 0x6c, 0x3a, 0x79,
0x99, 0x4d, 0xd2, 0x1e, 0x3d, 0x8a, 0xf4, 0xc4, 0x15, 0x91, 0xb4, 0x66, 0x25, 0x3b, 0xcd, 0x8d,
0x23, 0xd3, 0x13, 0x17, 0x66, 0xb8, 0xf4, 0x04, 0x67, 0x2e, 0xdc, 0xca, 0x3f, 0xd0, 0xe1, 0xd4,
0x63, 0xb9, 0x64, 0xa8, 0x7b, 0xe3, 0xc0, 0x5f, 0xc0, 0x81, 0xd9, 0x17, 0x25, 0xa9, 0x2b, 0x27,
0xf4, 0xe4, 0xd5, 0xea, 0xf3, 0x7d, 0xf6, 0xd9, 0xe7, 0x4d, 0x86, 0xab, 0x5d, 0x3f, 0xb9, 0xdf,
0xdf, 0xae, 0xb9, 0x2c, 0x5c, 0xf0, 0x98, 0xbb, 0x8b, 0x7c, 0x21, 0xde, 0x73, 0x78, 0xb8, 0xeb,
0x27, 0x0b, 0x4e, 0xcf, 0x5f, 0xd8, 0x73, 0x12, 0xf7, 0x7e, 0xad, 0xc7, 0x59, 0xc2, 0x08, 0x51,
0x40, 0x2d, 0x05, 0x6a, 0x83, 0x0f, 0x4b, 0xa7, 0xe9, 0xe3, 0x1e, 0xba, 0xb1, 0xd2, 0x97, 0xae,
0x9f, 0xc2, 0xb2, 0xed, 0xaf, 0xd1, 0x4d, 0x52, 0xfa, 0x34, 0xcb, 0xc9, 0x7e, 0x0f, 0x53, 0x76,
0xae, 0xcb, 0xba, 0x4c, 0x2e, 0x17, 0xc4, 0x4a, 0xef, 0xde, 0x3a, 0xc1, 0x82, 0x24, 0xb6, 0xfb,
0x3b, 0x0b, 0xbd, 0xa0, 0xdf, 0xf5, 0x23, 0xfd, 0xa3, 0x84, 0xd5, 0xc7, 0x79, 0xb0, 0xd6, 0xa4,
0x33, 0xa4, 0x06, 0xf9, 0x88, 0x79, 0x68, 0x9b, 0x15, 0xf3, 0xca, 0xd4, 0xa2, 0x5d, 0x7b, 0x35,
0x04, 0xb5, 0x55, 0xe6, 0x61, 0xcb, 0xa0, 0x92, 0x23, 0xb7, 0x60, 0x32, 0x46, 0x3e, 0xf0, 0x5d,
0xb4, 0x73, 0x52, 0xf2, 0xff, 0x2c, 0xc9, 0x86, 0x42, 0x5a, 0x06, 0x4d, 0x69, 0x21, 0x8c, 0x30,
0xd9, 0x63, 0x7c, 0xd7, 0x3e, 0x33, 0x5e, 0xb8, 0xaa, 0x10, 0x21, 0xd4, 0xb4, 0xf0, 0x30, 0x71,
0xe2, 0x5d, 0x3b, 0x3f, 0xde, 0xc3, 0x4d, 0x27, 0x16, 0x12, 0xc9, 0x89, 0x83, 0xdc, 0xa0, 0x1f,
0x27, 0xc8, 0xed, 0x89, 0xf1, 0x07, 0x35, 0x14, 0x22, 0x0e, 0xd2, 0x34, 0xb9, 0x01, 0x56, 0x8c,
0x2e, 0xc7, 0xc4, 0xb6, 0xa4, 0xae, 0x94, 0x7d, 0x33, 0x41, 0xb4, 0x0c, 0xaa, 0x59, 0xf2, 0x29,
0x14, 0x38, 0xc6, 0xac, 0xcf, 0x5d, 0xb4, 0x27, 0xa5, 0xee, 0x72, 0x96, 0x8e, 0x6a, 0xa6, 0x65,
0xd0, 0x43, 0x9e, 0x7c, 0x0e, 0x45, 0x7c, 0x90, 0x60, 0x14, 0xfb, 0x2c, 0xb2, 0x0b, 0x52, 0xfc,
0x46, 0x96, 0xb8, 0x99, 0x42, 0x2d, 0x83, 0x1e, 0x29, 0x84, 0xc3, 0x2e, 0x8b, 0x76, 0xfc, 0xae,
0x5d, 0x1c, 0xef, 0x70, 0x43, 0x12, 0xc2, 0x61, 0xc5, 0x0a, 0xd5, 0x80, 0x05, 0xfd, 0x10, 0x6d,
0x18, 0xaf, 0xba, 0x2b, 0x09, 0xa1, 0x52, 0x6c, 0xbd, 0x90, 0x56, 0x4c, 0x75, 0x1d, 0xa6, 0x37,
0x30, 0x40, 0x37, 0xa9, 0xef, 0x6f, 0x04, 0x2c, 0x21, 0xd7, 0x01, 0x74, 0x8e, 0x3b, 0xbe, 0x27,
0xeb, 0xa8, 0x58, 0x9f, 0x19, 0x1e, 0xcc, 0x17, 0x75, 0x11, 0xb4, 0x97, 0x69, 0x51, 0x03, 0x6d,
0x8f, 0x10, 0xc8, 0xc7, 0x01, 0x4b, 0x64, 0xf1, 0xe4, 0xa9, 0x5c, 0x57, 0xd7, 0xe1, 0x6c, 0x6a,
0xb1, 0xd1, 0x8f, 0x13, 0x16, 0x0a, 0x6a, 0xd7, 0x8f, 0xb4, 0x35, 0x2a, 0xd7, 0x64, 0x0e, 0x26,
0xfc, 0xc8, 0xc3, 0x07, 0x52, 0x5a, 0xa4, 0xea, 0x41, 0xec, 0x0e, 0x9c, 0xa0, 0x8f, 0xb2, 0xa8,
0x8a, 0x54, 0x3d, 0x54, 0xff, 0xb2, 0xa0, 0x90, 0x9a, 0x24, 0x36, 0xe4, 0x0e, 0x1d, 0xb3, 0x86,
0x07, 0xf3, 0xb9, 0xf6, 0x72, 0xcb, 0xa0, 0x39, 0xdf, 0x23, 0xd7, 0xa0, 0xe8, 0x7b, 0x9d, 0x1e,
0xc7, 0x1d, 0x5f, 0x9b, 0xad, 0x4f, 0x0f, 0x0f, 0xe6, 0x0b, 0xed, 0xe5, 0x75, 0xb9, 0x27, 0x92,
0xe5, 0x7b, 0x6a, 0x4d, 0xe6, 0x20, 0x1f, 0x39, 0xa1, 0x3e, 0x48, 0xf6, 0x83, 0x13, 0x22, 0x79,
0x13, 0xa6, 0xc4, 0x6f, 0x6a, 0x24, 0xaf, 0x5f, 0x82, 0xd8, 0xd4, 0xc2, 0xdb, 0x60, 0xb9, 0xf2,
0x5a, 0xba, 0x1e, 0xab, 0xd9, 0x75, 0x75, 0x3c, 0x00, 0x32, 0x5d, 0x2a, 0x14, 0x6d, 0x98, 0x51,
0xab, 0xf4, 0x08, 0xeb, 0x35, 0x8c, 0x4c, 0x2b, 0xa9, 0x76, 0xa4, 0xf6, 0x52, 0xa6, 0x26, 0x33,
0x32, 0x25, 0xea, 0xeb, 0x28, 0x57, 0xef, 0xc0, 0xa4, 0xe8, 0x79, 0x01, 0x17, 0x24, 0x0c, 0xc3,
0x83, 0x79, 0x4b, 0x8c, 0x03, 0x49, 0x5a, 0xe2, 0x65, 0xdb, 0x23, 0x37, 0x75, 0x4a, 0x55, 0x11,
0x56, 0x4e, 0x72, 0x4c, 0x14, 0x8c, 0x08, 0x9d, 0xe0, 0xc9, 0x32, 0xcc, 0x78, 0x18, 0xfb, 0x1c,
0xbd, 0x4e, 0x9c, 0x38, 0x89, 0xaa, 0xc7, 0xb3, 0xd9, 0x1d, 0x20, 0x3a, 0x7c, 0x43, 0x40, 0xe2,
0x52, 0x5a, 0x25, 0x9f, 0xc9, 0x22, 0xe4, 0x39, 0x0b, 0xd0, 0x9e, 0x92, 0xe2, 0xcb, 0xe3, 0x06,
0x18, 0x65, 0x81, 0x1c, 0x62, 0x82, 0x25, 0x6d, 0x80, 0x10, 0xc3, 0x6d, 0xe4, 0xf1, 0x7d, 0xbf,
0x67, 0x4f, 0x4b, 0xe5, 0x7b, 0xe3, 0x94, 0x1b, 0x3d, 0x74, 0x6b, 0x2b, 0x87, 0xb8, 0x48, 0xee,
0x91, 0x98, 0xac, 0xc0, 0x05, 0x8e, 0x3b, 0xc8, 0x31, 0x72, 0xd1, 0xeb, 0xe8, 0x99, 0x25, 0x22,
0x36, 0x23, 0x23, 0x76, 0x69, 0x78, 0x30, 0x7f, 0x9e, 0x1e, 0x02, 0x7a, 0xbc, 0xc9, 0xf0, 0x9d,
0xe7, 0xaf, 0x6c, 0x7b, 0xe4, 0x2b, 0x98, 0x3b, 0x66, 0x4e, 0x8d, 0x18, 0x61, 0xed, 0xac, 0xb4,
0x76, 0x71, 0x78, 0x30, 0x4f, 0x8e, 0xac, 0xa9, 0x59, 0x24, 0x8d, 0x11, 0x3e, 0xba, 0x3b, 0x6a,
0x4b, 0x75, 0xbf, 0xb0, 0x35, 0x9b, 0x65, 0x4b, 0x8d, 0x89, 0x51, 0x5b, 0x7a, 0x57, 0x34, 0x9f,
0x6a, 0xc8, 0x73, 0x69, 0xf1, 0x8b, 0xa7, 0x7a, 0x1e, 0x72, 0xf5, 0xfd, 0xea, 0x1f, 0x39, 0x98,
0xbe, 0x27, 0x3e, 0xa3, 0x14, 0xbf, 0xe9, 0x63, 0x9c, 0x90, 0x26, 0x4c, 0x62, 0x94, 0x70, 0x1f,
0x63, 0xdb, 0xac, 0x9c, 0xb9, 0x32, 0xb5, 0x78, 0x2d, 0x2b, 0xb6, 0xc7, 0x25, 0xea, 0xa1, 0x19,
0x25, 0x7c, 0x9f, 0xa6, 0x5a, 0x72, 0x1b, 0xa6, 0x38, 0xc6, 0xfd, 0x10, 0x3b, 0x3b, 0x9c, 0x85,
0x27, 0x7d, 0x6e, 0xee, 0x22, 0x17, 0x03, 0x91, 0x82, 0xe2, 0xbf, 0xe4, 0x2c, 0x24, 0xd7, 0x81,
0xf8, 0x91, 0x1b, 0xf4, 0x3d, 0xec, 0xb0, 0xc0, 0xeb, 0xa8, 0x6f, 0xaf, 0x6c, 0xde, 0x02, 0x9d,
0xd5, 0x6f, 0xd6, 0x02, 0x4f, 0x0d, 0xb5, 0xd2, 0x0f, 0x26, 0xc0, 0x91, 0x0f, 0x99, 0xf3, 0xe7,
0x33, 0xb0, 0x1c, 0x37, 0x11, 0x93, 0x3a, 0x27, 0x0b, 0xe6, 0xad, 0xb1, 0x97, 0x5a, 0x92, 0xd8,
0x1d, 0x3f, 0xf2, 0xa8, 0x96, 0x90, 0x9b, 0x30, 0xb9, 0xe3, 0x07, 0x09, 0xf2, 0xd8, 0x3e, 0x23,
0x43, 0x72, 0xf9, 0xa4, 0x36, 0xa1, 0x29, 0x5c, 0xfd, 0x2d, 0x8d, 0xed, 0x0a, 0xc6, 0xb1, 0xd3,
0x45, 0xf2, 0x05, 0x58, 0x38, 0xc0, 0x28, 0x49, 0x43, 0xfb, 0xee, 0x58, 0x2f, 0xb4, 0xa2, 0xd6,
0x14, 0x38, 0xd5, 0x2a, 0xf2, 0x31, 0x4c, 0x0e, 0x54, 0xb4, 0xfe, 0x4b, 0x40, 0x53, 0xb6, 0xf4,
0xab, 0x09, 0x13, 0xd2, 0xd0, 0xb1, 0x30, 0x98, 0xaf, 0x1f, 0x86, 0x45, 0xb0, 0x74, 0x22, 0x72,
0xe3, 0xbf, 0x3d, 0x2a, 0x25, 0x54, 0x93, 0xe4, 0x13, 0x80, 0x91, 0x04, 0x9e, 0xac, 0x2b, 0xb2,
0x34, 0xab, 0x57, 0xff, 0x31, 0xe1, 0xdc, 0x88, 0x2b, 0xe4, 0x06, 0xcc, 0xdd, 0x5b, 0xda, 0x6c,
0xb4, 0x3a, 0x4b, 0x8d, 0xcd, 0xf6, 0xda, 0x6a, 0x67, 0x6b, 0xf5, 0xce, 0xea, 0xda, 0xbd, 0xd5,
0x59, 0xa3, 0x54, 0x7a, 0xf8, 0xa8, 0x72, 0x71, 0x04, 0xdf, 0x8a, 0x76, 0x23, 0xb6, 0x27, 0x1c,
0x3f, 0xff, 0x92, 0xaa, 0x41, 0x9b, 0x4b, 0x9b, 0xcd, 0x59, 0xb3, 0xf4, 0xbf, 0x87, 0x8f, 0x2a,
0x17, 0x46, 0x44, 0x0d, 0x8e, 0x6a, 0x32, 0xbd, 0xac, 0xd9, 0x5a, 0x5f, 0x16, 0x9a, 0x5c, 0xa6,
0x66, 0xab, 0xe7, 0x65, 0x69, 0x68, 0x73, 0x65, 0xed, 0x6e, 0x73, 0x36, 0x9f, 0xa9, 0xa1, 0x18,
0xb2, 0x01, 0x96, 0x2e, 0x7d, 0xf7, 0x53, 0xd9, 0x78, 0xfc, 0x73, 0x79, 0xf4, 0xaa, 0x8b, 0x21,
0x4c, 0xc8, 0x2d, 0xe2, 0xa5, 0x8b, 0xca, 0x69, 0x8d, 0x58, 0xaa, 0x9c, 0x56, 0x4f, 0xd5, 0x0b,
0xbf, 0xff, 0xf2, 0xf7, 0x8f, 0xb9, 0x73, 0x30, 0x23, 0x89, 0xf7, 0x43, 0x27, 0x72, 0xba, 0xc8,
0x3f, 0x30, 0xeb, 0x6f, 0x3f, 0x79, 0x5e, 0x36, 0x9e, 0x3d, 0x2f, 0x1b, 0xdf, 0x0e, 0xcb, 0xe6,
0x93, 0x61, 0xd9, 0x7c, 0x3a, 0x2c, 0x9b, 0x7f, 0x0e, 0xcb, 0xe6, 0xf7, 0x2f, 0xca, 0xc6, 0xd3,
0x17, 0x65, 0xe3, 0xd9, 0x8b, 0xb2, 0xb1, 0x6d, 0xc9, 0xbf, 0xa0, 0x1f, 0xfd, 0x1b, 0x00, 0x00,
0xff, 0xff, 0x36, 0x4b, 0xa7, 0x78, 0x99, 0x0b, 0x00, 0x00,
}
type authenticatedWrapperWatchServer struct {
@ -956,6 +970,12 @@ func (m *Object) CopyFrom(src interface{}) {
}
github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig())
m.Object = &v
case *Object_Volume:
v := Object_Volume{
Volume: &Volume{},
}
github_com_docker_swarmkit_api_deepcopy.Copy(v.Volume, o.GetVolume())
m.Object = &v
}
}
@ -1532,6 +1552,27 @@ func (m *Object_Config) MarshalToSizedBuffer(dAtA []byte) (int, error) {
}
return len(dAtA) - i, nil
}
func (m *Object_Volume) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Object_Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if m.Volume != nil {
{
size, err := m.Volume.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintWatch(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x52
}
return len(dAtA) - i, nil
}
func (m *SelectBySlot) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -2348,6 +2389,18 @@ func (m *Object_Config) Size() (n int) {
}
return n
}
func (m *Object_Volume) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Volume != nil {
l = m.Volume.Size()
n += 1 + l + sovWatch(uint64(l))
}
return n
}
func (m *SelectBySlot) Size() (n int) {
if m == nil {
return 0
@ -2749,6 +2802,16 @@ func (this *Object_Config) String() string {
}, "")
return s
}
func (this *Object_Volume) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Object_Volume{`,
`Volume:` + strings.Replace(fmt.Sprintf("%v", this.Volume), "Volume", "Volume", 1) + `,`,
`}`,
}, "")
return s
}
func (this *SelectBySlot) String() string {
if this == nil {
return "nil"
@ -3356,6 +3419,41 @@ func (m *Object) Unmarshal(dAtA []byte) error {
}
m.Object = &Object_Config{v}
iNdEx = postIndex
case 10:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWatch
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthWatch
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthWatch
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
v := &Volume{}
if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
m.Object = &Object_Volume{v}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipWatch(dAtA[iNdEx:])

View file

@ -19,6 +19,7 @@ message Object {
Resource resource = 7;
Extension extension = 8;
Config config = 9;
Volume volume = 10;
}
}

View file

@ -14,7 +14,6 @@ import (
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
@ -687,7 +686,7 @@ func ensureCertKeyMatch(cert *x509.Certificate, key crypto.PublicKey) error {
// CA certificate, and returns the PEM-encoded Certificate if so
func GetLocalRootCA(paths CertPaths) (RootCA, error) {
// Check if we have a Certificate file
cert, err := ioutil.ReadFile(paths.Cert)
cert, err := os.ReadFile(paths.Cert)
if err != nil {
if os.IsNotExist(err) {
err = ErrNoLocalRootCA
@ -697,7 +696,7 @@ func GetLocalRootCA(paths CertPaths) (RootCA, error) {
}
signingCert := cert
key, err := ioutil.ReadFile(paths.Key)
key, err := os.ReadFile(paths.Key)
if err != nil {
if !os.IsNotExist(err) {
return RootCA{}, err
@ -910,13 +909,13 @@ func readCertValidity(kr KeyReader) (time.Time, time.Time, error) {
// SaveRootCA saves a RootCA object to disk
func SaveRootCA(rootCA RootCA, paths CertPaths) error {
// Make sure the necessary dirs exist and they are writable
err := os.MkdirAll(filepath.Dir(paths.Cert), 0755)
err := os.MkdirAll(filepath.Dir(paths.Cert), 0o755)
if err != nil {
return err
}
// If the root certificate got returned successfully, save the rootCA to disk.
return ioutils.AtomicWriteFile(paths.Cert, rootCA.Certs, 0644)
return ioutils.AtomicWriteFile(paths.Cert, rootCA.Certs, 0o644)
}
// GenerateNewCSR returns a newly generated key and CSR signed with said key

View file

@ -10,7 +10,6 @@ import (
"encoding/json"
"encoding/pem"
"io"
"io/ioutil"
"net/http"
"sync"
"time"
@ -193,7 +192,7 @@ func makeExternalSignRequest(ctx context.Context, client *http.Client, url strin
defer resp.Body.Close()
b := io.LimitReader(resp.Body, CertificateMaxSize)
body, err := ioutil.ReadAll(b)
body, err := io.ReadAll(b)
if err != nil {
return nil, recoverableErr{err: errors.Wrap(err, "unable to read CSR response body")}
}

View file

@ -3,7 +3,6 @@ package ca
import (
"crypto/x509"
"encoding/pem"
"io/ioutil"
"os"
"path/filepath"
"strconv"
@ -20,9 +19,9 @@ import (
const (
// keyPerms are the permissions used to write the TLS keys
keyPerms = 0600
keyPerms = 0o600
// certPerms are the permissions used to write TLS certificates
certPerms = 0644
certPerms = 0o644
// versionHeader is the TLS PEM key header that contains the KEK version
versionHeader = "kek-version"
)
@ -157,14 +156,14 @@ func (k *KeyReadWriter) SetKeyFormatter(kf keyutils.Formatter) {
// location than two possible key locations.
func (k *KeyReadWriter) Migrate() error {
tmpPaths := k.genTempPaths()
keyBytes, err := ioutil.ReadFile(tmpPaths.Key)
keyBytes, err := os.ReadFile(tmpPaths.Key)
if err != nil {
return nil // no key? no migration
}
// it does exist - no need to decrypt, because previous versions of swarmkit
// which supported this temporary key did not support encrypting TLS keys
cert, err := ioutil.ReadFile(k.paths.Cert)
cert, err := os.ReadFile(k.paths.Cert)
if err != nil {
return os.RemoveAll(tmpPaths.Key) // no cert? no migration
}
@ -202,7 +201,7 @@ func (k *KeyReadWriter) Read() ([]byte, []byte, error) {
}
keyBytes := pem.EncodeToMemory(keyBlock)
cert, err := ioutil.ReadFile(k.paths.Cert)
cert, err := os.ReadFile(k.paths.Cert)
// The cert is written to a temporary file first, then the key, and then
// the cert gets renamed - so, if interrupted, it's possible to end up with
// a cert that only exists in the temporary location.
@ -219,7 +218,7 @@ func (k *KeyReadWriter) Read() ([]byte, []byte, error) {
if err != nil {
var tempErr error
tmpPaths := k.genTempPaths()
cert, tempErr = ioutil.ReadFile(tmpPaths.Cert)
cert, tempErr = os.ReadFile(tmpPaths.Cert)
if tempErr != nil {
return nil, nil, err // return the original error
}
@ -308,7 +307,7 @@ func (k *KeyReadWriter) Write(certBytes, plaintextKeyBytes []byte, kekData *KEKD
defer k.mu.Unlock()
// current assumption is that the cert and key will be in the same directory
if err := os.MkdirAll(filepath.Dir(k.paths.Key), 0755); err != nil {
if err := os.MkdirAll(filepath.Dir(k.paths.Key), 0o755); err != nil {
return err
}
@ -353,7 +352,7 @@ func (k *KeyReadWriter) Target() string {
}
func (k *KeyReadWriter) readKeyblock() (*pem.Block, error) {
key, err := ioutil.ReadFile(k.paths.Key)
key, err := os.ReadFile(k.paths.Key)
if err != nil {
return nil, err
}

View file

@ -2,7 +2,6 @@ package ioutils
import (
"io"
"io/ioutil"
"os"
"path/filepath"
)
@ -11,7 +10,7 @@ import (
// AtomicWriteFile atomically writes data to a file specified by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
if err != nil {
return err
}
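// The ioutil-to-os/io changes here and in the ca files above are mechanical:
// since Go 1.16 the io/ioutil package is deprecated and the replacements are
// drop-in equivalents. For reference (a sketch, not part of the diff):
//
//	data, err := os.ReadFile(path)     // was ioutil.ReadFile(path)
//	body, err := io.ReadAll(r)         // was ioutil.ReadAll(r)
//	f, err := os.CreateTemp(dir, pat)  // was ioutil.TempFile(dir, pat)
//
// Likewise, 0o755 is the octal-literal spelling introduced in Go 1.13;
// the value is unchanged from 0755.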

View file

@ -1,3 +1,4 @@
//go:build !linux && !darwin && !windows
// +build !linux,!darwin,!windows
package cnmallocator

View file

@ -15,7 +15,7 @@ import (
)
// MaxConfigSize is the maximum byte length of the `Config.Spec.Data` field.
const MaxConfigSize = 500 * 1024 // 500KB
const MaxConfigSize = 1000 * 1024 // 1000KB
// assumes spec is not nil
func configFromConfigSpec(spec *api.ConfigSpec) *api.Config {

View file

@ -0,0 +1,256 @@
package controlapi
import (
"context"
"strings"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/identity"
"github.com/docker/swarmkit/manager/state/store"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func (s *Server) CreateVolume(ctx context.Context, request *api.CreateVolumeRequest) (*api.CreateVolumeResponse, error) {
if request.Spec == nil {
return nil, status.Errorf(codes.InvalidArgument, "spec must not be nil")
}
// validate the volume spec
if request.Spec.Driver == nil {
return nil, status.Errorf(codes.InvalidArgument, "driver must be specified")
}
if request.Spec.Annotations.Name == "" {
return nil, status.Errorf(codes.InvalidArgument, "meta: name must be provided")
}
if request.Spec.AccessMode == nil {
return nil, status.Errorf(codes.InvalidArgument, "AccessMode must not be nil")
}
if request.Spec.AccessMode.GetAccessType() == nil {
return nil, status.Errorf(codes.InvalidArgument, "Volume AccessMode must specify either Mount or Block access type")
}
volume := &api.Volume{
ID: identity.NewID(),
Spec: *request.Spec,
}
err := s.store.Update(func(tx store.Tx) error {
// check all secrets, so that we can return an error indicating ALL
// missing secrets, instead of just the first one.
var missingSecrets []string
for _, secret := range volume.Spec.Secrets {
s := store.GetSecret(tx, secret.Secret)
if s == nil {
missingSecrets = append(missingSecrets, secret.Secret)
}
}
if len(missingSecrets) > 0 {
secretStr := "secrets"
if len(missingSecrets) == 1 {
secretStr = "secret"
}
return status.Errorf(codes.InvalidArgument, "%s not found: %v", secretStr, strings.Join(missingSecrets, ", "))
}
return store.CreateVolume(tx, volume)
})
if err != nil {
return nil, err
}
return &api.CreateVolumeResponse{
Volume: volume,
}, nil
}
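// A minimal request that passes the validation above might look like the
// following (a sketch; the VolumeAccessMode oneof wrapper names are assumed
// from the generated api package):
//
//	req := &api.CreateVolumeRequest{
//		Spec: &api.VolumeSpec{
//			Annotations: api.Annotations{Name: "my-volume"},
//			Driver:      &api.Driver{Name: "my-csi-plugin"},
//			AccessMode: &api.VolumeAccessMode{
//				AccessType: &api.VolumeAccessMode_Mount{
//					Mount: &api.VolumeAccessMode_MountVolume{},
//				},
//			},
//		},
//	}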
func (s *Server) UpdateVolume(ctx context.Context, request *api.UpdateVolumeRequest) (*api.UpdateVolumeResponse, error) {
if request.VolumeID == "" {
return nil, status.Errorf(codes.InvalidArgument, "VolumeID must not be empty")
}
if request.Spec == nil {
return nil, status.Errorf(codes.InvalidArgument, "Spec must not be empty")
}
if request.VolumeVersion == nil {
return nil, status.Errorf(codes.InvalidArgument, "VolumeVersion must not be empty")
}
var volume *api.Volume
if err := s.store.Update(func(tx store.Tx) error {
volume = store.GetVolume(tx, request.VolumeID)
if volume == nil {
return status.Errorf(codes.NotFound, "volume %v not found", request.VolumeID)
}
// compare specs, to see if any invalid fields have changed
if request.Spec.Annotations.Name != volume.Spec.Annotations.Name {
return status.Errorf(codes.InvalidArgument, "Name cannot be updated")
}
if request.Spec.Group != volume.Spec.Group {
return status.Errorf(codes.InvalidArgument, "Group cannot be updated")
}
if request.Spec.AccessibilityRequirements != volume.Spec.AccessibilityRequirements {
return status.Errorf(codes.InvalidArgument, "AccessibilityRequirements cannot be updated")
}
if request.Spec.Driver == nil || request.Spec.Driver.Name != volume.Spec.Driver.Name {
return status.Errorf(codes.InvalidArgument, "Driver cannot be updated")
}
if request.Spec.AccessMode.Scope != volume.Spec.AccessMode.Scope || request.Spec.AccessMode.Sharing != volume.Spec.AccessMode.Sharing {
return status.Errorf(codes.InvalidArgument, "AccessMode cannot be updated")
}
volume.Spec = *request.Spec
volume.Meta.Version = *request.VolumeVersion
if err := store.UpdateVolume(tx, volume); err != nil {
return err
}
// read the volume back out, so it has the correct meta version
// TODO(dperny): this behavior, while likely more correct, may not be
// consistent with the rest of swarmkit...
volume = store.GetVolume(tx, request.VolumeID)
return nil
}); err != nil {
return nil, err
}
return &api.UpdateVolumeResponse{
Volume: volume,
}, nil
}
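// Callers follow swarm's usual read-modify-write pattern: fetch the volume,
// mutate only the updatable fields (such as labels), and send the stored
// Meta.Version back. A sketch, assuming `client` is a control API client
// (error handling elided):
//
//	get, _ := client.GetVolume(ctx, &api.GetVolumeRequest{VolumeID: id})
//	spec := get.Volume.Spec
//	spec.Annotations.Labels = map[string]string{"tier": "gold"}
//	_, err := client.UpdateVolume(ctx, &api.UpdateVolumeRequest{
//		VolumeID:      id,
//		VolumeVersion: &get.Volume.Meta.Version,
//		Spec:          &spec,
//	})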
func (s *Server) ListVolumes(ctx context.Context, request *api.ListVolumesRequest) (*api.ListVolumesResponse, error) {
var (
volumes []*api.Volume
err error
)
// So the way we do this is with two filtering passes. First, we do a store
// request, filtering on one of the parameters. Then, from the result of
// the store request, we filter on the remaining filters. This is necessary
// because the store filters do not expose an AND function.
s.store.View(func(tx store.ReadTx) {
var by store.By = store.All
switch {
case request.Filters == nil:
// short circuit to avoid nil pointer deref
case len(request.Filters.Names) > 0:
by = buildFilters(store.ByName, request.Filters.Names)
case len(request.Filters.IDPrefixes) > 0:
by = buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes)
case len(request.Filters.Groups) > 0:
by = buildFilters(store.ByVolumeGroup, request.Filters.Groups)
case len(request.Filters.Drivers) > 0:
by = buildFilters(store.ByDriver, request.Filters.Drivers)
case len(request.Filters.NamePrefixes) > 0:
by = buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes)
}
volumes, err = store.FindVolumes(tx, by)
})
if err != nil {
return nil, err
}
if request.Filters == nil {
return &api.ListVolumesResponse{Volumes: volumes}, nil
}
volumes = filterVolumes(volumes,
// Names
func(v *api.Volume) bool {
return filterContains(v.Spec.Annotations.Name, request.Filters.Names)
},
// NamePrefixes
func(v *api.Volume) bool {
return filterContainsPrefix(v.Spec.Annotations.Name, request.Filters.NamePrefixes)
},
// IDPrefixes
func(v *api.Volume) bool {
return filterContainsPrefix(v.ID, request.Filters.IDPrefixes)
},
// Labels
func(v *api.Volume) bool {
return filterMatchLabels(v.Spec.Annotations.Labels, request.Filters.Labels)
},
// Groups
func(v *api.Volume) bool {
return filterContains(v.Spec.Group, request.Filters.Groups)
},
// Drivers
func(v *api.Volume) bool {
return v.Spec.Driver != nil && filterContains(v.Spec.Driver.Name, request.Filters.Drivers)
},
)
return &api.ListVolumesResponse{
Volumes: volumes,
}, nil
}
func filterVolumes(candidates []*api.Volume, filters ...func(*api.Volume) bool) []*api.Volume {
result := []*api.Volume{}
for _, c := range candidates {
match := true
for _, f := range filters {
if !f(c) {
match = false
break
}
}
if match {
result = append(result, c)
}
}
return result
}
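// For example, to keep only volumes in group "backups" that use the driver
// "my-csi-plugin" (a sketch using the helper above; names are illustrative):
//
//	vols := filterVolumes(vols,
//		func(v *api.Volume) bool { return v.Spec.Group == "backups" },
//		func(v *api.Volume) bool {
//			return v.Spec.Driver != nil && v.Spec.Driver.Name == "my-csi-plugin"
//		},
//	)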
func (s *Server) GetVolume(ctx context.Context, request *api.GetVolumeRequest) (*api.GetVolumeResponse, error) {
var volume *api.Volume
s.store.View(func(tx store.ReadTx) {
volume = store.GetVolume(tx, request.VolumeID)
})
if volume == nil {
return nil, status.Errorf(codes.NotFound, "volume %v not found", request.VolumeID)
}
return &api.GetVolumeResponse{
Volume: volume,
}, nil
}
// RemoveVolume marks a Volume for removal. For a Volume to be removed, it must
// have Availability set to Drain. RemoveVolume does not immediately delete the
// volume, because some clean-up must occur before it can be removed. However,
// calling RemoveVolume is an irrevocable action, and once it occurs, the
// Volume can no longer be used in any way.
func (s *Server) RemoveVolume(ctx context.Context, request *api.RemoveVolumeRequest) (*api.RemoveVolumeResponse, error) {
err := s.store.Update(func(tx store.Tx) error {
volume := store.GetVolume(tx, request.VolumeID)
if volume == nil {
return status.Errorf(codes.NotFound, "volume %s not found", request.VolumeID)
}
// If this is a force delete, we force the delete. No survivors. This
// is a last resort to resolve otherwise intractable problems with
// volumes. Using this has the potential to break other things in the
// cluster, because testing every case where we force-remove a volume
// is difficult at best.
if request.Force {
return store.DeleteVolume(tx, request.VolumeID)
}
if len(volume.PublishStatus) != 0 {
return status.Error(codes.FailedPrecondition, "volume is still in use")
}
volume.PendingDelete = true
return store.UpdateVolume(tx, volume)
})
if err != nil {
return nil, err
}
return &api.RemoveVolumeResponse{}, nil
}

View file

@ -0,0 +1,138 @@
package csi
import (
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/docker/swarmkit/api"
)
// convert.go contains functions for converting swarm objects into CSI requests
// and back again.
// makeTopologyRequirement converts a swarmkit TopologyRequirement into a CSI
// TopologyRequirement.
func makeTopologyRequirement(t *api.TopologyRequirement) *csi.TopologyRequirement {
if t == nil {
return nil
}
return &csi.TopologyRequirement{
Requisite: makeTopologies(t.Requisite),
Preferred: makeTopologies(t.Preferred),
}
}
// makeTopologies converts a slice of swarmkit topologies into a slice of CSI
// topologies.
func makeTopologies(ts []*api.Topology) []*csi.Topology {
if ts == nil {
return nil
}
csiTops := make([]*csi.Topology, len(ts))
for i, t := range ts {
csiTops[i] = makeTopology(t)
}
return csiTops
}
// makeTopology converts a swarmkit topology into a CSI topology. These types
// are essentially homologous, with the swarm type being copied verbatim from
// the CSI type (for build reasons).
func makeTopology(t *api.Topology) *csi.Topology {
if t == nil {
return nil
}
return &csi.Topology{
Segments: t.Segments,
}
}
func makeCapability(am *api.VolumeAccessMode) *csi.VolumeCapability {
var mode csi.VolumeCapability_AccessMode_Mode
switch am.Scope {
case api.VolumeScopeSingleNode:
switch am.Sharing {
case api.VolumeSharingNone, api.VolumeSharingOneWriter, api.VolumeSharingAll:
mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
case api.VolumeSharingReadOnly:
mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY
}
case api.VolumeScopeMultiNode:
switch am.Sharing {
case api.VolumeSharingReadOnly:
mode = csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
case api.VolumeSharingOneWriter:
mode = csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER
case api.VolumeSharingAll:
mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
}
}
capability := &csi.VolumeCapability{
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: mode,
},
}
if block := am.GetBlock(); block != nil {
capability.AccessType = &csi.VolumeCapability_Block{
// Block type is empty.
Block: &csi.VolumeCapability_BlockVolume{},
}
}
if mount := am.GetMount(); mount != nil {
capability.AccessType = &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{
FsType: mount.FsType,
MountFlags: mount.MountFlags,
},
}
}
return capability
}
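// For example, a read-only volume shared across nodes maps to the CSI
// MULTI_NODE_READER_ONLY mode (a sketch; the AccessType wrapper names are
// assumed from the generated api package):
//
//	capability := makeCapability(&api.VolumeAccessMode{
//		Scope:   api.VolumeScopeMultiNode,
//		Sharing: api.VolumeSharingReadOnly,
//		AccessType: &api.VolumeAccessMode_Mount{
//			Mount: &api.VolumeAccessMode_MountVolume{FsType: "ext4"},
//		},
//	})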
// makeCapacityRange converts the swarmkit CapacityRange object to the
// equivalent CSI object.
func makeCapacityRange(cr *api.CapacityRange) *csi.CapacityRange {
if cr == nil {
return nil
}
return &csi.CapacityRange{
RequiredBytes: cr.RequiredBytes,
LimitBytes: cr.LimitBytes,
}
}
// unmakeTopologies transforms a slice of CSI-type topologies into the
// equivalent swarm types. It is called "unmakeTopologies" because it performs
// the inverse of "makeTopologies".
func unmakeTopologies(topologies []*csi.Topology) []*api.Topology {
if topologies == nil {
return nil
}
swarmTopologies := make([]*api.Topology, len(topologies))
for i, t := range topologies {
swarmTopologies[i] = unmakeTopology(t)
}
return swarmTopologies
}
// unmakeTopology transforms a CSI-type topology into the equivalent swarm
// type.
func unmakeTopology(topology *csi.Topology) *api.Topology {
return &api.Topology{
Segments: topology.Segments,
}
}
// makeVolumeInfo converts a csi.Volume object into a swarmkit VolumeInfo
// object.
func makeVolumeInfo(csiVolume *csi.Volume) *api.VolumeInfo {
return &api.VolumeInfo{
CapacityBytes: csiVolume.CapacityBytes,
VolumeContext: csiVolume.VolumeContext,
VolumeID: csiVolume.VolumeId,
AccessibleTopology: unmakeTopologies(csiVolume.AccessibleTopology),
}
}

vendor/github.com/docker/swarmkit/manager/csi/doc.go
View file

@ -0,0 +1,29 @@
package csi
// The `csi` package contains code for managing Swarmkit Cluster Volumes,
// which are powered by CSI drivers.
//
// This package stands separately from other manager components because of the
// unique nature of volumes. Volumes need to be allocated before they can be
// used, but the availability of a volume also imposes a scheduling constraint
// on the node. Further, the CSI lifecycle requires many different RPC calls at
// many points in the volume's life, which brings it out of the purview of any
// one component.
//
// In an ideal world, this package would live wholly within the allocator
// package, but the allocator is very fragile, and modifying it is more trouble
// than it's worth.
// Volume Lifecycle in Swarm
//
// Creation
//
// When a volume is created, the first thing the allocator does is contact the
// relevant CSI plugin in order to ensure that the volume is created, and to
// retrieve the associated volume ID. Volumes are always created when the
// swarmkit object is created, as opposed to being created when demanded by a
// Service.
//
// Assignment
//
// After a volume has been created, it may be used by one or more Tasks.

View file

@ -0,0 +1,481 @@
package csi
import (
"context"
"errors"
"fmt"
"sync"
"github.com/docker/go-events"
"github.com/sirupsen/logrus"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/manager/state/store"
"github.com/docker/swarmkit/volumequeue"
)
const (
// DockerCSIPluginCap is the capability name of the plugins we use with the
// PluginGetter to get only the plugins we need. The full name of the
// plugin interface is "docker.csicontroller/1.0". This gets only the CSI
// plugins with Controller capability.
DockerCSIPluginCap = "csicontroller"
)
type Manager struct {
store *store.MemoryStore
// provider is the SecretProvider which allows retrieving secrets. Used
// when creating new Plugin objects.
provider SecretProvider
// pg is the plugingetter, which allows us to access the Docker Engine's
// plugin store.
pg plugingetter.PluginGetter
// newPlugin is a function which returns an object implementing the Plugin
// interface. It allows us to swap out the implementation of plugins while
// unit-testing the Manager
newPlugin func(pc plugingetter.CompatPlugin, pa plugingetter.PluginAddr, provider SecretProvider) Plugin
// synchronization for starting and stopping the Manager
startOnce sync.Once
stopChan chan struct{}
stopOnce sync.Once
doneChan chan struct{}
plugins map[string]Plugin
pendingVolumes *volumequeue.VolumeQueue
}
func NewManager(s *store.MemoryStore, pg plugingetter.PluginGetter) *Manager {
return &Manager{
store: s,
stopChan: make(chan struct{}),
doneChan: make(chan struct{}),
newPlugin: NewPlugin,
pg: pg,
plugins: map[string]Plugin{},
provider: NewSecretProvider(s),
pendingVolumes: volumequeue.NewVolumeQueue(),
}
}
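// Typical wiring, as a sketch: construct the Manager with the shared store
// and the engine's plugin getter, run it in a goroutine, and stop it on
// shutdown (Stop blocks until the run loop has drained):
//
//	vm := NewManager(memoryStore, pluginGetter)
//	go vm.Run(ctx)
//	defer vm.Stop()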
// Run runs the manager. The provided context is used as the parent for all RPC
// calls made to the CSI plugins. Canceling this context will cancel those RPC
// calls by the nature of contexts, but this is not the preferred way to stop
// the Manager. Instead, Stop should be called, which causes all RPC calls to be
// canceled anyway. The context is also used to get the logging context for the
// Manager.
func (vm *Manager) Run(ctx context.Context) {
vm.startOnce.Do(func() {
vm.run(ctx)
})
}
// run performs the actual meat of the run operation.
//
// the argument is called pctx because it's the parent context, from which we
// immediately resolve a new child context.
func (vm *Manager) run(pctx context.Context) {
defer close(vm.doneChan)
ctx, ctxCancel := context.WithCancel(
log.WithModule(pctx, "csi/manager"),
)
defer ctxCancel()
watch, cancel, err := store.ViewAndWatch(vm.store, func(tx store.ReadTx) error {
// TODO(dperny): change this from ViewAndWatch to one that's just
// Watch.
return nil
})
if err != nil {
log.G(ctx).WithError(err).Error("error in store view and watch")
return
}
defer cancel()
vm.init(ctx)
// run a goroutine which periodically processes incoming volumes. the
// handle function will trigger processing every time new events come in
// by writing to the channel
doneProc := make(chan struct{})
go func() {
for {
id, attempt := vm.pendingVolumes.Wait()
// this case occurs when the stop method has been called on
// pendingVolumes. stop is called on pendingVolumes when Stop is
// called on the CSI manager.
if id == "" && attempt == 0 {
break
}
// TODO(dperny): we can launch some number of workers and process
// more than one volume at a time, if desired.
vm.processVolume(ctx, id, attempt)
}
// closing doneProc signals that this routine has exited, and allows
// the main Run routine to exit.
close(doneProc)
}()
// defer read from doneProc. doneProc is closed in the goroutine above,
// and this defer will block until then. Because defers are executed as a
// stack, this in turn blocks the final defer (closing doneChan) from
// running. Ultimately, this prevents Stop from returning until the above
// goroutine is closed.
defer func() {
<-doneProc
}()
for {
select {
case ev := <-watch:
vm.handleEvent(ev)
case <-vm.stopChan:
vm.pendingVolumes.Stop()
return
}
}
}
// processVolume encapsulates the logic for processing a pending Volume.
func (vm *Manager) processVolume(ctx context.Context, id string, attempt uint) {
// set up log fields for a derived context to pass to handleVolume.
dctx := log.WithFields(ctx, logrus.Fields{
"volume.id": id,
"attempt": attempt,
})
err := vm.handleVolume(dctx, id)
// TODO(dperny): differentiate between retryable and non-retryable
// errors.
if err != nil {
log.G(dctx).WithError(err).Info("error handling volume")
vm.pendingVolumes.Enqueue(id, attempt+1)
}
}
// init does one-time setup work for the Manager, like creating all of
// the Plugins and initializing the local state of the component.
func (vm *Manager) init(ctx context.Context) {
var (
nodes []*api.Node
volumes []*api.Volume
)
vm.store.View(func(tx store.ReadTx) {
var err error
nodes, err = store.FindNodes(tx, store.All)
if err != nil {
// this should *never happen*. Find only returns errors if the find
// by is invalid.
log.G(ctx).WithError(err).Error("error finding nodes")
}
volumes, err = store.FindVolumes(tx, store.All)
if err != nil {
// likewise, should never happen.
log.G(ctx).WithError(err).Error("error finding volumes")
}
})
for _, node := range nodes {
vm.handleNode(node)
}
// on initialization, we enqueue all of the Volumes. The easiest way to
// know if a Volume needs some work performed is to just pass it through
// the VolumeManager. If it doesn't need any work, then we will quickly
// skip by it. Otherwise, the needed work will be performed.
for _, volume := range volumes {
vm.enqueueVolume(volume.ID)
}
}
func (vm *Manager) Stop() {
vm.stopOnce.Do(func() {
close(vm.stopChan)
})
<-vm.doneChan
}
func (vm *Manager) handleEvent(ev events.Event) {
switch e := ev.(type) {
case api.EventCreateVolume:
vm.enqueueVolume(e.Volume.ID)
case api.EventUpdateVolume:
vm.enqueueVolume(e.Volume.ID)
case api.EventCreateNode:
vm.handleNode(e.Node)
case api.EventUpdateNode:
// for updates, we're only adding the node to every plugin. if the node
// no longer reports CSIInfo for a specific plugin, we will just leave
// the stale data in the plugin. this should not have any adverse
// effect, because the memory impact is small, and this operation
// should not be frequent. this may change as the code for volumes
// becomes more polished.
vm.handleNode(e.Node)
case api.EventDeleteNode:
vm.handleNodeRemove(e.Node.ID)
}
}
func (vm *Manager) createVolume(ctx context.Context, v *api.Volume) error {
l := log.G(ctx).WithField("volume.id", v.ID).WithField("driver", v.Spec.Driver.Name)
l.Info("creating volume")
p, err := vm.getPlugin(v.Spec.Driver.Name)
if err != nil {
l.Errorf("volume creation failed: %s", err.Error())
return err
}
info, err := p.CreateVolume(ctx, v)
if err != nil {
l.WithError(err).Error("volume create failed")
return err
}
err = vm.store.Update(func(tx store.Tx) error {
v2 := store.GetVolume(tx, v.ID)
// the volume should never be missing. I don't know of any race
// condition that could result in this behavior. nevertheless, it's
// better to do this than to segfault.
if v2 == nil {
return nil
}
v2.VolumeInfo = info
return store.UpdateVolume(tx, v2)
})
if err != nil {
l.WithError(err).Error("committing created volume to store failed")
}
return err
}
// enqueueVolume enqueues a new volume event, placing the Volume ID into
// pendingVolumes to be processed. Because enqueueVolume is only called in
// response to a new Volume update event, not for a retry, the retry number is
// always reset to 0.
func (vm *Manager) enqueueVolume(id string) {
vm.pendingVolumes.Enqueue(id, 0)
}
// handleVolume processes a Volume. It determines if any relevant update has
// occurred, and does the required work to handle that update if so.
//
// returns an error if handling the volume failed and needs to be retried.
//
// even if an error is returned, the store may still be updated.
func (vm *Manager) handleVolume(ctx context.Context, id string) error {
var volume *api.Volume
vm.store.View(func(tx store.ReadTx) {
volume = store.GetVolume(tx, id)
})
if volume == nil {
// if the volume no longer exists, there is nothing to do, nothing to
// retry, and no relevant error.
return nil
}
if volume.VolumeInfo == nil {
return vm.createVolume(ctx, volume)
}
if volume.PendingDelete {
return vm.deleteVolume(ctx, volume)
}
updated := false
// TODO(dperny): it's just pointers, but copying the entire PublishStatus
// on each update might be intensive.
// we take a copy of the PublishStatus slice, because if we succeed in an
// unpublish operation, we will delete that status from PublishStatus.
statuses := make([]*api.VolumePublishStatus, len(volume.PublishStatus))
copy(statuses, volume.PublishStatus)
// failedPublishOrUnpublish is a slice of nodes where publish or unpublish
// operations failed. Publishing or unpublishing a volume can succeed or
// fail in part. If any failures occur, we will add the node ID of the
// publish operation that failed to this slice. Then, at the end of this
// function, after we update the store, if there are any failed operations,
// we will still return an error.
failedPublishOrUnpublish := []string{}
// adjustIndex is the number of entries deleted from volume.PublishStatus.
// when we're deleting entries from volume.PublishStatus, the index of the
// entry in statuses will no longer match the index of the same entry in
// volume.PublishStatus. we subtract adjustIndex from i to get the index
// where the entry is found after taking into account the deleted entries.
adjustIndex := 0
for i, status := range statuses {
switch status.State {
case api.VolumePublishStatus_PENDING_PUBLISH:
plug, err := vm.getPlugin(volume.Spec.Driver.Name)
if err != nil {
status.Message = fmt.Sprintf("error publishing volume: %v", err)
failedPublishOrUnpublish = append(failedPublishOrUnpublish, status.NodeID)
} else {
publishContext, err := plug.PublishVolume(ctx, volume, status.NodeID)
if err == nil {
status.State = api.VolumePublishStatus_PUBLISHED
status.PublishContext = publishContext
status.Message = ""
} else {
status.Message = fmt.Sprintf("error publishing volume: %v", err)
failedPublishOrUnpublish = append(failedPublishOrUnpublish, status.NodeID)
}
}
updated = true
case api.VolumePublishStatus_PENDING_UNPUBLISH:
plug, err := vm.getPlugin(volume.Spec.Driver.Name)
if err != nil {
status.Message = fmt.Sprintf("error unpublishing volume: %v", err)
failedPublishOrUnpublish = append(failedPublishOrUnpublish, status.NodeID)
} else {
err := plug.UnpublishVolume(ctx, volume, status.NodeID)
if err == nil {
// if there is no error with unpublishing, then we delete the
// status from the statuses slice.
j := i - adjustIndex
volume.PublishStatus = append(volume.PublishStatus[:j], volume.PublishStatus[j+1:]...)
adjustIndex++
} else {
status.Message = fmt.Sprintf("error unpublishing volume: %v", err)
failedPublishOrUnpublish = append(failedPublishOrUnpublish, status.NodeID)
}
}
updated = true
}
}
if updated {
if err := vm.store.Update(func(tx store.Tx) error {
// the publish status is now authoritative. read-update-write the
// volume object.
v := store.GetVolume(tx, volume.ID)
if v == nil {
// volume should never be deleted with pending publishes. if
// this does occur somehow, then we will just ignore it, rather
// than crashing.
return nil
}
v.PublishStatus = volume.PublishStatus
return store.UpdateVolume(tx, v)
}); err != nil {
return err
}
}
if len(failedPublishOrUnpublish) > 0 {
return fmt.Errorf("error publishing or unpublishing to some nodes: %v", failedPublishOrUnpublish)
}
return nil
}
// handleNode handles one node event
func (vm *Manager) handleNode(n *api.Node) {
if n.Description == nil {
return
}
// we just call AddNode on every update. Because it's just a map
// assignment, this is probably faster than checking if something changed.
for _, info := range n.Description.CSIInfo {
p, err := vm.getPlugin(info.PluginName)
if err != nil {
log.L.Warnf("error handling node: %v", err)
// TODO(dperny): log something
continue
}
p.AddNode(n.ID, info.NodeID)
}
}
// handleNodeRemove handles a node delete event
func (vm *Manager) handleNodeRemove(nodeID string) {
// we just call RemoveNode on every plugin, because it's probably quicker
// than checking if the node was using that plugin.
//
// we don't need to worry about lazy-loading here, because if we don't have
// the plugin loaded, there's no need to call remove.
for _, plugin := range vm.plugins {
plugin.RemoveNode(nodeID)
}
}
func (vm *Manager) deleteVolume(ctx context.Context, v *api.Volume) error {
// TODO(dperny): handle missing plugin
plug, err := vm.getPlugin(v.Spec.Driver.Name)
if err != nil {
return err
}
err = plug.DeleteVolume(ctx, v)
if err != nil {
return err
}
// TODO(dperny): handle update error
return vm.store.Update(func(tx store.Tx) error {
return store.DeleteVolume(tx, v.ID)
})
}
// getPlugin returns the plugin with the given name.
//
// In a previous iteration of the architecture of this component, plugins were
// added to the manager through an update to the Cluster object, which
// triggered an event. In other words, they were eagerly loaded.
//
// When rearchitecting to use the plugingetter.PluginGetter interface, that
// eager loading is no longer practical, because the method for getting events
// about new plugins would be difficult to plumb this deep into swarm.
//
// Instead, we change from what was previously a bunch of raw map lookups to
// instead a method call which lazy-loads the plugins as needed. This is fine,
// because in the Plugin object itself, the network connection is made lazily
// as well.
//
// TODO(dperny): There is no way to unload a plugin. Unloading plugins will
// happen as part of a leadership change, but otherwise, on especially
// long-lived managers with especially high plugin churn, this is a memory
// leak. It's acceptable for now because we expect neither exceptionally long
// lived managers nor exceptionally high plugin churn.
func (vm *Manager) getPlugin(name string) (Plugin, error) {
// if the plugin already exists, we can just return it.
if p, ok := vm.plugins[name]; ok {
return p, nil
}
// otherwise, we need to load the plugin.
pc, err := vm.pg.Get(name, DockerCSIPluginCap, plugingetter.Lookup)
if err != nil {
return nil, err
}
if pc == nil {
return nil, errors.New("driver \"" + name + "\" not found")
}
pa, ok := pc.(plugingetter.PluginAddr)
if !ok {
return nil, errors.New("plugin for driver \"" + name + "\" does not implement PluginAddr")
}
p := vm.newPlugin(pc, pa, vm.provider)
vm.plugins[name] = p
return p, nil
}

vendor/github.com/docker/swarmkit/manager/csi/plugin.go generated vendored Normal file
View file

@ -0,0 +1,334 @@
package csi
import (
"context"
"errors"
"fmt"
"google.golang.org/grpc"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/swarmkit/api"
)
// Plugin is the interface for a CSI controller plugin.
//
// In this package, the word "plugin" is unfortunately overused. This
// particular "Plugin" is the interface used by volume Manager to interact with
// CSI controller plugins. It should not be confused with the "plugin" returned
// from the plugingetter interface, which is the interface that gives us the
// information we need to create this Plugin.
type Plugin interface {
CreateVolume(context.Context, *api.Volume) (*api.VolumeInfo, error)
DeleteVolume(context.Context, *api.Volume) error
PublishVolume(context.Context, *api.Volume, string) (map[string]string, error)
UnpublishVolume(context.Context, *api.Volume, string) error
AddNode(swarmID, csiID string)
RemoveNode(swarmID string)
}
// plugin represents an individual CSI controller plugin
type plugin struct {
// name is the name of the plugin, which is also the name used as the
// Driver.Name field
name string
// socket is the unix socket to connect to this plugin at.
socket string
// provider is the SecretProvider, which allows retrieving secrets for CSI
// calls.
provider SecretProvider
// cc is the grpc client connection
// TODO(dperny): the client is never closed. it may be closed when it goes
// out of scope, but this should be verified.
cc *grpc.ClientConn
// idClient is the identity service client
idClient csi.IdentityClient
// controllerClient is the controller service client
controllerClient csi.ControllerClient
// controller indicates that the plugin has controller capabilities.
controller bool
// publisher indicates that the controller plugin has
// PUBLISH_UNPUBLISH_VOLUME capability.
publisher bool
// swarmToCSI maps a swarm node ID to the corresponding CSI node ID
swarmToCSI map[string]string
// csiToSwarm maps a CSI node ID back to the swarm node ID.
csiToSwarm map[string]string
}
// NewPlugin creates a new Plugin object.
//
// NewPlugin takes both the CompatPlugin and the PluginAddr. These should be
// the same object. By taking both parts here, we can push off the work of
// assuring that the given plugin implements the PluginAddr interface without
// having to typecast in this constructor.
func NewPlugin(pc plugingetter.CompatPlugin, pa plugingetter.PluginAddr, provider SecretProvider) Plugin {
return &plugin{
name: pc.Name(),
// TODO(dperny): verify that we do not need to include the Network()
// portion of the Addr.
socket: fmt.Sprintf("%s://%s", pa.Addr().Network(), pa.Addr().String()),
provider: provider,
swarmToCSI: map[string]string{},
csiToSwarm: map[string]string{},
}
}
// connect is a private method that initializes a gRPC ClientConn and creates
// the IdentityClient and ControllerClient.
func (p *plugin) connect(ctx context.Context) error {
cc, err := grpc.DialContext(ctx, p.socket, grpc.WithInsecure())
if err != nil {
return err
}
p.cc = cc
// first, probe the plugin, to ensure that it exists and is ready to go
idc := csi.NewIdentityClient(cc)
p.idClient = idc
// controllerClient may not do anything if the plugin does not support
// the controller service, but it should not be an error to create it now
// anyway
p.controllerClient = csi.NewControllerClient(cc)
return p.init(ctx)
}
// init uses the identity service to check the properties of the plugin,
// most importantly its capabilities.
func (p *plugin) init(ctx context.Context) error {
probe, err := p.idClient.Probe(ctx, &csi.ProbeRequest{})
if err != nil {
return err
}
if probe.Ready != nil && !probe.Ready.Value {
return errors.New("plugin not ready")
}
resp, err := p.idClient.GetPluginCapabilities(ctx, &csi.GetPluginCapabilitiesRequest{})
if err != nil {
return err
}
if resp == nil {
return nil
}
for _, c := range resp.Capabilities {
if sc := c.GetService(); sc != nil {
switch sc.Type {
case csi.PluginCapability_Service_CONTROLLER_SERVICE:
p.controller = true
}
}
}
if p.controller {
cCapResp, err := p.controllerClient.ControllerGetCapabilities(
ctx, &csi.ControllerGetCapabilitiesRequest{},
)
if err != nil {
return err
}
for _, c := range cCapResp.Capabilities {
rpc := c.GetRpc()
// GetType is nil-safe, so a capability that is not an RPC type is
// simply skipped.
if rpc.GetType() == csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME {
p.publisher = true
}
}
}
return nil
}
// CreateVolume wraps and abstracts the CSI CreateVolume logic and returns
// the volume info, or an error.
func (p *plugin) CreateVolume(ctx context.Context, v *api.Volume) (*api.VolumeInfo, error) {
c, err := p.Client(ctx)
if err != nil {
return nil, err
}
if !p.controller {
// TODO(dperny): come up with a scheme to handle headless plugins
// TODO(dperny): handle plugins without create volume capabilities
return &api.VolumeInfo{VolumeID: v.Spec.Annotations.Name}, nil
}
createVolumeRequest := p.makeCreateVolume(v)
resp, err := c.CreateVolume(ctx, createVolumeRequest)
if err != nil {
return nil, err
}
return makeVolumeInfo(resp.Volume), nil
}
func (p *plugin) DeleteVolume(ctx context.Context, v *api.Volume) error {
if v.VolumeInfo == nil {
return errors.New("VolumeInfo must not be nil")
}
// we won't use a fancy createDeleteVolumeRequest method because the
// request is simple enough to not bother with it
secrets := p.makeSecrets(v)
req := &csi.DeleteVolumeRequest{
VolumeId: v.VolumeInfo.VolumeID,
Secrets: secrets,
}
c, err := p.Client(ctx)
if err != nil {
return err
}
// response from RPC intentionally left blank
_, err = c.DeleteVolume(ctx, req)
return err
}
// PublishVolume calls ControllerPublishVolume to publish the given Volume to
// the Node with the given swarmkit ID. It returns a map, which is the
// PublishContext for this Volume on this Node.
func (p *plugin) PublishVolume(ctx context.Context, v *api.Volume, nodeID string) (map[string]string, error) {
if !p.publisher {
return nil, nil
}
req := p.makeControllerPublishVolumeRequest(v, nodeID)
c, err := p.Client(ctx)
if err != nil {
return nil, err
}
resp, err := c.ControllerPublishVolume(ctx, req)
if err != nil {
return nil, err
}
return resp.PublishContext, nil
}
// UnpublishVolume calls ControllerUnpublishVolume to unpublish the given
// Volume from the Node with the given swarmkit ID. It returns an error if the
// unpublish does not succeed.
func (p *plugin) UnpublishVolume(ctx context.Context, v *api.Volume, nodeID string) error {
if !p.publisher {
return nil
}
req := p.makeControllerUnpublishVolumeRequest(v, nodeID)
c, err := p.Client(ctx)
if err != nil {
return err
}
// response of the RPC intentionally left blank
_, err = c.ControllerUnpublishVolume(ctx, req)
return err
}
// AddNode adds a mapping for a node's swarm ID to the ID provided by this CSI
// plugin. This allows future calls to the plugin to be done entirely in terms
// of the swarm node ID.
//
// The CSI node ID is provided by the node as part of the NodeDescription.
func (p *plugin) AddNode(swarmID, csiID string) {
p.swarmToCSI[swarmID] = csiID
p.csiToSwarm[csiID] = swarmID
}
// RemoveNode removes a node from this plugin's node mappings.
func (p *plugin) RemoveNode(swarmID string) {
csiID := p.swarmToCSI[swarmID]
delete(p.swarmToCSI, swarmID)
delete(p.csiToSwarm, csiID)
}
// Client retrieves a csi.ControllerClient for this plugin
//
// If this is the first time Client has been called and no client yet exists,
// it will initialize the gRPC connection to the remote plugin and create a new
// ControllerClient.
func (p *plugin) Client(ctx context.Context) (csi.ControllerClient, error) {
if p.controllerClient == nil {
if err := p.connect(ctx); err != nil {
return nil, err
}
}
return p.controllerClient, nil
}
// makeCreateVolume makes a csi.CreateVolumeRequest from the volume object and
// spec. it uses the Plugin's SecretProvider to retrieve relevant secrets.
func (p *plugin) makeCreateVolume(v *api.Volume) *csi.CreateVolumeRequest {
secrets := p.makeSecrets(v)
return &csi.CreateVolumeRequest{
Name: v.Spec.Annotations.Name,
Parameters: v.Spec.Driver.Options,
VolumeCapabilities: []*csi.VolumeCapability{
makeCapability(v.Spec.AccessMode),
},
Secrets: secrets,
AccessibilityRequirements: makeTopologyRequirement(v.Spec.AccessibilityRequirements),
CapacityRange: makeCapacityRange(v.Spec.CapacityRange),
}
}
// makeSecrets uses the plugin's SecretProvider to make the secrets map to pass
// to CSI RPCs.
func (p *plugin) makeSecrets(v *api.Volume) map[string]string {
secrets := map[string]string{}
for _, vs := range v.Spec.Secrets {
// a secret should never be nil, but check just to be sure
if vs != nil {
secret := p.provider.GetSecret(vs.Secret)
if secret != nil {
// TODO(dperny): return an error, but this should never happen,
// as secrets should be validated at volume creation time
secrets[vs.Key] = string(secret.Spec.Data)
}
}
}
return secrets
}
func (p *plugin) makeControllerPublishVolumeRequest(v *api.Volume, nodeID string) *csi.ControllerPublishVolumeRequest {
if v.VolumeInfo == nil {
return nil
}
secrets := p.makeSecrets(v)
capability := makeCapability(v.Spec.AccessMode)
capability.AccessType = &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
}
return &csi.ControllerPublishVolumeRequest{
VolumeId: v.VolumeInfo.VolumeID,
NodeId: p.swarmToCSI[nodeID],
Secrets: secrets,
VolumeCapability: capability,
VolumeContext: v.VolumeInfo.VolumeContext,
}
}
func (p *plugin) makeControllerUnpublishVolumeRequest(v *api.Volume, nodeID string) *csi.ControllerUnpublishVolumeRequest {
if v.VolumeInfo == nil {
return nil
}
secrets := p.makeSecrets(v)
return &csi.ControllerUnpublishVolumeRequest{
VolumeId: v.VolumeInfo.VolumeID,
NodeId: p.swarmToCSI[nodeID],
Secrets: secrets,
}
}
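// a minimal sketch of the intended call pattern, assuming pc and pa come
// from a plugingetter lookup: no network activity happens at construction
// time; the first use of the controller client dials the socket and probes
// the identity service.
//
//	p := NewPlugin(pc, pa, provider)
//	info, err := p.CreateVolume(ctx, v) // connect(), Probe, then create
//	if err != nil {
//		return err
//	}
//	_ = info // VolumeInfo carries the CSI VolumeID and VolumeContext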

View file

@ -0,0 +1,34 @@
package csi
import (
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/manager/state/store"
)
// SecretProvider is an interface for retrieving secrets to use with CSI calls.
type SecretProvider interface {
// GetSecret returns the secret with the given ID, or nil if not found.
GetSecret(id string) *api.Secret
}
type secretProvider struct {
s *store.MemoryStore
}
func NewSecretProvider(s *store.MemoryStore) SecretProvider {
return &secretProvider{
s: s,
}
}
// GetSecret returns the secret with the given ID, or nil if not found.
//
// This method accesses the store, and so should not be called from inside
// another store transaction
func (p *secretProvider) GetSecret(id string) *api.Secret {
var secret *api.Secret
p.s.View(func(tx store.ReadTx) {
secret = store.GetSecret(tx, id)
})
return secret
}
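// a minimal usage sketch, assuming a *store.MemoryStore named s and a
// secret ID in hand; because GetSecret opens its own read transaction,
// it must be called outside of any other View or Update:
//
//	provider := NewSecretProvider(s)
//	if secret := provider.GetSecret(id); secret != nil {
//		_ = secret.Spec.Data // raw secret bytes
//	}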

View file

@ -18,18 +18,30 @@ type typeAndID struct {
}
type assignmentSet struct {
dp *drivers.DriverProvider
tasksMap map[string]*api.Task
nodeID string
dp *drivers.DriverProvider
tasksMap map[string]*api.Task
// volumesMap keeps track of the VolumePublishStatus of the given volumes.
// this tells us both which volumes are assigned to the node, and what the
// last known VolumePublishStatus was, so we can understand if we need to
// send an update.
volumesMap map[string]*api.VolumePublishStatus
// tasksUsingDependency tracks both tasks and volumes using a given
// dependency. this works because the IDs generated by swarm come from a
// large enough space that it is astronomically unlikely that IDs
// will ever collide.
tasksUsingDependency map[typeAndID]map[string]struct{}
changes map[typeAndID]*api.AssignmentChange
log *logrus.Entry
}
func newAssignmentSet(log *logrus.Entry, dp *drivers.DriverProvider) *assignmentSet {
func newAssignmentSet(nodeID string, log *logrus.Entry, dp *drivers.DriverProvider) *assignmentSet {
return &assignmentSet{
nodeID: nodeID,
dp: dp,
changes: make(map[typeAndID]*api.AssignmentChange),
tasksMap: make(map[string]*api.Task),
volumesMap: make(map[string]*api.VolumePublishStatus),
tasksUsingDependency: make(map[typeAndID]map[string]struct{}),
log: log,
}
@ -48,15 +60,17 @@ func assignSecret(a *assignmentSet, readTx store.ReadTx, mapKey typeAndID, t *ap
}).Debug("failed to fetch secret")
return
}
// If the secret should not be reused for other tasks, give it a unique ID for the task to allow different values for different tasks.
// If the secret should not be reused for other tasks, give it a unique ID
// for the task to allow different values for different tasks.
if doNotReuse {
// Give the secret a new ID and mark it as internal
originalSecretID := secret.ID
taskSpecificID := identity.CombineTwoIDs(originalSecretID, t.ID)
secret.ID = taskSpecificID
secret.Internal = true
// Create a new mapKey with the new ID and insert it into the dependencies map for the task.
// This will make the changes map contain an entry with the new ID rather than the original one.
// Create a new mapKey with the new ID and insert it into the
// dependencies map for the task. This will make the changes map
// contain an entry with the new ID rather than the original one.
mapKey = typeAndID{objType: mapKey.objType, id: secret.ID}
a.tasksUsingDependency[mapKey] = make(map[string]struct{})
a.tasksUsingDependency[mapKey][t.ID] = struct{}{}
@ -92,8 +106,12 @@ func assignConfig(a *assignmentSet, readTx store.ReadTx, mapKey typeAndID) {
}
func (a *assignmentSet) addTaskDependencies(readTx store.ReadTx, t *api.Task) {
// first, we go through all ResourceReferences, which give us the necessary
// information about which secrets and configs are in use.
for _, resourceRef := range t.Spec.ResourceReferences {
mapKey := typeAndID{objType: resourceRef.ResourceType, id: resourceRef.ResourceID}
// if there are no tasks using this dependency yet, then we can assign
// it.
if len(a.tasksUsingDependency[mapKey]) == 0 {
switch resourceRef.ResourceType {
case api.ResourceType_SECRET:
@ -107,6 +125,8 @@ func (a *assignmentSet) addTaskDependencies(readTx store.ReadTx, t *api.Task) {
continue
}
}
// otherwise, we don't need to add a new assignment. we just need to
// track the fact that another task is now using this dependency.
a.tasksUsingDependency[mapKey][t.ID] = struct{}{}
}
@ -160,7 +180,9 @@ func (a *assignmentSet) releaseDependency(mapKey typeAndID, assignment *api.Assi
return true
}
func (a *assignmentSet) releaseTaskDependencies(t *api.Task) bool {
// releaseTaskDependencies needs a store transaction because volumes have
// associated Secrets which need to be released.
func (a *assignmentSet) releaseTaskDependencies(readTx store.ReadTx, t *api.Task) bool {
var modified bool
for _, resourceRef := range t.Spec.ResourceReferences {
@ -251,7 +273,7 @@ func (a *assignmentSet) addOrUpdateTask(readTx store.ReadTx, t *api.Task) bool {
// If releasing the dependencies caused us to
// remove something from the assignment set,
// mark one modification.
return a.releaseTaskDependencies(t)
return a.releaseTaskDependencies(readTx, t)
}
return false
}
@ -274,7 +296,113 @@ func (a *assignmentSet) addOrUpdateTask(readTx store.ReadTx, t *api.Task) bool {
return true
}
func (a *assignmentSet) removeTask(t *api.Task) bool {
// addOrUpdateVolume tracks a Volume assigned to a node.
func (a *assignmentSet) addOrUpdateVolume(readTx store.ReadTx, v *api.Volume) bool {
var publishStatus *api.VolumePublishStatus
for _, status := range v.PublishStatus {
if status.NodeID == a.nodeID {
publishStatus = status
break
}
}
// if there is no publishStatus for this Volume on this Node, or if the
// Volume has not yet been published to this node, then we do not need to
// track this assignment.
if publishStatus == nil || publishStatus.State < api.VolumePublishStatus_PUBLISHED {
return false
}
// check if we are already tracking this volume, and what its old status
// is. if the states are identical, then we don't have any update to make.
if oldStatus, ok := a.volumesMap[v.ID]; ok && oldStatus.State == publishStatus.State {
return false
}
// if the volume has already been confirmed as unpublished, we can stop
// tracking it and remove its dependencies.
if publishStatus.State > api.VolumePublishStatus_PENDING_NODE_UNPUBLISH {
return a.removeVolume(readTx, v)
}
for _, secret := range v.Spec.Secrets {
mapKey := typeAndID{objType: api.ResourceType_SECRET, id: secret.Secret}
if len(a.tasksUsingDependency[mapKey]) == 0 {
// we can call assignSecret with task being nil, but it does mean
// that any secret that uses a driver will not work. we'll call
// that a limitation of volumes for now.
assignSecret(a, readTx, mapKey, nil)
}
a.tasksUsingDependency[mapKey][v.ID] = struct{}{}
}
// volumes are sent to nodes as VolumeAssignments. This is because a node
// needs node-specific information (the PublishContext from
// ControllerPublishVolume).
assignment := &api.VolumeAssignment{
ID: v.ID,
VolumeID: v.VolumeInfo.VolumeID,
Driver: v.Spec.Driver,
VolumeContext: v.VolumeInfo.VolumeContext,
PublishContext: publishStatus.PublishContext,
AccessMode: v.Spec.AccessMode,
Secrets: v.Spec.Secrets,
}
volumeKey := typeAndID{objType: api.ResourceType_VOLUME, id: v.ID}
// assignmentChange is the whole assignment without the action, which we
// will set next
assignmentChange := &api.AssignmentChange{
Assignment: &api.Assignment{
Item: &api.Assignment_Volume{
Volume: assignment,
},
},
}
// if we're in state PENDING_NODE_UNPUBLISH, we actually need to send a
// remove message. we do this every time, even if the node never got the
// first add assignment. This is because the node might not know that it
// has a volume published; for example, the node may be restarting, and
// the in-memory store does not have knowledge of the volume.
if publishStatus.State == api.VolumePublishStatus_PENDING_NODE_UNPUBLISH {
assignmentChange.Action = api.AssignmentChange_AssignmentActionRemove
} else {
assignmentChange.Action = api.AssignmentChange_AssignmentActionUpdate
}
a.changes[volumeKey] = assignmentChange
a.volumesMap[v.ID] = publishStatus
return true
}
func (a *assignmentSet) removeVolume(readTx store.ReadTx, v *api.Volume) bool {
if _, exists := a.volumesMap[v.ID]; !exists {
return false
}
modified := false
// if the volume does exist, we can release its secrets
for _, secret := range v.Spec.Secrets {
mapKey := typeAndID{objType: api.ResourceType_SECRET, id: secret.Secret}
assignment := &api.Assignment{
Item: &api.Assignment_Secret{
Secret: &api.Secret{ID: secret.Secret},
},
}
if a.releaseDependency(mapKey, assignment, v.ID) {
modified = true
}
}
// we don't need to add a removal message. the removal of the
// VolumeAssignment will have already happened.
delete(a.volumesMap, v.ID)
return modified
}
func (a *assignmentSet) removeTask(readTx store.ReadTx, t *api.Task) bool {
if _, exists := a.tasksMap[t.ID]; !exists {
return false
}
@ -293,7 +421,7 @@ func (a *assignmentSet) removeTask(t *api.Task) bool {
// Release the dependencies being used by this task.
// Ignoring the return here. We will always mark this as a
// modification, since a task is being removed.
a.releaseTaskDependencies(t)
a.releaseTaskDependencies(readTx, t)
return true
}

View file

@ -154,6 +154,12 @@ type Dispatcher struct {
nodeUpdates map[string]nodeUpdate // indexed by node ID
nodeUpdatesLock sync.Mutex
// unpublishedVolumes keeps track of Volumes that Nodes have reported as
// unpublished. it maps the volume ID to a list of nodes it has been
// unpublished on.
unpublishedVolumes map[string][]string
unpublishedVolumesLock sync.Mutex
downNodes *nodeStore
processUpdatesTrigger chan struct{}
@ -223,6 +229,10 @@ func (d *Dispatcher) Run(ctx context.Context) error {
d.nodeUpdates = make(map[string]nodeUpdate)
d.nodeUpdatesLock.Unlock()
d.unpublishedVolumesLock.Lock()
d.unpublishedVolumes = make(map[string][]string)
d.unpublishedVolumesLock.Unlock()
d.mu.Lock()
if d.isRunning() {
d.mu.Unlock()
@ -305,6 +315,8 @@ func (d *Dispatcher) Run(ctx context.Context) error {
// batch timer has already expired, so no need to drain
batchTimer.Reset(maxBatchInterval)
case v := <-configWatcher:
// TODO(dperny): remove extraneous log message
log.G(ctx).Info("cluster update event")
cluster := v.(api.EventUpdateCluster)
d.mu.Lock()
if cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod != nil {
@ -664,14 +676,61 @@ func (d *Dispatcher) UpdateTaskStatus(ctx context.Context, r *api.UpdateTaskStat
case <-dctx.Done():
}
}
return nil, nil
return &api.UpdateTaskStatusResponse{}, nil
}
func (d *Dispatcher) UpdateVolumeStatus(ctx context.Context, r *api.UpdateVolumeStatusRequest) (*api.UpdateVolumeStatusResponse, error) {
d.rpcRW.RLock()
defer d.rpcRW.RUnlock()
_, err := d.isRunningLocked()
if err != nil {
return nil, err
}
nodeInfo, err := ca.RemoteNode(ctx)
if err != nil {
return nil, err
}
nodeID := nodeInfo.NodeID
fields := logrus.Fields{
"node.id": nodeID,
"node.session": r.SessionID,
"method": "(*Dispatcher).UpdateVolumeStatus",
}
if nodeInfo.ForwardedBy != nil {
fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
}
log := log.G(ctx).WithFields(fields)
if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return nil, err
}
d.unpublishedVolumesLock.Lock()
for _, status := range r.Updates {
if status.Unpublished {
// it's ok if nodes is nil, because append works on a nil slice.
nodes := append(d.unpublishedVolumes[status.ID], nodeID)
d.unpublishedVolumes[status.ID] = nodes
log.Debugf("volume %s unpublished on node %s", status.ID, nodeID)
}
}
d.unpublishedVolumesLock.Unlock()
// we won't kick off a batch here, we'll just wait for the timer.
return &api.UpdateVolumeStatusResponse{}, nil
}
func (d *Dispatcher) processUpdates(ctx context.Context) {
var (
taskUpdates map[string]*api.TaskStatus
nodeUpdates map[string]nodeUpdate
taskUpdates map[string]*api.TaskStatus
nodeUpdates map[string]nodeUpdate
unpublishedVolumes map[string][]string
)
d.taskUpdatesLock.Lock()
if len(d.taskUpdates) != 0 {
taskUpdates = d.taskUpdates
@ -686,7 +745,14 @@ func (d *Dispatcher) processUpdates(ctx context.Context) {
}
d.nodeUpdatesLock.Unlock()
if len(taskUpdates) == 0 && len(nodeUpdates) == 0 {
d.unpublishedVolumesLock.Lock()
if len(d.unpublishedVolumes) != 0 {
unpublishedVolumes = d.unpublishedVolumes
d.unpublishedVolumes = make(map[string][]string)
}
d.unpublishedVolumesLock.Unlock()
if len(taskUpdates) == 0 && len(nodeUpdates) == 0 && len(unpublishedVolumes) == 0 {
return
}
@ -749,7 +815,7 @@ func (d *Dispatcher) processUpdates(ctx context.Context) {
logger := log.WithField("node.id", nodeID)
node := store.GetNode(tx, nodeID)
if node == nil {
logger.Errorf("node unavailable")
logger.Error("node unavailable")
return nil
}
@ -776,6 +842,37 @@ func (d *Dispatcher) processUpdates(ctx context.Context) {
}
}
for volumeID, nodes := range unpublishedVolumes {
err := batch.Update(func(tx store.Tx) error {
logger := log.WithField("volume.id", volumeID)
volume := store.GetVolume(tx, volumeID)
if volume == nil {
logger.Error("volume unavailable")
// nothing to update if the volume is gone; skip these nodes
// rather than dereferencing a nil volume below.
return nil
}
// buckle your seatbelts, we're going quadratic.
nodesLoop:
for _, nodeID := range nodes {
for _, status := range volume.PublishStatus {
if status.NodeID == nodeID {
status.State = api.VolumePublishStatus_PENDING_UNPUBLISH
continue nodesLoop
}
}
}
if err := store.UpdateVolume(tx, volume); err != nil {
logger.WithError(err).Error("failed to update volume")
return nil
}
return nil
})
if err != nil {
log.WithError(err).Error("dispatcher volume update transaction failed")
}
}
return nil
})
if err != nil {
@ -947,7 +1044,7 @@ func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatche
var (
sequence int64
appliesTo string
assignments = newAssignmentSet(log, d.dp)
assignments = newAssignmentSet(nodeID, log, d.dp)
)
sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error {
@ -974,12 +1071,45 @@ func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatche
assignments.addOrUpdateTask(readTx, t)
}
// there is no quick index for which nodes are using a volume, but
// there should not be thousands of volumes in a typical
// deployment, so this should be ok
volumes, err := store.FindVolumes(readTx, store.All)
if err != nil {
return err
}
for _, v := range volumes {
for _, status := range v.PublishStatus {
if status.NodeID == nodeID {
assignments.addOrUpdateVolume(readTx, v)
}
}
}
return nil
},
api.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
api.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}},
api.EventUpdateVolume{
// typically, a check function takes an object from this
// prototypical event and compares it to the object from the
// incoming event. However, because this is a bespoke, in-line
// matcher, we can discard the first argument (the prototype) and
// instead pass the desired node ID in as part of a closure.
Checks: []api.VolumeCheckFunc{
func(v1, v2 *api.Volume) bool {
for _, status := range v2.PublishStatus {
if status.NodeID == nodeID {
return true
}
}
return false
},
},
},
)
if err != nil {
return err
@ -1035,11 +1165,26 @@ func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatche
}
})
case api.EventDeleteTask:
if assignments.removeTask(v.Task) {
oneModification()
}
d.store.View(func(readTx store.ReadTx) {
if assignments.removeTask(readTx, v.Task) {
oneModification()
}
})
// TODO(aaronl): For node secrets, we'll need to handle
// EventCreateSecret.
case api.EventUpdateVolume:
d.store.View(func(readTx store.ReadTx) {
vol := store.GetVolume(readTx, v.Volume.ID)
if vol == nil {
// the volume may have been deleted between the event
// firing and this read; nothing to assign in that case.
return
}
// check through the PublishStatus to see if there is
// one for this node.
for _, status := range vol.PublishStatus {
if status.NodeID == nodeID {
if assignments.addOrUpdateVolume(readTx, vol) {
oneModification()
}
}
}
})
}
case <-batchingTimeout:
break batchingLoop

View file

@ -25,6 +25,7 @@ import (
"github.com/docker/swarmkit/manager/allocator/cnmallocator"
"github.com/docker/swarmkit/manager/allocator/networkallocator"
"github.com/docker/swarmkit/manager/controlapi"
"github.com/docker/swarmkit/manager/csi"
"github.com/docker/swarmkit/manager/dispatcher"
"github.com/docker/swarmkit/manager/drivers"
"github.com/docker/swarmkit/manager/health"
@ -36,6 +37,7 @@ import (
"github.com/docker/swarmkit/manager/orchestrator/jobs"
"github.com/docker/swarmkit/manager/orchestrator/replicated"
"github.com/docker/swarmkit/manager/orchestrator/taskreaper"
"github.com/docker/swarmkit/manager/orchestrator/volumeenforcer"
"github.com/docker/swarmkit/manager/resourceapi"
"github.com/docker/swarmkit/manager/scheduler"
"github.com/docker/swarmkit/manager/state/raft"
@ -150,8 +152,10 @@ type Manager struct {
jobsOrchestrator *jobs.Orchestrator
taskReaper *taskreaper.TaskReaper
constraintEnforcer *constraintenforcer.ConstraintEnforcer
volumeEnforcer *volumeenforcer.VolumeEnforcer
scheduler *scheduler.Scheduler
allocator *allocator.Allocator
volumeManager *csi.Manager
keyManager *keymanager.KeyManager
server *grpc.Server
localserver *grpc.Server
@ -200,13 +204,13 @@ func (l *closeOnceListener) Close() error {
// New creates a Manager which has not started to accept requests yet.
func New(config *Config) (*Manager, error) {
err := os.MkdirAll(config.StateDir, 0700)
err := os.MkdirAll(config.StateDir, 0o700)
if err != nil {
return nil, errors.Wrap(err, "failed to create state directory")
}
raftStateDir := filepath.Join(config.StateDir, "raft")
err = os.MkdirAll(raftStateDir, 0700)
err = os.MkdirAll(raftStateDir, 0o700)
if err != nil {
return nil, errors.Wrap(err, "failed to create raft state directory")
}
@ -328,7 +332,7 @@ func (m *Manager) BindControl(addr string) error {
// don't create a socket directory if we're on windows. we used named pipe
if runtime.GOOS != "windows" {
err := os.MkdirAll(filepath.Dir(addr), 0700)
err := os.MkdirAll(filepath.Dir(addr), 0o700)
if err != nil {
return errors.Wrap(err, "failed to create socket directory")
}
@ -692,6 +696,9 @@ func (m *Manager) Stop(ctx context.Context, clearData bool) {
if m.constraintEnforcer != nil {
m.constraintEnforcer.Stop()
}
if m.volumeEnforcer != nil {
m.volumeEnforcer.Stop()
}
if m.scheduler != nil {
m.scheduler.Stop()
}
@ -998,12 +1005,14 @@ func (m *Manager) becomeLeader(ctx context.Context) {
m.replicatedOrchestrator = replicated.NewReplicatedOrchestrator(s)
m.constraintEnforcer = constraintenforcer.New(s)
m.volumeEnforcer = volumeenforcer.New(s)
m.globalOrchestrator = global.NewGlobalOrchestrator(s)
m.jobsOrchestrator = jobs.NewOrchestrator(s)
m.taskReaper = taskreaper.New(s)
m.scheduler = scheduler.New(s)
m.keyManager = keymanager.New(s, keymanager.DefaultConfig())
m.roleManager = newRoleManager(s, m.raftNode)
m.volumeManager = csi.NewManager(s, m.config.PluginGetter)
// TODO(stevvooe): Allocate a context that can be used to
// shutdown underlying manager processes when leadership is lost.
@ -1095,6 +1104,10 @@ func (m *Manager) becomeLeader(ctx context.Context) {
constraintEnforcer.Run()
}(m.constraintEnforcer)
go func(volumeEnforcer *volumeenforcer.VolumeEnforcer) {
volumeEnforcer.Run()
}(m.volumeEnforcer)
go func(taskReaper *taskreaper.TaskReaper) {
taskReaper.Run(ctx)
}(m.taskReaper)
@ -1119,6 +1132,10 @@ func (m *Manager) becomeLeader(ctx context.Context) {
go func(roleManager *roleManager) {
roleManager.Run(ctx)
}(m.roleManager)
go func(volumeManager *csi.Manager) {
volumeManager.Run(ctx)
}(m.volumeManager)
}
// becomeFollower shuts down the subsystems that are only run by the leader.
@ -1139,6 +1156,9 @@ func (m *Manager) becomeFollower() {
m.constraintEnforcer.Stop()
m.constraintEnforcer = nil
m.volumeEnforcer.Stop()
m.volumeEnforcer = nil
m.replicatedOrchestrator.Stop()
m.replicatedOrchestrator = nil
@ -1158,6 +1178,9 @@ func (m *Manager) becomeFollower() {
m.keyManager.Stop()
m.keyManager = nil
}
m.volumeManager.Stop()
m.volumeManager = nil
}
// defaultClusterObject creates a default cluster.

View file

@ -0,0 +1,114 @@
package volumeenforcer
import (
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/manager/state/store"
)
// VolumeEnforcer is a component, styled off of the ConstraintEnforcer, that
// watches for updates to Volumes, and shuts down tasks if those Volumes are
// being drained.
type VolumeEnforcer struct {
store *store.MemoryStore
stopChan chan struct{}
doneChan chan struct{}
}
func New(s *store.MemoryStore) *VolumeEnforcer {
return &VolumeEnforcer{
store: s,
stopChan: make(chan struct{}),
doneChan: make(chan struct{}),
}
}
func (ve *VolumeEnforcer) Run() {
defer close(ve.doneChan)
var volumes []*api.Volume
watcher, cancelWatch, _ := store.ViewAndWatch(ve.store, func(tx store.ReadTx) error {
var err error
volumes, err = store.FindVolumes(tx, store.All)
return err
}, api.EventUpdateVolume{})
defer cancelWatch()
for _, volume := range volumes {
ve.rejectNoncompliantTasks(volume)
}
for {
select {
case event := <-watcher:
v := event.(api.EventUpdateVolume).Volume
ve.rejectNoncompliantTasks(v)
case <-ve.stopChan:
return
}
}
}
func (ve *VolumeEnforcer) Stop() {
close(ve.stopChan)
<-ve.doneChan
}
func (ve *VolumeEnforcer) rejectNoncompliantTasks(v *api.Volume) {
if v.Spec.Availability != api.VolumeAvailabilityDrain {
return
}
var volumeTasks []*api.Task
ve.store.View(func(tx store.ReadTx) {
// ignore the error; it only happens if you pass an invalid By selector
volumeTasks, _ = store.FindTasks(tx, store.ByVolumeAttachment(v.ID))
})
if len(volumeTasks) != 0 {
err := ve.store.Batch(func(batch *store.Batch) error {
for _, t := range volumeTasks {
// skip any tasks we know are already shut down or shutting
// down. Do this before we open the transaction. This saves us
// copying volumeTasks while still avoiding unnecessary
// transactions. we will still need to check again once we
// start the transaction against the latest version of the
// task.
if t.DesiredState > api.TaskStateCompleted || t.Status.State >= api.TaskStateCompleted {
continue
}
err := batch.Update(func(tx store.Tx) error {
t = store.GetTask(tx, t.ID)
// another check for task liveness.
if t == nil || t.DesiredState > api.TaskStateCompleted || t.Status.State >= api.TaskStateCompleted {
return nil
}
// as documented in the ConstraintEnforcer:
//
// We set the observed state to
// REJECTED, rather than the desired
// state. Desired state is owned by the
// orchestrator, and setting it directly
// will bypass actions such as
// restarting the task on another node
// (if applicable).
t.Status.State = api.TaskStateRejected
t.Status.Message = "task rejected by volume enforcer"
t.Status.Err = "attached to volume which is being drained"
return store.UpdateTask(tx, t)
})
if err != nil {
log.L.WithField("module", "volumeenforcer").WithError(err).Errorf("failed to shut down task %s", t.ID)
}
}
return nil
})
if err != nil {
log.L.WithField("module", "volumeenforcer").WithError(err).Errorf("failed to shut down tasks for volume %s", v.ID)
}
}
}
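// a minimal sketch of what triggers this enforcer, with illustrative
// values: setting a volume's availability to drain and committing the
// update emits api.EventUpdateVolume, which Run routes to
// rejectNoncompliantTasks, rejecting every task still attached:
//
//	v.Spec.Availability = api.VolumeAvailabilityDrain
//	// commit via store.UpdateVolume inside an Update transaction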

View file

@ -384,3 +384,64 @@ func (f *MaxReplicasFilter) Check(n *NodeInfo) bool {
func (f *MaxReplicasFilter) Explain(nodes int) string {
return "max replicas per node limit exceed"
}
type VolumesFilter struct {
vs *volumeSet
t *api.Task
// requestedVolumes is a set of volumes requested by the task. This can
// include either volume names or volume groups. Volume groups, as in the
// Mount.Source field, are prefixed with "group:"
requestedVolumes []*api.Mount
}
func (f *VolumesFilter) SetTask(t *api.Task) bool {
// if there is no volumeSet, always skip this filter
if f.vs == nil {
return false
}
f.t = t
// reset requestedVolumes every time we set a task, so we don't
// accidentally append to the last task's set of requested volumes.
f.requestedVolumes = []*api.Mount{}
// t should never be nil, but we should ensure that it is not just in case
// we make mistakes in the future.
if t == nil {
return false
}
c := t.Spec.GetContainer()
if c == nil {
return false
}
// hasCSI will be set true if one of the mounts is a CSI-type mount.
hasCSI := false
for _, mount := range c.Mounts {
if mount.Type == api.MountTypeCSI {
hasCSI = true
// copy the loop variable before taking its address, so that
// each appended pointer refers to a distinct Mount rather than
// to the single iteration variable.
mount := mount
f.requestedVolumes = append(f.requestedVolumes, &mount)
}
}
return hasCSI
}
func (f *VolumesFilter) Check(nodeInfo *NodeInfo) bool {
// a node passes only if every requested volume can be satisfied on
// it; a single unsatisfiable mount disqualifies the node.
for _, mount := range f.requestedVolumes {
if f.vs.isVolumeAvailableOnNode(mount, nodeInfo) == "" {
return false
}
}
return true
}
func (f *VolumesFilter) Explain(nodes int) string {
if nodes == 1 {
return "cannot fulfill requested CSI volume mounts on 1 node"
}
return fmt.Sprintf(
"cannot fulfill requested CSI volume mounts on %d nodes", nodes,
)
}
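// a minimal sketch of the mounts this filter acts on, with illustrative
// names; a bare Source resolves by volume name, while the "group:"
// prefix matches any volume in that group:
//
//	mounts := []api.Mount{
//		{Type: api.MountTypeCSI, Source: "mydata", Target: "/data"},
//		{Type: api.MountTypeCSI, Source: "group:fast", Target: "/cache"},
//	}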

View file

@ -67,6 +67,10 @@ func (p *Pipeline) Process(n *NodeInfo) bool {
return true
}
func (p *Pipeline) AddFilter(f Filter) {
p.checklist = append(p.checklist, checklistEntry{f: f})
}
// SetTask sets up the filters to process a new task. Once this is called,
// Process can be called repeatedly to try to assign the task various nodes.
func (p *Pipeline) SetTask(t *api.Task) {

View file

@ -2,6 +2,7 @@ package scheduler
import (
"context"
"sync"
"time"
"github.com/docker/swarmkit/api"
@ -39,7 +40,10 @@ type Scheduler struct {
nodeSet nodeSet
allTasks map[string]*api.Task
pipeline *Pipeline
volumes *volumeSet
// stopOnce is a sync.Once used to ensure that Stop is idempotent
stopOnce sync.Once
// stopChan signals to the state machine to stop running
stopChan chan struct{}
// doneChan is closed when the state machine terminates
@ -57,10 +61,25 @@ func New(store *store.MemoryStore) *Scheduler {
stopChan: make(chan struct{}),
doneChan: make(chan struct{}),
pipeline: NewPipeline(),
volumes: newVolumeSet(),
}
}
func (s *Scheduler) setupTasksList(tx store.ReadTx) error {
// add all volumes that are ready to the volumeSet
volumes, err := store.FindVolumes(tx, store.All)
if err != nil {
return err
}
for _, volume := range volumes {
// only add volumes that have been created, meaning they have a
// VolumeID.
if volume.VolumeInfo != nil && volume.VolumeInfo.VolumeID != "" {
s.volumes.addOrUpdateVolume(volume)
}
}
tasks, err := store.FindTasks(tx, store.All)
if err != nil {
return err
@ -93,6 +112,9 @@ func (s *Scheduler) setupTasksList(tx store.ReadTx) error {
continue
}
// track the volumes in use by the task
s.volumes.reserveTaskVolumes(t)
if tasksByNode[t.NodeID] == nil {
tasksByNode[t.NodeID] = make(map[string]*api.Task)
}
@ -103,9 +125,12 @@ func (s *Scheduler) setupTasksList(tx store.ReadTx) error {
}
// Run is the scheduler event loop.
func (s *Scheduler) Run(ctx context.Context) error {
func (s *Scheduler) Run(pctx context.Context) error {
ctx := log.WithModule(pctx, "scheduler")
defer close(s.doneChan)
s.pipeline.AddFilter(&VolumesFilter{vs: s.volumes})
updates, cancel, err := store.ViewAndWatch(s.store, s.setupTasksList)
if err != nil {
log.G(ctx).WithError(err).Errorf("snapshot store update failed")
@ -172,6 +197,20 @@ func (s *Scheduler) Run(ctx context.Context) error {
tickRequired = true
case api.EventDeleteNode:
s.nodeSet.remove(v.Node.ID)
case api.EventUpdateVolume:
// there is no need for an EventCreateVolume case, because
// volumes are not ready to use until they've passed through
// the volume manager and been created with the plugin
//
// as such, only addOrUpdateVolume if the VolumeInfo exists and
// has a nonempty VolumeID
if v.Volume.VolumeInfo != nil && v.Volume.VolumeInfo.VolumeID != "" {
// TODO(dperny): verify that updating volumes doesn't break
// scheduling
log.G(ctx).WithField("volume.id", v.Volume.ID).Debug("updated volume")
s.volumes.addOrUpdateVolume(v.Volume)
tickRequired = true
}
case state.EventCommit:
if commitDebounceTimer != nil {
if time.Since(debouncingStarted) > maxLatency {
@ -200,7 +239,10 @@ func (s *Scheduler) Run(ctx context.Context) error {
// Stop causes the scheduler event loop to stop running.
func (s *Scheduler) Stop() {
close(s.stopChan)
// ensure stop is called only once. this helps in some test cases.
s.stopOnce.Do(func() {
close(s.stopChan)
})
<-s.doneChan
}
@ -309,6 +351,12 @@ func (s *Scheduler) deleteTask(t *api.Task) bool {
delete(s.allTasks, t.ID)
delete(s.preassignedTasks, t.ID)
delete(s.pendingPreassignedTasks, t.ID)
// remove the task volume reservations as well, if any
for _, attachment := range t.Volumes {
s.volumes.releaseVolume(attachment.ID, t.ID)
}
nodeInfo, err := s.nodeSet.nodeInfo(t.NodeID)
if err == nil && nodeInfo.removeTask(t) {
s.nodeSet.updateNode(nodeInfo)
@ -370,6 +418,10 @@ func (s *Scheduler) processPreassignedTasks(ctx context.Context) {
if err == nil && nodeInfo.removeTask(decision.new) {
s.nodeSet.updateNode(nodeInfo)
}
for _, va := range decision.new.Volumes {
s.volumes.releaseVolume(va.ID, decision.new.ID)
}
}
}
@ -425,6 +477,11 @@ func (s *Scheduler) tick(ctx context.Context) {
s.nodeSet.updateNode(nodeInfo)
}
// release the volumes we tried to use
for _, va := range decision.new.Volumes {
s.volumes.releaseVolume(va.ID, decision.new.ID)
}
// enqueue task for next scheduling attempt
s.enqueue(decision.old)
}
@ -443,6 +500,7 @@ func (s *Scheduler) applySchedulingDecisions(ctx context.Context, schedulingDeci
err := batch.Update(func(tx store.Tx) error {
// Update exactly one task inside this Update
// callback.
taskLoop:
for taskID, decision := range schedulingDecisions {
delete(schedulingDecisions, taskID)
@ -474,11 +532,82 @@ func (s *Scheduler) applySchedulingDecisions(ctx context.Context, schedulingDeci
}
}
volumes := []*api.Volume{}
for _, va := range decision.new.Volumes {
v := store.GetVolume(tx, va.ID)
if v == nil {
log.G(ctx).Debugf(
"scheduler failed to update task %s because volume %s could not be found",
taskID,
va.ID,
)
failed = append(failed, decision)
continue taskLoop
}
// it's ok if the copy of the Volume we scheduled off
// of is out of date, because the Scheduler is the only
// component which adds new uses of a particular Volume,
// which means that in most cases, no update to the
// volume could conflict with the copy the Scheduler
// used to make decisions.
//
// the exception is that the VolumeAvailability could
// have been changed. both Pause and Drain
// availabilities mean the Volume should not be
// scheduled, and so we call off our attempt to commit
// this scheduling decision. this is the only field we
// must check for conflicts.
//
// this is, additionally, the reason that a Volume must
// be set to Drain before it can be deleted. it stops
// us from having to worry about any other field when
// attempting to use the Volume.
if v.Spec.Availability != api.VolumeAvailabilityActive {
log.G(ctx).Debugf(
"scheduler failed to update task %s because volume %s has availability %s",
taskID, v.ID, v.Spec.Availability.String(),
)
failed = append(failed, decision)
continue taskLoop
}
alreadyPublished := false
for _, ps := range v.PublishStatus {
if ps.NodeID == decision.new.NodeID {
alreadyPublished = true
break
}
}
if !alreadyPublished {
v.PublishStatus = append(
v.PublishStatus,
&api.VolumePublishStatus{
NodeID: decision.new.NodeID,
State: api.VolumePublishStatus_PENDING_PUBLISH,
},
)
volumes = append(volumes, v)
}
}
if err := store.UpdateTask(tx, decision.new); err != nil {
log.G(ctx).Debugf("scheduler failed to update task %s; will retry", taskID)
failed = append(failed, decision)
continue
}
for _, v := range volumes {
if err := store.UpdateVolume(tx, v); err != nil {
// TODO(dperny): handle the case of a partial
// update?
log.G(ctx).WithError(err).Debugf(
"scheduler failed to update task %v; volume %v could not be updated",
taskID, v.ID,
)
failed = append(failed, decision)
continue taskLoop
}
}
successful = append(successful, decision)
return nil
}
@ -488,7 +617,11 @@ func (s *Scheduler) applySchedulingDecisions(ctx context.Context, schedulingDeci
return err
}
}
return nil
// finally, every time we make new scheduling decisions, take the
// opportunity to release volumes.
return batch.Update(func(tx store.Tx) error {
return s.volumes.freeVolumes(tx)
})
})
if err != nil {
@ -516,6 +649,23 @@ func (s *Scheduler) taskFitNode(ctx context.Context, t *api.Task, nodeID string)
return &newT
}
// before doing all of the updating logic, get the volume attachments
// for the task on this node. this should always succeed, because we
// should already have filtered nodes based on volume availability, but
// just in case we missed something and it doesn't, we have an error
// case.
attachments, err := s.volumes.chooseTaskVolumes(t, &nodeInfo)
if err != nil {
newT.Status.Timestamp = ptypes.MustTimestampProto(time.Now())
newT.Status.Err = err.Error()
s.allTasks[t.ID] = &newT
return &newT
}
newT.Volumes = attachments
newT.Status = api.TaskStatus{
State: api.TaskStateAssigned,
Timestamp: ptypes.MustTimestampProto(time.Now()),
@ -587,6 +737,28 @@ func (s *Scheduler) scheduleTaskGroup(ctx context.Context, taskGroup map[string]
}
}
// scheduleNTasksOnSubtree schedules a set of tasks with identical constraints
// onto a set of nodes, taking into account placement preferences.
//
// placement preferences are used to create a tree such that every branch
// represents one subset of nodes across which tasks should be spread.
//
// because of this tree structure, scheduleNTasksOnSubtree is a recursive
// function. If there are subtrees of the current tree, then we recurse. if we
// are at a leaf node, past which there are no subtrees, then we try to
// schedule a proportional number of tasks to the nodes of that branch.
//
// - n is the number of tasks being scheduled on this subtree
// - taskGroup is a set of tasks to schedule, taking the form of a map from the
// task ID to the task object.
// - tree is the decision tree we're scheduling on. this is, effectively, the
// set of nodes that meet scheduling constraints. these nodes are arranged
// into a tree so that placement preferences can be taken into account when
// spreading tasks across nodes.
// - schedulingDecisions is a set of the scheduling decisions already made for
// this tree
// - nodeLess is a comparator that chooses which of the two nodes is preferable
// to schedule on.
func (s *Scheduler) scheduleNTasksOnSubtree(ctx context.Context, n int, taskGroup map[string]*api.Task, tree *decisionTree, schedulingDecisions map[string]schedulingDecision, nodeLess func(a *NodeInfo, b *NodeInfo) bool) int {
if tree.next == nil {
nodes := tree.orderedNodes(s.pipeline.Process, nodeLess)
@ -639,6 +811,23 @@ func (s *Scheduler) scheduleNTasksOnSubtree(ctx context.Context, n int, taskGrou
return tasksScheduled
}
// scheduleNTasksOnNodes schedules some number of tasks on the set of provided
// nodes. The number of tasks being scheduled may be less than the total number
// of tasks, as the Nodes may be one branch of a tree used to spread tasks.
//
// returns the number of tasks actually scheduled to these nodes. this may be
// fewer than the number of tasks desired to be scheduled, if there are
// insufficient nodes to meet resource constraints.
//
// - n is the number of tasks desired to be scheduled to this set of nodes
// - taskGroup is the tasks desired to be scheduled, in the form of a map from
// task ID to task object. this argument is mutated; tasks which have been
// scheduled are removed from the map.
// - nodes is the set of nodes to schedule to
// - schedulingDecisions is the set of scheduling decisions that have been made
// thus far, in the form of a map from task ID to the decision made.
// - nodeLess is a simple comparator that chooses which of two nodes would be
// preferable to schedule on.
func (s *Scheduler) scheduleNTasksOnNodes(ctx context.Context, n int, taskGroup map[string]*api.Task, nodes []NodeInfo, schedulingDecisions map[string]schedulingDecision, nodeLess func(a *NodeInfo, b *NodeInfo) bool) int {
tasksScheduled := 0
failedConstraints := make(map[int]bool) // key is index in nodes slice
@ -652,10 +841,24 @@ func (s *Scheduler) scheduleNTasksOnNodes(ctx context.Context, n int, taskGroup
}
node := &nodes[nodeIter%nodeCount]
// before doing all of the updating logic, get the volume attachments
// for the task on this node. this should always succeed, because we
// should already have filtered nodes based on volume availability, but
// just in case we missed something and it doesn't, we have an error
// case.
attachments, err := s.volumes.chooseTaskVolumes(t, node)
if err != nil {
// TODO(dperny) if there's an error, then what? i'm frankly not
// sure.
log.G(ctx).WithField("task.id", t.ID).WithError(err).Error("could not find task volumes")
}
log.G(ctx).WithField("task.id", t.ID).Debugf("assigning to node %s", node.ID)
// she turned me into a newT!
newT := *t
newT.Volumes = attachments
newT.NodeID = node.ID
s.volumes.reserveTaskVolumes(&newT)
newT.Status = api.TaskStatus{
State: api.TaskStateAssigned,
Timestamp: ptypes.MustTimestampProto(time.Now()),
@ -663,6 +866,10 @@ func (s *Scheduler) scheduleNTasksOnNodes(ctx context.Context, n int, taskGroup
}
s.allTasks[t.ID] = &newT
// in each iteration of this loop, the node we choose will always be
// one which meets constraints. at the end of each iteration, we
// re-process nodes, allowing us to remove nodes which no longer meet
// resource constraints.
nodeInfo, err := s.nodeSet.nodeInfo(node.ID)
if err == nil && nodeInfo.addTask(&newT) {
s.nodeSet.updateNode(nodeInfo)

View file

@ -0,0 +1,47 @@
package scheduler
import (
"github.com/docker/swarmkit/api"
)
// IsInTopology takes a Topology `top` (which is reported by a Node) and a list
// of Topologies `accessible` (which comes from a created volume, in the form
// of the AccessibleTopology) and returns true if `top` lies within
// `accessible` (meaning a node with that Topology can access a volume with
// that AccessibleTopology).
//
// In order for `top` to lie within `accessible`, there must exist a topology
// in `accessible` such that for every subdomain/segment pair in that topology,
// there exists an equivalent subdomain/segment pair in `top`.
//
// For examples, see the test for this function.
//
// NOTE(dperny): It is unclear whether a topology can be partial. For example,
// can an accessible topology contain only a "region" subdomain, without a
// "zone" subdomain? This function assumes yes.
func IsInTopology(top *api.Topology, accessible []*api.Topology) bool {
// if either side of the comparison is missing, then `top` trivially fits.
if top == nil || len(accessible) == 0 {
return true
}
// go through each accessible topology
topologies:
for _, topology := range accessible {
// and for each topology, go through every segment
for subdomain, segment := range topology.Segments {
// if the segment for this subdomain is different in the `top`,
// then, `top` does not lie within this topology.
if top.Segments[subdomain] != segment {
// go to the next topology in the list
continue topologies
}
}
// if we get through all of the segments specified in this topology,
// and they have all matched, then `top` lies within `accessible`.
return true
}
// if we have iterated through all topologies, and never once finished
// iterating through all topological segments, then `top` does not lie
// within `accessible`.
return false
}
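// for example, with illustrative segment names, a node topology of
// {region: R1, zone: Z1} lies within accessible = [{region: R1}],
// because the one accessible segment is matched; the reverse does not
// hold, since a node topology of {region: R1} has no "zone" segment to
// match {region: R1, zone: Z1}:
//
//	top := &api.Topology{Segments: map[string]string{"region": "R1", "zone": "Z1"}}
//	accessible := []*api.Topology{{Segments: map[string]string{"region": "R1"}}}
//	_ = IsInTopology(top, accessible) // true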

View file

@ -0,0 +1,319 @@
package scheduler
import (
"fmt"
"strings"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/manager/state/store"
)
// the scheduler package does double duty -- in addition to choosing nodes, it
// must also choose volumes. this is because volumes are fungible, and can be
// scheduled to several nodes, and used by several tasks. we should endeavor to
// spread tasks across volumes, like we spread nodes. on the positive side,
// unlike nodes, volumes are not hierarchical. that is, we don't need to
// spread across multiple levels of a tree, only a flat set.
// volumeSet is the set of all volumes currently managed
type volumeSet struct {
// volumes is a mapping of volume IDs to volumeInfo
volumes map[string]volumeInfo
// byGroup is a mapping from a volume group name to a set of volumes in
// that group
byGroup map[string]map[string]struct{}
// byName is a mapping of volume names to swarmkit volume IDs.
byName map[string]string
}
// volumeUsage contains information about the usage of a Volume by a specific
// task.
type volumeUsage struct {
nodeID string
readOnly bool
}
// volumeInfo contains scheduler information about a given volume
type volumeInfo struct {
volume *api.Volume
tasks map[string]volumeUsage
// nodes is a set of nodes a volume is in use on. it maps a node ID to a
// reference count for how many tasks are using the volume on that node.
nodes map[string]int
}
func newVolumeSet() *volumeSet {
return &volumeSet{
volumes: map[string]volumeInfo{},
byGroup: map[string]map[string]struct{}{},
byName: map[string]string{},
}
}
// getVolume returns the volume object for the given ID as stored in the
// volumeSet, or nil if none exists.
func (vs *volumeSet) getVolume(id string) *api.Volume {
return vs.volumes[id].volume
}
func (vs *volumeSet) addOrUpdateVolume(v *api.Volume) {
if info, ok := vs.volumes[v.ID]; !ok {
vs.volumes[v.ID] = volumeInfo{
volume: v,
nodes: map[string]int{},
tasks: map[string]volumeUsage{},
}
} else {
// if the volume already exists in the set, then only update the volume
// object, not the tasks map. volumeInfo is stored by value, so the
// modified copy must be written back to the map.
info.volume = v
vs.volumes[v.ID] = info
}
if set, ok := vs.byGroup[v.Spec.Group]; ok {
set[v.ID] = struct{}{}
} else {
vs.byGroup[v.Spec.Group] = map[string]struct{}{v.ID: {}}
}
vs.byName[v.Spec.Annotations.Name] = v.ID
}
func (vs *volumeSet) removeVolume(volumeID string) {
if info, ok := vs.volumes[volumeID]; ok {
// if the volume exists in the set, look up its group ID and remove it
// from the byGroup mapping as well
group := info.volume.Spec.Group
delete(vs.byGroup[group], volumeID)
delete(vs.volumes, volumeID)
delete(vs.byName, info.volume.Spec.Annotations.Name)
}
}
// chooseTaskVolumes selects a set of VolumeAttachments for the task on the
// given node. it expects that the node was already validated to have the
// necessary volumes, but it will return an error if a full set of volumes is
// not available.
func (vs *volumeSet) chooseTaskVolumes(task *api.Task, nodeInfo *NodeInfo) ([]*api.VolumeAttachment, error) {
volumes := []*api.VolumeAttachment{}
// we'll reserve volumes in this loop, but release all of our reservations
// before we finish. the caller will need to call reserveTaskVolumes after
// calling this function
// TODO(dperny): this is probably not optimal
defer func() {
for _, volume := range volumes {
vs.releaseVolume(volume.ID, task.ID)
}
}()
// TODO(dperny): handle non-container tasks
c := task.Spec.GetContainer()
if c == nil {
return nil, nil
}
for _, mount := range task.Spec.GetContainer().Mounts {
if mount.Type == api.MountTypeCSI {
candidate := vs.isVolumeAvailableOnNode(&mount, nodeInfo)
if candidate == "" {
// TODO(dperny): return structured error types, instead of
// error strings
return nil, fmt.Errorf("cannot find volume to satisfy mount with source %v", mount.Source)
}
vs.reserveVolume(candidate, task.ID, nodeInfo.Node.ID, mount.ReadOnly)
volumes = append(volumes, &api.VolumeAttachment{
ID: candidate,
Source: mount.Source,
Target: mount.Target,
})
}
}
return volumes, nil
}
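// For illustration only (not part of the original source): a caller would
// typically pair chooseTaskVolumes with reserveTaskVolumes, because the
// reservations taken in the loop above are released before returning:
//
//	attachments, err := vs.chooseTaskVolumes(task, nodeInfo)
//	if err != nil {
//		return err
//	}
//	task.Volumes = attachments
//	vs.reserveTaskVolumes(task)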
// reserveTaskVolumes identifies all volumes currently in use on a task and
// marks them in the volumeSet as in use.
func (vs *volumeSet) reserveTaskVolumes(task *api.Task) {
for _, va := range task.Volumes {
// we shouldn't need to handle non-container tasks because those tasks
// won't have any entries in task.Volumes.
for _, mount := range task.Spec.GetContainer().Mounts {
if mount.Source == va.Source && mount.Target == va.Target {
vs.reserveVolume(va.ID, task.ID, task.NodeID, mount.ReadOnly)
}
}
}
}
func (vs *volumeSet) reserveVolume(volumeID, taskID, nodeID string, readOnly bool) {
info, ok := vs.volumes[volumeID]
if !ok {
// TODO(dperny): don't just return nothing.
return
}
info.tasks[taskID] = volumeUsage{nodeID: nodeID, readOnly: readOnly}
// increment the reference count for this node.
info.nodes[nodeID] = info.nodes[nodeID] + 1
}
func (vs *volumeSet) releaseVolume(volumeID, taskID string) {
info, ok := vs.volumes[volumeID]
if !ok {
// if the volume isn't in the set, no action to take.
return
}
// decrement the reference count for this task's node
usage, ok := info.tasks[taskID]
if ok {
// this is probably an unnecessarily high level of caution, but make
// sure we don't go below zero on node count.
if c := info.nodes[usage.nodeID]; c > 0 {
info.nodes[usage.nodeID] = c - 1
}
delete(info.tasks, taskID)
}
}
// freeVolumes finds volumes that are no longer in use on some nodes, and
// updates them to be unpublished from those nodes.
//
// TODO(dperny): this is messy and has a lot of overhead. it should be reworked
// to something more streamlined.
func (vs *volumeSet) freeVolumes(tx store.Tx) error {
for volumeID, info := range vs.volumes {
v := store.GetVolume(tx, volumeID)
if v == nil {
continue
}
changed := false
for _, status := range v.PublishStatus {
if info.nodes[status.NodeID] == 0 && status.State == api.VolumePublishStatus_PUBLISHED {
status.State = api.VolumePublishStatus_PENDING_NODE_UNPUBLISH
changed = true
}
}
if changed {
if err := store.UpdateVolume(tx, v); err != nil {
return err
}
}
}
return nil
}
// isVolumeAvailableOnNode checks if a volume satisfying the given mount is
// available on the given node.
//
// Returns the ID of the volume available, or an empty string if no such volume
// is found.
func (vs *volumeSet) isVolumeAvailableOnNode(mount *api.Mount, node *NodeInfo) string {
source := mount.Source
// first, discern whether we're looking for a group or a volume
// try trimming off the "group:" prefix. if the resulting string is
// different from the input string (meaning something has been trimmed),
// then this volume is actually a volume group.
if group := strings.TrimPrefix(source, "group:"); group != source {
ids, ok := vs.byGroup[group]
		// if there are no volumes of this group specified, then no volume
		// meets the mount criteria.
if !ok {
return ""
}
// iterate through all ids in the group, checking if any one meets the
// spec.
for id := range ids {
if vs.checkVolume(id, node, mount.ReadOnly) {
return id
}
}
return ""
}
// if it's not a group, it's a name. resolve the volume name to its ID
id, ok := vs.byName[source]
if !ok || !vs.checkVolume(id, node, mount.ReadOnly) {
return ""
}
return id
}
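// For example (illustration only, not part of the original source): a mount
// with Source "group:fast-disks" matches any volume whose Spec.Group is
// "fast-disks", while a mount with Source "my-volume" matches only the
// volume with exactly that name.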
// checkVolume checks if an individual volume with the given ID can be placed
// on the given node.
func (vs *volumeSet) checkVolume(id string, info *NodeInfo, readOnly bool) bool {
	vi := vs.volumes[id]
	// guard against a volume that is not tracked in this set at all.
	if vi.volume == nil {
		return false
	}
	// first, check if the volume's availability is even Active. If not, no
	// reason to bother with anything further.
	if vi.volume.Spec.Availability != api.VolumeAvailabilityActive {
		return false
	}
	// get the topology for this volume's driver on this node
	var top *api.Topology
	for _, csiInfo := range info.Description.CSIInfo {
		if csiInfo.PluginName == vi.volume.Spec.Driver.Name {
			top = csiInfo.AccessibleTopology
			break
		}
	}
// check if the volume is available on this node. a volume's
// availability on a node depends on its accessible topology, how it's
// already being used, and how this task intends to use it.
if vi.volume.Spec.AccessMode.Scope == api.VolumeScopeSingleNode {
// if the volume is not in use on this node already, then it can't
// be used here.
for _, usage := range vi.tasks {
if usage.nodeID != info.ID {
return false
}
}
}
// even if the volume is currently on this node, or it has multi-node
// access, the volume sharing needs to be compatible.
switch vi.volume.Spec.AccessMode.Sharing {
case api.VolumeSharingNone:
// if the volume sharing is none, then the volume cannot be
// used by another task
if len(vi.tasks) > 0 {
return false
}
	case api.VolumeSharingOneWriter:
		// if the mount is not ReadOnly, and the volume already has a writer,
		// then this volume does not work.
if !readOnly && hasWriter(vi) {
return false
}
case api.VolumeSharingReadOnly:
// if the volume sharing is read-only, then the Mount must also
// be read-only
if !readOnly {
return false
}
}
// then, do the quick check of whether this volume is in the topology. if
// the volume has an AccessibleTopology, and it does not lie within the
// node's topology, then this volume won't fit.
if !IsInTopology(top, vi.volume.VolumeInfo.AccessibleTopology) {
return false
}
return true
}
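// To illustrate the checks above (comment added for exposition, not part of
// the original source): a VolumeSharingOneWriter volume that already has one
// read-write task can still accept additional read-only mounts on a
// compatible node, but a second read-write mount fails the hasWriter check.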
// hasWriter is a helper function that returns true if at least one task is
// using this volume not in read-only mode.
func hasWriter(info volumeInfo) bool {
for _, usage := range info.tasks {
if !usage.readOnly {
return true
}
}
return false
}

View file

@ -4,10 +4,10 @@ import (
"errors"
"sync"
"github.com/coreos/etcd/raft/raftpb"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/watch"
"github.com/gogo/protobuf/proto"
"go.etcd.io/etcd/raft/v3/raftpb"
)
var (

View file

@ -14,9 +14,6 @@ import (
"time"
"code.cloudfoundry.org/clock"
"github.com/coreos/etcd/pkg/idutil"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/docker/go-events"
"github.com/docker/go-metrics"
"github.com/docker/swarmkit/api"
@ -32,6 +29,9 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.etcd.io/etcd/pkg/v3/idutil"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/raft/v3/raftpb"
"golang.org/x/time/rate"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@ -447,7 +447,7 @@ func (n *Node) JoinAndStart(ctx context.Context) (err error) {
}
n.initTransport()
n.raftNode = raft.StartNode(n.Config, nil)
n.raftNode = raft.RestartNode(n.Config)
return nil
}
@ -607,6 +607,12 @@ func (n *Node) Run(ctx context.Context) error {
}
for _, msg := range rd.Messages {
// if the message is a snapshot, before we send it, we should
// overwrite the original ConfState from the snapshot with the
// current one
if msg.Type == raftpb.MsgSnap {
msg.Snapshot.Metadata.ConfState = n.confState
}
// Send raft messages to peers
if err := n.transport.Send(msg); err != nil {
log.G(ctx).WithError(err).Error("failed to send message to member")
@ -2096,7 +2102,7 @@ func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raf
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
ids := make(map[uint64]struct{})
if snap != nil {
for _, id := range snap.Metadata.ConfState.Nodes {
for _, id := range snap.Metadata.ConfState.Voters {
ids[id] = struct{}{}
}
}

View file

@ -4,8 +4,6 @@ import (
"context"
"fmt"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/docker/go-metrics"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
@ -14,6 +12,8 @@ import (
"github.com/docker/swarmkit/manager/state/raft/storage"
"github.com/docker/swarmkit/manager/state/store"
"github.com/pkg/errors"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/raft/v3/raftpb"
)
var (

View file

@ -1,24 +1,23 @@
package storage
import (
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/snap"
"github.com/docker/swarmkit/manager/encryption"
"github.com/pkg/errors"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
)
// This package wraps the github.com/coreos/etcd/snap package, and encrypts
// This package wraps the go.etcd.io/etcd/server/v3/etcdserver/api/snap package, and encrypts
// the bytes of whatever snapshot is passed to it, and decrypts the bytes of
// whatever snapshot it reads.
// Snapshotter is the interface presented by github.com/coreos/etcd/snap.Snapshotter that we depend upon
// Snapshotter is the interface presented by go.etcd.io/etcd/server/v3/etcdserver/api/snap.Snapshotter that we depend upon
type Snapshotter interface {
SaveSnap(snapshot raftpb.Snapshot) error
Load() (*raftpb.Snapshot, error)
@ -34,7 +33,7 @@ var _ Snapshotter = &wrappedSnap{}
var _ Snapshotter = &snap.Snapshotter{}
var _ SnapFactory = snapCryptor{}
// wrappedSnap wraps a github.com/coreos/etcd/snap.Snapshotter, and handles
// wrappedSnap wraps a go.etcd.io/etcd/server/v3/etcdserver/api/snap.Snapshotter, and handles
// encrypting/decrypting.
type wrappedSnap struct {
*snap.Snapshotter
@ -88,7 +87,7 @@ func NewSnapFactory(encrypter encryption.Encrypter, decrypter encryption.Decrypt
// NewSnapshotter returns a new Snapshotter with the given encrypters and decrypters
func (sc snapCryptor) New(dirpath string) Snapshotter {
return &wrappedSnap{
Snapshotter: snap.New(dirpath),
Snapshotter: snap.New(nil, dirpath),
encrypter: sc.encrypter,
decrypter: sc.decrypter,
}
@ -97,7 +96,7 @@ func (sc snapCryptor) New(dirpath string) Snapshotter {
type originalSnap struct{}
func (o originalSnap) New(dirpath string) Snapshotter {
return snap.New(dirpath)
return snap.New(nil, dirpath)
}
// OriginalSnap is the original `snap` package as an implementation of the SnapFactory interface
@ -140,7 +139,7 @@ func MigrateSnapshot(oldDir, newDir string, oldFactory, newFactory SnapFactory)
// ListSnapshots lists all the snapshot files in a particular directory and returns
// the snapshot files in reverse lexical order (newest first)
func ListSnapshots(dirpath string) ([]string, error) {
dirents, err := ioutil.ReadDir(dirpath)
dirents, err := os.ReadDir(dirpath)
if err != nil {
return nil, err
}

View file

@ -7,14 +7,14 @@ import (
"path/filepath"
"sync"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/snap"
"github.com/coreos/etcd/wal"
"github.com/coreos/etcd/wal/walpb"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/manager/encryption"
"github.com/pkg/errors"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
"go.etcd.io/etcd/server/v3/wal"
"go.etcd.io/etcd/server/v3/wal/walpb"
)
// ErrNoWAL is returned if there are no WALs on disk
@ -83,7 +83,7 @@ func (e *EncryptedRaftLogger) BootstrapFromDisk(ctx context.Context, oldEncrypti
}
}
// ensure the new directory exists
if err := os.MkdirAll(snapDir, 0700); err != nil {
if err := os.MkdirAll(snapDir, 0o700); err != nil {
return nil, WALData{}, errors.Wrap(err, "failed to create snapshot directory")
}
@ -105,6 +105,7 @@ func (e *EncryptedRaftLogger) BootstrapFromDisk(ctx context.Context, oldEncrypti
if snapshot != nil {
walsnap.Index = snapshot.Metadata.Index
walsnap.Term = snapshot.Metadata.Term
walsnap.ConfState = &snapshot.Metadata.ConfState
}
if !wal.Exist(walDir) {
@ -147,7 +148,7 @@ func (e *EncryptedRaftLogger) BootstrapNew(metadata []byte) error {
walFactory := NewWALFactory(encrypter, decrypter)
for _, dirpath := range []string{filepath.Dir(e.walDir()), e.snapDir()} {
if err := os.MkdirAll(dirpath, 0700); err != nil {
if err := os.MkdirAll(dirpath, 0o700); err != nil {
return errors.Wrapf(err, "failed to create %s", dirpath)
}
}
@ -197,8 +198,9 @@ func (e *EncryptedRaftLogger) RotateEncryptionKey(newKey []byte) {
func (e *EncryptedRaftLogger) SaveSnapshot(snapshot raftpb.Snapshot) error {
walsnap := walpb.Snapshot{
Index: snapshot.Metadata.Index,
Term: snapshot.Metadata.Term,
Index: snapshot.Metadata.Index,
Term: snapshot.Metadata.Term,
ConfState: &snapshot.Metadata.ConfState,
}
e.encoderMu.RLock()

View file

@ -3,25 +3,24 @@ package storage
import (
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/wal"
"github.com/coreos/etcd/wal/walpb"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/manager/encryption"
"github.com/pkg/errors"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/wal"
"go.etcd.io/etcd/server/v3/wal/walpb"
)
// This package wraps the github.com/coreos/etcd/wal package, and encrypts
// This package wraps the go.etcd.io/etcd/server/v3/wal package, and encrypts
// the bytes of whatever entry is passed to it, and decrypts the bytes of
// whatever entry it reads.
// WAL is the interface presented by github.com/coreos/etcd/wal.WAL that we depend upon
// WAL is the interface presented by go.etcd.io/etcd/server/v3/wal.WAL that we depend upon
type WAL interface {
ReadAll() ([]byte, raftpb.HardState, []raftpb.Entry, error)
ReleaseLockTo(index uint64) error
@ -41,7 +40,7 @@ var _ WAL = &wrappedWAL{}
var _ WAL = &wal.WAL{}
var _ WALFactory = walCryptor{}
// wrappedWAL wraps a github.com/coreos/etcd/wal.WAL, and handles encrypting/decrypting
// wrappedWAL wraps a go.etcd.io/etcd/server/v3/wal.WAL, and handles encrypting/decrypting
type wrappedWAL struct {
*wal.WAL
encrypter encryption.Encrypter
@ -103,7 +102,7 @@ func NewWALFactory(encrypter encryption.Encrypter, decrypter encryption.Decrypte
// Create returns a new WAL object with the given encrypters and decrypters.
func (wc walCryptor) Create(dirpath string, metadata []byte) (WAL, error) {
w, err := wal.Create(dirpath, metadata)
w, err := wal.Create(nil, dirpath, metadata)
if err != nil {
return nil, err
}
@ -116,7 +115,7 @@ func (wc walCryptor) Create(dirpath string, metadata []byte) (WAL, error) {
// Open returns a new WAL object with the given encrypters and decrypters.
func (wc walCryptor) Open(dirpath string, snap walpb.Snapshot) (WAL, error) {
w, err := wal.Open(dirpath, snap)
w, err := wal.Open(nil, dirpath, snap)
if err != nil {
return nil, err
}
@ -130,10 +129,10 @@ func (wc walCryptor) Open(dirpath string, snap walpb.Snapshot) (WAL, error) {
type originalWAL struct{}
func (o originalWAL) Create(dirpath string, metadata []byte) (WAL, error) {
return wal.Create(dirpath, metadata)
return wal.Create(nil, dirpath, metadata)
}
func (o originalWAL) Open(dirpath string, walsnap walpb.Snapshot) (WAL, error) {
return wal.Open(dirpath, walsnap)
return wal.Open(nil, dirpath, walsnap)
}
// OriginalWAL is the original `wal` package as an implementation of the WALFactory interface
@ -178,7 +177,7 @@ func ReadRepairWAL(
if repaired || err != io.ErrUnexpectedEOF {
return nil, WALData{}, errors.Wrap(err, "irreparable WAL error")
}
if !wal.Repair(walDir) {
if !wal.Repair(nil, walDir) {
return nil, WALData{}, errors.Wrap(err, "WAL error cannot be repaired")
}
log.G(ctx).WithError(err).Info("repaired WAL error")
@ -203,7 +202,7 @@ func MigrateWALs(ctx context.Context, oldDir, newDir string, oldFactory, newFact
}
oldReader.Close()
if err := os.MkdirAll(filepath.Dir(newDir), 0700); err != nil {
if err := os.MkdirAll(filepath.Dir(newDir), 0o700); err != nil {
return errors.Wrap(err, "could not create parent directory")
}
@ -237,7 +236,7 @@ func MigrateWALs(ctx context.Context, oldDir, newDir string, oldFactory, newFact
// ListWALs lists all the wals in a directory and returns the list in lexical
// order (oldest first)
func ListWALs(dirpath string) ([]string, error) {
dirents, err := ioutil.ReadDir(dirpath)
dirents, err := os.ReadDir(dirpath)
if err != nil {
return nil, err
}

View file

@ -9,12 +9,12 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/manager/state/raft/membership"
"github.com/pkg/errors"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/raft/v3/raftpb"
"google.golang.org/grpc/status"
)

View file

@ -13,10 +13,10 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/docker/swarmkit/log"
"github.com/pkg/errors"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/raft/v3/raftpb"
)
// ErrIsNotFound indicates that peer was never added to transport.

View file

@ -165,6 +165,16 @@ func ByReferencedConfigID(configID string) By {
return byReferencedConfigID(configID)
}
type byVolumeAttachment string
func (b byVolumeAttachment) isBy() {}
// ByVolumeAttachment creates an object to pass to Find to search for a Task
// that has been assigned a volume attachment with the given volume ID.
func ByVolumeAttachment(volumeID string) By {
return byVolumeAttachment(volumeID)
}
type byKind string
func (b byKind) isBy() {
@ -212,3 +222,25 @@ func ByCustomPrefix(objType, index, value string) By {
value: value,
}
}
// ByVolumeGroup creates an object to pass to Find to search for volumes
// belonging to a particular group.
func ByVolumeGroup(group string) By {
return byVolumeGroup(group)
}
type byVolumeGroup string
func (b byVolumeGroup) isBy() {
}
// ByDriver creates an object to pass to Find to search for objects using a
// specific driver.
func ByDriver(driver string) By {
return byDriver(driver)
}
type byDriver string
func (b byDriver) isBy() {
}

View file

@ -22,21 +22,24 @@ import (
)
const (
indexID = "id"
indexName = "name"
indexRuntime = "runtime"
indexServiceID = "serviceid"
indexNodeID = "nodeid"
indexSlot = "slot"
indexDesiredState = "desiredstate"
indexTaskState = "taskstate"
indexRole = "role"
indexMembership = "membership"
indexNetwork = "network"
indexSecret = "secret"
indexConfig = "config"
indexKind = "kind"
indexCustom = "custom"
indexID = "id"
indexName = "name"
indexRuntime = "runtime"
indexServiceID = "serviceid"
indexNodeID = "nodeid"
indexSlot = "slot"
indexDesiredState = "desiredstate"
indexTaskState = "taskstate"
indexRole = "role"
indexMembership = "membership"
indexNetwork = "network"
indexSecret = "secret"
indexConfig = "config"
indexVolumeAttachment = "volumeattachment"
indexKind = "kind"
indexCustom = "custom"
indexVolumeGroup = "volumegroup"
indexDriver = "driver"
prefix = "_prefix"
@ -736,12 +739,30 @@ func (tx readTx) findIterators(table string, by By, checkType func(By) error) ([
return nil, err
}
return []memdb.ResultIterator{it}, nil
case byVolumeAttachment:
it, err := tx.memDBTx.Get(table, indexVolumeAttachment, string(v))
if err != nil {
return nil, err
}
return []memdb.ResultIterator{it}, nil
case byKind:
it, err := tx.memDBTx.Get(table, indexKind, string(v))
if err != nil {
return nil, err
}
return []memdb.ResultIterator{it}, nil
case byVolumeGroup:
it, err := tx.memDBTx.Get(table, indexVolumeGroup, string(v))
if err != nil {
return nil, err
}
return []memdb.ResultIterator{it}, nil
case byDriver:
it, err := tx.memDBTx.Get(table, indexDriver, string(v))
if err != nil {
return nil, err
}
return []memdb.ResultIterator{it}, nil
case byCustom:
var key string
if v.objType != "" {

View file

@ -69,6 +69,11 @@ func init() {
AllowMissing: true,
Indexer: taskIndexerByConfig{},
},
indexVolumeAttachment: {
Name: indexVolumeAttachment,
AllowMissing: true,
Indexer: taskIndexerByVolumeAttachment{},
},
indexCustom: {
Name: indexCustom,
Indexer: api.TaskCustomIndexer{},
@ -138,7 +143,7 @@ func GetTask(tx ReadTx, id string) *api.Task {
func FindTasks(tx ReadTx, by By) ([]*api.Task, error) {
checkType := func(by By) error {
switch by.(type) {
case byName, byNamePrefix, byIDPrefix, byRuntime, byDesiredState, byTaskState, byNode, byService, bySlot, byReferencedNetworkID, byReferencedSecretID, byReferencedConfigID, byCustom, byCustomPrefix:
case byName, byNamePrefix, byIDPrefix, byRuntime, byDesiredState, byTaskState, byNode, byService, bySlot, byReferencedNetworkID, byReferencedSecretID, byReferencedConfigID, byVolumeAttachment, byCustom, byCustomPrefix:
return nil
default:
return ErrInvalidFindBy
@ -317,6 +322,26 @@ func (ti taskIndexerByConfig) FromObject(obj interface{}) (bool, [][]byte, error
return len(configIDs) != 0, configIDs, nil
}
type taskIndexerByVolumeAttachment struct{}
func (ti taskIndexerByVolumeAttachment) FromArgs(args ...interface{}) ([]byte, error) {
return fromArgs(args...)
}
func (ti taskIndexerByVolumeAttachment) FromObject(obj interface{}) (bool, [][]byte, error) {
t, ok := obj.(*api.Task)
if !ok {
panic("unexpected type passed to FromObject")
}
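	// go-memdb expects index keys to be NUL-terminated strings, so each
	// volume ID below gets a trailing "\x00" (the same convention the other
	// indexers in this package follow).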
var volumeIDs [][]byte
for _, v := range t.Volumes {
volumeIDs = append(volumeIDs, []byte(v.ID+"\x00"))
}
return len(volumeIDs) != 0, volumeIDs, nil
}
type taskIndexerByTaskState struct{}
func (ts taskIndexerByTaskState) FromArgs(args ...interface{}) ([]byte, error) {

View file

@ -0,0 +1,149 @@
package store
import (
"strings"
"github.com/docker/swarmkit/api"
memdb "github.com/hashicorp/go-memdb"
)
const tableVolume = "volume"
func init() {
register(ObjectStoreConfig{
Table: &memdb.TableSchema{
Name: tableVolume,
Indexes: map[string]*memdb.IndexSchema{
indexID: {
Name: indexID,
Unique: true,
Indexer: api.VolumeIndexerByID{},
},
indexName: {
Name: indexName,
Unique: true,
Indexer: api.VolumeIndexerByName{},
},
indexCustom: {
Name: indexCustom,
Indexer: api.VolumeCustomIndexer{},
AllowMissing: true,
},
indexVolumeGroup: {
Name: indexVolumeGroup,
Indexer: volumeIndexerByGroup{},
},
indexDriver: {
Name: indexDriver,
Indexer: volumeIndexerByDriver{},
},
},
},
Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
var err error
snapshot.Volumes, err = FindVolumes(tx, All)
return err
},
Restore: func(tx Tx, snapshot *api.StoreSnapshot) error {
toStoreObj := make([]api.StoreObject, len(snapshot.Volumes))
for i, x := range snapshot.Volumes {
toStoreObj[i] = x
}
return RestoreTable(tx, tableVolume, toStoreObj)
},
ApplyStoreAction: func(tx Tx, sa api.StoreAction) error {
switch v := sa.Target.(type) {
case *api.StoreAction_Volume:
obj := v.Volume
switch sa.Action {
case api.StoreActionKindCreate:
return CreateVolume(tx, obj)
case api.StoreActionKindUpdate:
return UpdateVolume(tx, obj)
case api.StoreActionKindRemove:
return DeleteVolume(tx, obj.ID)
}
}
return errUnknownStoreAction
},
})
}
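// CreateVolume adds a new volume to the store.
// Returns ErrNameConflict if the volume name is already taken.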
func CreateVolume(tx Tx, v *api.Volume) error {
if tx.lookup(tableVolume, indexName, strings.ToLower(v.Spec.Annotations.Name)) != nil {
return ErrNameConflict
}
return tx.create(tableVolume, v)
}
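// UpdateVolume updates an existing volume in the store.
// Returns ErrNameConflict if the new name is already claimed by a different
// volume.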
func UpdateVolume(tx Tx, v *api.Volume) error {
// ensure the name is either not in use, or is in use by this volume.
if existing := tx.lookup(tableVolume, indexName, strings.ToLower(v.Spec.Annotations.Name)); existing != nil {
if existing.GetID() != v.ID {
return ErrNameConflict
}
}
return tx.update(tableVolume, v)
}
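// DeleteVolume removes the volume with the given ID from the store.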
func DeleteVolume(tx Tx, id string) error {
return tx.delete(tableVolume, id)
}
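// GetVolume returns the volume with the given ID, or nil if none exists.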
func GetVolume(tx ReadTx, id string) *api.Volume {
n := tx.get(tableVolume, id)
if n == nil {
return nil
}
return n.(*api.Volume)
}
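// FindVolumes selects a set of volumes matching by and returns them.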
func FindVolumes(tx ReadTx, by By) ([]*api.Volume, error) {
checkType := func(by By) error {
switch by.(type) {
case byName, byNamePrefix, byIDPrefix, byVolumeGroup, byCustom, byCustomPrefix, byDriver:
return nil
default:
return ErrInvalidFindBy
}
}
volumeList := []*api.Volume{}
appendResult := func(o api.StoreObject) {
volumeList = append(volumeList, o.(*api.Volume))
}
err := tx.find(tableVolume, by, checkType, appendResult)
return volumeList, err
}
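// Example (hypothetical usage, not part of the original source; s is assumed
// to be a *MemoryStore): listing all volumes in a given volume group from a
// read transaction:
//
//	var vols []*api.Volume
//	s.View(func(tx ReadTx) {
//		vols, _ = FindVolumes(tx, ByVolumeGroup("mygroup"))
//	})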
type volumeIndexerByGroup struct{}
func (vi volumeIndexerByGroup) FromArgs(args ...interface{}) ([]byte, error) {
return fromArgs(args...)
}
func (vi volumeIndexerByGroup) FromObject(obj interface{}) (bool, []byte, error) {
v := obj.(*api.Volume)
val := v.Spec.Group + "\x00"
return true, []byte(val), nil
}
type volumeIndexerByDriver struct{}
func (vi volumeIndexerByDriver) FromArgs(args ...interface{}) ([]byte, error) {
return fromArgs(args...)
}
func (vi volumeIndexerByDriver) FromObject(obj interface{}) (bool, []byte, error) {
v := obj.(*api.Volume)
// this should never happen -- existence of the volume driver is checked
// at the controlapi level. However, guard against the unforeseen.
if v.Spec.Driver == nil {
return false, nil, nil
}
val := v.Spec.Driver.Name + "\x00"
return true, []byte(val), nil
}

View file

@ -5,7 +5,6 @@ import (
"context"
"crypto/tls"
"encoding/json"
"io/ioutil"
"math"
"net"
"os"
@ -195,11 +194,11 @@ func (n *Node) RemoteAPIAddr() (string, error) {
// New returns new Node instance.
func New(c *Config) (*Node, error) {
if err := os.MkdirAll(c.StateDir, 0700); err != nil {
if err := os.MkdirAll(c.StateDir, 0o700); err != nil {
return nil, err
}
stateFile := filepath.Join(c.StateDir, stateFilename)
dt, err := ioutil.ReadFile(stateFile)
dt, err := os.ReadFile(stateFile)
var p []api.Peer
if err != nil && !os.IsNotExist(err) {
return nil, err
@ -337,7 +336,7 @@ func (n *Node) run(ctx context.Context) (err error) {
// database if it doesn't already exist, and if it does already exist, no
// error will be returned, so we use this regardless of whether this node
// is new or not.
if err := os.MkdirAll(filepath.Dir(taskDBPath), 0777); err != nil {
if err := os.MkdirAll(filepath.Dir(taskDBPath), 0o777); err != nil {
return err
}
@ -1248,7 +1247,7 @@ func (s *persistentRemotes) save() error {
return err
}
s.lastSavedState = remotes
return ioutils.AtomicWriteFile(s.storePath, dt, 0600)
return ioutils.AtomicWriteFile(s.storePath, dt, 0o600)
}
// WaitSelect waits until at least one remote becomes available and then selects one.

View file

@ -98,6 +98,7 @@ func (t templatedConfigGetter) GetAndFlagSecretData(configID string) (*api.Confi
type templatedDependencyGetter struct {
secrets exec.SecretGetter
configs TemplatedConfigGetter
volumes exec.VolumeGetter
}
// NewTemplatedDependencyGetter returns a DependencyGetter that evaluates templates.
@ -105,6 +106,7 @@ func NewTemplatedDependencyGetter(dependencies exec.DependencyGetter, t *api.Tas
return templatedDependencyGetter{
secrets: NewTemplatedSecretGetter(dependencies, t, node),
configs: NewTemplatedConfigGetter(dependencies, t, node),
volumes: dependencies.Volumes(),
}
}
@ -115,3 +117,10 @@ func (t templatedDependencyGetter) Secrets() exec.SecretGetter {
func (t templatedDependencyGetter) Configs() exec.ConfigGetter {
return t.configs
}
func (t templatedDependencyGetter) Volumes() exec.VolumeGetter {
// volumes are not templated, but we include that call (and pass it
// straight through to the underlying getter) in order to fulfill the
// DependencyGetter interface.
return t.volumes
}

4
vendor/github.com/docker/swarmkit/volumequeue/doc.go generated vendored Normal file
View file

@ -0,0 +1,4 @@
// Package volumequeue defines a type of priority queue which is used by
// both the manager and the agent to manage retrying of CSI volume operations.
package volumequeue

215
vendor/github.com/docker/swarmkit/volumequeue/queue.go generated vendored Normal file
View file

@ -0,0 +1,215 @@
package volumequeue
import (
"sync"
"time"
)
// baseRetryInterval is the base interval to retry volume operations. each
// subsequent attempt waits exponentially longer, starting from this interval.
const baseRetryInterval = 100 * time.Millisecond
// maxRetryInterval is the maximum amount of time we will wait between retrying
// volume operations.
const maxRetryInterval = 10 * time.Minute
// vqTimerSource is an interface for creating timers for the volumeQueue
type vqTimerSource interface {
	// NewTimer takes an attempt number and returns a vqTimer which will
	// trigger after a set period based on that attempt number.
	NewTimer(attempt uint) vqTimer
}
// vqTimer is an interface representing a timer. However, the timer
// trigger channel, C, is instead wrapped in a Done method, so that in testing,
// the timer can be substituted for a different object.
type vqTimer interface {
Done() <-chan time.Time
Stop() bool
}
// timerSource is an empty struct type which is used to represent the default
// vqTimerSource, which uses time.Timer.
type timerSource struct{}
// NewTimer creates a new timer.
func (timerSource) NewTimer(attempt uint) vqTimer {
var waitFor time.Duration
if attempt == 0 {
waitFor = 0
} else {
// bit-shifting the base retry interval will raise it by 2 to the power
// of attempt. this is an easy way to do an exponent solely with
// integers
waitFor = baseRetryInterval << attempt
if waitFor > maxRetryInterval {
waitFor = maxRetryInterval
}
}
return timer{Timer: time.NewTimer(waitFor)}
}
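// For example (illustration only): with the constants above, attempt 0 fires
// immediately, attempt 1 waits 200ms, attempt 2 waits 400ms, and so on,
// doubling each time until the wait is clamped to maxRetryInterval
// (10 minutes) from roughly attempt 13 onward.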
// timer wraps a time.Timer to provide a Done method.
type timer struct {
*time.Timer
}
// Done returns the timer's C channel, which triggers in response to the timer
// expiring.
func (t timer) Done() <-chan time.Time {
return t.C
}
// VolumeQueue manages the exponential backoff of retrying volumes. it behaves
// somewhat like a priority queue. however, the key difference is that volumes
// which are ready to process or reprocess are read off of an unbuffered
// channel, meaning the order in which ready volumes are processed is at the
// mercy of the golang scheduler. in practice, this does not matter.
type VolumeQueue struct {
sync.Mutex
// next returns the next volumeQueueEntry when it is ready.
next chan *volumeQueueEntry
// outstanding is the set of all pending volumeQueueEntries, mapped by
// volume ID.
outstanding map[string]*volumeQueueEntry
// stopChan stops the volumeQueue and cancels all entries.
stopChan chan struct{}
// timerSource is an object which is used to create the timer for a
// volumeQueueEntry. it exists so that in testing, the timer can be
// substituted for an object that we control.
timerSource vqTimerSource
}
// volumeQueueEntry represents one entry in the volumeQueue
type volumeQueueEntry struct {
// id is the id of the volume this entry represents. we only need the ID,
// because the CSI manager will look up the latest revision of the volume
// before doing any work on it.
id string
// attempt is the current retry attempt of the entry.
attempt uint
// cancel is a function which is called to abort the retry attempt.
cancel func()
}
// NewVolumeQueue returns a new VolumeQueue with the default timerSource.
func NewVolumeQueue() *VolumeQueue {
return &VolumeQueue{
next: make(chan *volumeQueueEntry),
outstanding: make(map[string]*volumeQueueEntry),
stopChan: make(chan struct{}),
timerSource: timerSource{},
}
}
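// A minimal usage sketch (hypothetical, not part of the original source;
// process stands in for the caller's own volume operation):
//
//	vq := NewVolumeQueue()
//	vq.Enqueue("volume-id", 0)
//	for {
//		id, attempt := vq.Wait()
//		if id == "" {
//			return // the queue was stopped
//		}
//		if err := process(id); err != nil {
//			vq.Enqueue(id, attempt+1) // retry with a longer backoff
//		}
//	}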
// Enqueue adds an entry to the VolumeQueue with the specified retry attempt.
// if an entry for the specified id already exists, enqueue will remove it and
// create a new entry.
func (vq *VolumeQueue) Enqueue(id string, attempt uint) {
// we must lock the volumeQueue when we add entries, because we will be
// accessing the outstanding map
vq.Lock()
defer vq.Unlock()
if entry, ok := vq.outstanding[id]; ok {
entry.cancel()
delete(vq.outstanding, id)
}
cancelChan := make(chan struct{})
v := &volumeQueueEntry{
id: id,
attempt: attempt,
cancel: func() {
close(cancelChan)
},
}
t := vq.timerSource.NewTimer(attempt)
// this goroutine is the meat of the volumeQueue. when the timer triggers,
// the volume queue entry is written out to the next channel.
//
// the nature of the select statement, and of goroutines and of
	// asynchronous operations means that this is not actually strictly
// ordered. if several entries are ready, then the one that actually gets
// dequeued is at the mercy of the golang scheduler.
//
// however, the flip side of this is that canceling an entry truly cancels
// it. because we're blocking on a write attempt, if we cancel, we don't
// do that write attempt, and there's no need to try to remove from the
// queue a ready-but-now-canceled entry before it is processed.
go func() {
select {
case <-t.Done():
// once the timer fires, we will try to write this entry to the
// next channel. however, because next is unbuffered, if we ended
// up in a situation where no read occurred, we would be
// deadlocked. to avoid this, we select on both a vq.next write and
// on a read from cancelChan, which allows us to abort our write
// attempt.
select {
case vq.next <- v:
case <-cancelChan:
}
case <-cancelChan:
// the documentation for timer recommends draining the channel like
// this.
if !t.Stop() {
<-t.Done()
}
}
}()
vq.outstanding[id] = v
}
// Wait returns the ID and attempt number of the next volume ready to process.
// If no volume is ready, Wait blocks until one is ready. If the VolumeQueue
// is stopped, Wait returns ("", 0).
func (vq *VolumeQueue) Wait() (string, uint) {
select {
case v := <-vq.next:
vq.Lock()
defer vq.Unlock()
// we need to be certain that this entry is the same entry that we
// read, because otherwise there may be a race.
//
// it would be possible for the read from next to succeed, but before
// the lock is acquired, a new attempt is enqueued. enqueuing the new
// attempt deletes the old entry before replacing it with the new entry
// and releasing the lock. then, this routine may acquire the lock, and
// delete a new entry.
//
// in practice, it is unclear if this race could happen or would matter
// if it did, but always better safe than sorry.
e, ok := vq.outstanding[v.id]
if ok && e == v {
delete(vq.outstanding, v.id)
}
return v.id, v.attempt
case <-vq.stopChan:
// if the volumeQueue is stopped, then there may be no more writes, so
// we should return an empty result from wait
return "", 0
}
}
// Outstanding returns the number of items outstanding in this queue
func (vq *VolumeQueue) Outstanding() int {
return len(vq.outstanding)
}
// Stop stops the VolumeQueue and cancels all outstanding entries. Stop may
// only be called once.
func (vq *VolumeQueue) Stop() {
	vq.Lock()
	defer vq.Unlock()
	close(vq.stopChan)
	for _, entry := range vq.outstanding {
		entry.cancel()
	}
}

View file

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package xnet

View file

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package xnet

21
vendor/github.com/dustin/go-humanize/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,21 @@
sudo: false
language: go
go:
- 1.3.x
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- master
matrix:
allow_failures:
- go: master
fast_finish: true
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d -s .)
- go tool vet .
- go test -v -race ./...

21
vendor/github.com/dustin/go-humanize/LICENSE generated vendored Normal file
View file

@ -0,0 +1,21 @@
Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
<http://www.opensource.org/licenses/mit-license.php>

124
vendor/github.com/dustin/go-humanize/README.markdown generated vendored Normal file
View file

@ -0,0 +1,124 @@
# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
Just a few functions for helping humanize times and sizes.
`go get` it as `github.com/dustin/go-humanize`, import it as
`"github.com/dustin/go-humanize"`, use it as `humanize`.
See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
complete documentation.
## Sizes
This lets you take numbers like `82854982` and convert them to useful
strings like `83 MB` or `79 MiB` (whichever you prefer).
Example:
```go
fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
```
## Times
This lets you take a `time.Time` and spit it out in relative terms.
For example, `12 seconds ago` or `3 days from now`.
Example:
```go
fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
```
Thanks to Kyle Lemons for the time implementation from an IRC
conversation one day. It's pretty neat.
## Ordinals
From a [mailing list discussion][odisc] where a user wanted to be able
to label ordinals.
0 -> 0th
1 -> 1st
2 -> 2nd
3 -> 3rd
4 -> 4th
[...]
Example:
```go
fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
```
## Commas
Want to shove commas into numbers? Be my guest.
0 -> 0
100 -> 100
1000 -> 1,000
1000000000 -> 1,000,000,000
-100000 -> -100,000
Example:
```go
fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
```
## Ftoa
Nicer float64 formatter that removes trailing zeros.
```go
fmt.Printf("%f", 2.24) // 2.240000
fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
fmt.Printf("%f", 2.0) // 2.000000
fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
```
## SI notation
Format numbers with [SI notation][sinotation].
Example:
```go
humanize.SI(0.00000000223, "M") // 2.23 nM
```
## English-specific functions
The following functions are in the `humanize/english` subpackage.
### Plurals
Simple English pluralization
```go
english.PluralWord(1, "object", "") // object
english.PluralWord(42, "object", "") // objects
english.PluralWord(2, "bus", "") // buses
english.PluralWord(99, "locus", "loci") // loci
english.Plural(1, "object", "") // 1 object
english.Plural(42, "object", "") // 42 objects
english.Plural(2, "bus", "") // 2 buses
english.Plural(99, "locus", "loci") // 99 loci
```
### Word series
Format comma-separated word lists with conjunctions:
```go
english.WordSeries([]string{"foo"}, "and") // foo
english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
```
[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix

31
vendor/github.com/dustin/go-humanize/big.go generated vendored Normal file
View file

@ -0,0 +1,31 @@
package humanize
import (
"math/big"
)
// order of magnitude (to a max order)
func oomm(n, b *big.Int, maxmag int) (float64, int) {
mag := 0
m := &big.Int{}
for n.Cmp(b) >= 0 {
n.DivMod(n, b, m)
mag++
if mag == maxmag && maxmag >= 0 {
break
}
}
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
}
// total order of magnitude
// (same as above, but with no upper limit)
func oom(n, b *big.Int) (float64, int) {
mag := 0
m := &big.Int{}
for n.Cmp(b) >= 0 {
n.DivMod(n, b, m)
mag++
}
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
}
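// For example (illustration only): oom(big.NewInt(1234567), big.NewInt(1000))
// yields (1.234, 2), i.e. roughly 1.234 million; the fractional part comes
// only from the final division step, so earlier remainders are discarded.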

173
vendor/github.com/dustin/go-humanize/bigbytes.go generated vendored Normal file
View file

@ -0,0 +1,173 @@
package humanize
import (
"fmt"
"math/big"
"strings"
"unicode"
)
var (
bigIECExp = big.NewInt(1024)
	// BigByte is one byte in big.Ints
	BigByte = big.NewInt(1)
	// BigKiByte is 1,024 bytes in big.Ints
	BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
	// BigMiByte is 1,024 k bytes in big.Ints
	BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
	// BigGiByte is 1,024 m bytes in big.Ints
	BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
	// BigTiByte is 1,024 g bytes in big.Ints
	BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
	// BigPiByte is 1,024 t bytes in big.Ints
	BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
	// BigEiByte is 1,024 p bytes in big.Ints
	BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
	// BigZiByte is 1,024 e bytes in big.Ints
	BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
	// BigYiByte is 1,024 z bytes in big.Ints
	BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
)
var (
bigSIExp = big.NewInt(1000)
// BigSIByte is one SI byte in big.Ints
BigSIByte = big.NewInt(1)
// BigKByte is 1,000 SI bytes in big.Ints
BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
// BigMByte is 1,000 SI k bytes in big.Ints
BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
// BigGByte is 1,000 SI m bytes in big.Ints
BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
// BigTByte is 1,000 SI g bytes in big.Ints
BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
// BigPByte is 1,000 SI t bytes in big.Ints
BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
// BigEByte is 1,000 SI p bytes in big.Ints
BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
// BigZByte is 1,000 SI e bytes in big.Ints
BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
// BigYByte is 1,000 SI z bytes in big.Ints
BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
)
var bigBytesSizeTable = map[string]*big.Int{
"b": BigByte,
"kib": BigKiByte,
"kb": BigKByte,
"mib": BigMiByte,
"mb": BigMByte,
"gib": BigGiByte,
"gb": BigGByte,
"tib": BigTiByte,
"tb": BigTByte,
"pib": BigPiByte,
"pb": BigPByte,
"eib": BigEiByte,
"eb": BigEByte,
"zib": BigZiByte,
"zb": BigZByte,
"yib": BigYiByte,
"yb": BigYByte,
// Without suffix
"": BigByte,
"ki": BigKiByte,
"k": BigKByte,
"mi": BigMiByte,
"m": BigMByte,
"gi": BigGiByte,
"g": BigGByte,
"ti": BigTiByte,
"t": BigTByte,
"pi": BigPiByte,
"p": BigPByte,
"ei": BigEiByte,
"e": BigEByte,
"z": BigZByte,
"zi": BigZiByte,
"y": BigYByte,
"yi": BigYiByte,
}
var ten = big.NewInt(10)
func humanateBigBytes(s, base *big.Int, sizes []string) string {
if s.Cmp(ten) < 0 {
return fmt.Sprintf("%d B", s)
}
c := (&big.Int{}).Set(s)
val, mag := oomm(c, base, len(sizes)-1)
suffix := sizes[mag]
f := "%.0f %s"
if val < 10 {
f = "%.1f %s"
}
return fmt.Sprintf(f, val, suffix)
}
// BigBytes produces a human readable representation of an SI size.
//
// See also: ParseBigBytes.
//
// BigBytes(82854982) -> 83 MB
func BigBytes(s *big.Int) string {
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
return humanateBigBytes(s, bigSIExp, sizes)
}
// BigIBytes produces a human readable representation of an IEC size.
//
// See also: ParseBigBytes.
//
// BigIBytes(82854982) -> 79 MiB
func BigIBytes(s *big.Int) string {
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
return humanateBigBytes(s, bigIECExp, sizes)
}
// ParseBigBytes parses a string representation of bytes into the number
// of bytes it represents.
//
// See also: BigBytes, BigIBytes.
//
// ParseBigBytes("42 MB") -> 42000000, nil
// ParseBigBytes("42 mib") -> 44040192, nil
func ParseBigBytes(s string) (*big.Int, error) {
lastDigit := 0
hasComma := false
for _, r := range s {
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
break
}
if r == ',' {
hasComma = true
}
lastDigit++
}
num := s[:lastDigit]
if hasComma {
num = strings.Replace(num, ",", "", -1)
}
val := &big.Rat{}
_, err := fmt.Sscanf(num, "%f", val)
if err != nil {
return nil, err
}
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
if m, ok := bigBytesSizeTable[extra]; ok {
mv := (&big.Rat{}).SetInt(m)
val.Mul(val, mv)
rv := &big.Int{}
rv.Div(val.Num(), val.Denom())
return rv, nil
}
return nil, fmt.Errorf("unhandled size name: %v", extra)
}

143
vendor/github.com/dustin/go-humanize/bytes.go generated vendored Normal file
View file

@ -0,0 +1,143 @@
package humanize
import (
"fmt"
"math"
"strconv"
"strings"
"unicode"
)
// IEC Sizes.
// kibis of bits
const (
Byte = 1 << (iota * 10)
KiByte
MiByte
GiByte
TiByte
PiByte
EiByte
)
// SI Sizes.
const (
IByte = 1
KByte = IByte * 1000
MByte = KByte * 1000
GByte = MByte * 1000
TByte = GByte * 1000
PByte = TByte * 1000
EByte = PByte * 1000
)
var bytesSizeTable = map[string]uint64{
"b": Byte,
"kib": KiByte,
"kb": KByte,
"mib": MiByte,
"mb": MByte,
"gib": GiByte,
"gb": GByte,
"tib": TiByte,
"tb": TByte,
"pib": PiByte,
"pb": PByte,
"eib": EiByte,
"eb": EByte,
// Without suffix
"": Byte,
"ki": KiByte,
"k": KByte,
"mi": MiByte,
"m": MByte,
"gi": GiByte,
"g": GByte,
"ti": TiByte,
"t": TByte,
"pi": PiByte,
"p": PByte,
"ei": EiByte,
"e": EByte,
}
func logn(n, b float64) float64 {
return math.Log(n) / math.Log(b)
}
func humanateBytes(s uint64, base float64, sizes []string) string {
if s < 10 {
return fmt.Sprintf("%d B", s)
}
e := math.Floor(logn(float64(s), base))
suffix := sizes[int(e)]
val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
f := "%.0f %s"
if val < 10 {
f = "%.1f %s"
}
return fmt.Sprintf(f, val, suffix)
}
// Bytes produces a human readable representation of an SI size.
//
// See also: ParseBytes.
//
// Bytes(82854982) -> 83 MB
func Bytes(s uint64) string {
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
return humanateBytes(s, 1000, sizes)
}
// IBytes produces a human readable representation of an IEC size.
//
// See also: ParseBytes.
//
// IBytes(82854982) -> 79 MiB
func IBytes(s uint64) string {
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
return humanateBytes(s, 1024, sizes)
}
// ParseBytes parses a string representation of bytes into the number
// of bytes it represents.
//
// See Also: Bytes, IBytes.
//
// ParseBytes("42 MB") -> 42000000, nil
// ParseBytes("42 mib") -> 44040192, nil
func ParseBytes(s string) (uint64, error) {
lastDigit := 0
hasComma := false
for _, r := range s {
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
break
}
if r == ',' {
hasComma = true
}
lastDigit++
}
num := s[:lastDigit]
if hasComma {
num = strings.Replace(num, ",", "", -1)
}
f, err := strconv.ParseFloat(num, 64)
if err != nil {
return 0, err
}
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
if m, ok := bytesSizeTable[extra]; ok {
f *= float64(m)
if f >= math.MaxUint64 {
return 0, fmt.Errorf("too large: %v", s)
}
return uint64(f), nil
}
return 0, fmt.Errorf("unhandled size name: %v", extra)
}

116
vendor/github.com/dustin/go-humanize/comma.go generated vendored Normal file
View file

@ -0,0 +1,116 @@
package humanize
import (
"bytes"
"math"
"math/big"
"strconv"
"strings"
)
// Comma produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Comma(834142) -> 834,142
func Comma(v int64) string {
sign := ""
// Min int64 can't be negated to a usable value, so it has to be special cased.
if v == math.MinInt64 {
return "-9,223,372,036,854,775,808"
}
if v < 0 {
sign = "-"
v = 0 - v
}
parts := []string{"", "", "", "", "", "", ""}
j := len(parts) - 1
for v > 999 {
parts[j] = strconv.FormatInt(v%1000, 10)
switch len(parts[j]) {
case 2:
parts[j] = "0" + parts[j]
case 1:
parts[j] = "00" + parts[j]
}
v = v / 1000
j--
}
parts[j] = strconv.Itoa(int(v))
return sign + strings.Join(parts[j:], ",")
}
// Commaf produces a string form of the given number in base 10 with
// commas after every three orders of magnitude.
//
// e.g. Commaf(834142.32) -> 834,142.32
func Commaf(v float64) string {
buf := &bytes.Buffer{}
if v < 0 {
buf.Write([]byte{'-'})
v = 0 - v
}
comma := []byte{','}
parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
pos := 0
if len(parts[0])%3 != 0 {
pos += len(parts[0]) % 3
buf.WriteString(parts[0][:pos])
buf.Write(comma)
}
for ; pos < len(parts[0]); pos += 3 {
buf.WriteString(parts[0][pos : pos+3])
buf.Write(comma)
}
buf.Truncate(buf.Len() - 1)
if len(parts) > 1 {
buf.Write([]byte{'.'})
buf.WriteString(parts[1])
}
return buf.String()
}
// CommafWithDigits works like the Commaf but limits the resulting
// string to the given number of decimal places.
//
// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
func CommafWithDigits(f float64, decimals int) string {
return stripTrailingDigits(Commaf(f), decimals)
}
// BigComma produces a string form of the given big.Int in base 10
// with commas after every three orders of magnitude.
func BigComma(b *big.Int) string {
sign := ""
if b.Sign() < 0 {
sign = "-"
b.Abs(b)
}
athousand := big.NewInt(1000)
c := (&big.Int{}).Set(b)
_, m := oom(c, athousand)
parts := make([]string, m+1)
j := len(parts) - 1
mod := &big.Int{}
for b.Cmp(athousand) >= 0 {
b.DivMod(b, athousand, mod)
parts[j] = strconv.FormatInt(mod.Int64(), 10)
switch len(parts[j]) {
case 2:
parts[j] = "0" + parts[j]
case 1:
parts[j] = "00" + parts[j]
}
j--
}
parts[j] = strconv.Itoa(int(b.Int64()))
return sign + strings.Join(parts[j:], ",")
}

40
vendor/github.com/dustin/go-humanize/commaf.go generated vendored Normal file
View file

@ -0,0 +1,40 @@
// +build go1.6
package humanize
import (
"bytes"
"math/big"
"strings"
)
// BigCommaf produces a string form of the given big.Float in base 10
// with commas after every three orders of magnitude.
func BigCommaf(v *big.Float) string {
buf := &bytes.Buffer{}
if v.Sign() < 0 {
buf.Write([]byte{'-'})
v.Abs(v)
}
comma := []byte{','}
parts := strings.Split(v.Text('f', -1), ".")
pos := 0
if len(parts[0])%3 != 0 {
pos += len(parts[0]) % 3
buf.WriteString(parts[0][:pos])
buf.Write(comma)
}
for ; pos < len(parts[0]); pos += 3 {
buf.WriteString(parts[0][pos : pos+3])
buf.Write(comma)
}
buf.Truncate(buf.Len() - 1)
if len(parts) > 1 {
buf.Write([]byte{'.'})
buf.WriteString(parts[1])
}
return buf.String()
}

46
vendor/github.com/dustin/go-humanize/ftoa.go generated vendored Normal file
View file

@ -0,0 +1,46 @@
package humanize
import (
"strconv"
"strings"
)
func stripTrailingZeros(s string) string {
offset := len(s) - 1
for offset > 0 {
if s[offset] == '.' {
offset--
break
}
if s[offset] != '0' {
break
}
offset--
}
return s[:offset+1]
}
func stripTrailingDigits(s string, digits int) string {
if i := strings.Index(s, "."); i >= 0 {
if digits <= 0 {
return s[:i]
}
i++
if i+digits >= len(s) {
return s
}
return s[:i+digits]
}
return s
}
// Ftoa converts a float to a string with no trailing zeros.
func Ftoa(num float64) string {
return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
}
// FtoaWithDigits converts a float to a string but limits the resulting string
// to the given number of decimal places, and no trailing zeros.
func FtoaWithDigits(num float64, digits int) string {
return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
}

8
vendor/github.com/dustin/go-humanize/humanize.go generated vendored Normal file
View file

@ -0,0 +1,8 @@
/*
Package humanize converts boring ugly numbers to human-friendly strings and back.
Durations can be turned into strings such as "3 days ago", numbers
representing sizes like 82854982 into useful strings like, "83 MB" or
"79 MiB" (whichever you prefer).
*/
package humanize

Some files were not shown because too many files have changed in this diff.