Merge pull request #33642 from aaronlehmann/vendor-swarmkit-a4bf013

Vendor swarmkit a4bf013

This commit is contained in: commit 3029e4b73c

39 changed files with 3468 additions and 1711 deletions

@@ -104,7 +104,7 @@ github.com/containerd/containerd 3addd840653146c90a254301d6c3a663c7fd6429
 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
 
 # cluster
-github.com/docker/swarmkit eb07af52aa2216100cff1ad0b13df48daa8914bf
+github.com/docker/swarmkit a4bf0135f63fb60f0e76ae81579cde87f580db6e
 github.com/gogo/protobuf v0.4
 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
 github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
20  vendor/github.com/docker/swarmkit/agent/agent.go  generated vendored
|
@ -206,6 +206,9 @@ func (a *Agent) run(ctx context.Context) {
|
|||
leaving = a.leaving
|
||||
subscriptions = map[string]context.CancelFunc{}
|
||||
)
|
||||
defer func() {
|
||||
session.close()
|
||||
}()
|
||||
|
||||
if err := a.worker.Init(ctx); err != nil {
|
||||
log.G(ctx).WithError(err).Error("worker initialization failed")
|
||||
|
@ -314,6 +317,9 @@ func (a *Agent) run(ctx context.Context) {
|
|||
if ready != nil {
|
||||
close(ready)
|
||||
}
|
||||
if a.config.SessionTracker != nil {
|
||||
a.config.SessionTracker.SessionEstablished()
|
||||
}
|
||||
ready = nil
|
||||
registered = nil // we only care about this once per session
|
||||
backoff = 0 // reset backoff
|
||||
|
@ -323,6 +329,10 @@ func (a *Agent) run(ctx context.Context) {
|
|||
// but no error was sent. This must be the only place
|
||||
// session.close is called in response to errors, for this to work.
|
||||
if err != nil {
|
||||
if a.config.SessionTracker != nil {
|
||||
a.config.SessionTracker.SessionError(err)
|
||||
}
|
||||
|
||||
log.G(ctx).WithError(err).Error("agent: session failed")
|
||||
backoff = initialSessionFailureBackoff + 2*backoff
|
||||
if backoff > maxSessionFailureBackoff {
|
||||
|
@ -337,6 +347,14 @@ func (a *Agent) run(ctx context.Context) {
|
|||
// if we're here before <-registered, do nothing for that event
|
||||
registered = nil
|
||||
case <-session.closed:
|
||||
if a.config.SessionTracker != nil {
|
||||
if err := a.config.SessionTracker.SessionClosed(); err != nil {
|
||||
log.G(ctx).WithError(err).Error("agent: exiting")
|
||||
a.err = err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
log.G(ctx).Debugf("agent: rebuild session")
|
||||
|
||||
// select a session registration delay from backoff range.
|
||||
|
@ -365,8 +383,6 @@ func (a *Agent) run(ctx context.Context) {
|
|||
if a.err == nil {
|
||||
a.err = ctx.Err()
|
||||
}
|
||||
session.close()
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
|
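The run loop above doubles the session retry delay on every failure (backoff = initialSessionFailureBackoff + 2*backoff) and caps it at maxSessionFailureBackoff before picking a registration delay from that range. A standalone sketch of the progression, with placeholder constant values since the real ones are not part of this diff:

```go
package main

import (
	"fmt"
	"time"
)

// Placeholder values; the real constants live in the swarmkit agent package.
const (
	initialSessionFailureBackoff = 100 * time.Millisecond
	maxSessionFailureBackoff     = 8 * time.Second
)

func main() {
	var backoff time.Duration
	for failure := 1; failure <= 8; failure++ {
		backoff = initialSessionFailureBackoff + 2*backoff
		if backoff > maxSessionFailureBackoff {
			backoff = maxSessionFailureBackoff
		}
		fmt.Printf("failure %d: next registration delay drawn from [0, %v)\n", failure, backoff)
	}
}
```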
17  vendor/github.com/docker/swarmkit/agent/config.go  generated vendored
|
@@ -43,6 +43,10 @@ type Config struct {
 
 	// NodeTLSInfo contains the starting node TLS info to bootstrap into the agent
 	NodeTLSInfo *api.NodeTLSInfo
+
+	// SessionTracker, if provided, will have its SessionClosed and SessionError methods called
+	// when sessions close and error.
+	SessionTracker SessionTracker
 }
 
 func (c *Config) validate() error {
@@ -64,3 +68,16 @@ func (c *Config) validate() error {
 
 	return nil
 }
+
+// A SessionTracker gets notified when sessions close and error
+type SessionTracker interface {
+	// SessionClosed is called whenever a session is closed - if the function errors, the agent
+	// will exit with the returned error. Otherwise the agent can continue and rebuild a new session.
+	SessionClosed() error
+
+	// SessionError is called whenever a session errors
+	SessionError(err error)
+
+	// SessionEstablished is called whenever a session is established
+	SessionEstablished()
+}
|
||||
|
|
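The SessionTracker hooks declared here are invoked from the agent run loop shown above (SessionEstablished once a session is ready, SessionError on session failure, SessionClosed when a closed session is about to be rebuilt). A minimal sketch of an implementation; the type and its logging behavior are invented for illustration, only the three-method interface comes from this diff:

```go
package main

import (
	"log"
	"sync/atomic"
)

// churnTracker counts session churn and never asks the agent to exit.
type churnTracker struct {
	closed int64
}

// SessionClosed returns nil, so the agent keeps rebuilding sessions.
func (t *churnTracker) SessionClosed() error {
	n := atomic.AddInt64(&t.closed, 1)
	log.Printf("session closed (%d so far); letting the agent reconnect", n)
	return nil
}

// SessionError records the error that terminated a session.
func (t *churnTracker) SessionError(err error) {
	log.Printf("session error: %v", err)
}

// SessionEstablished is called once per successfully established session.
func (t *churnTracker) SessionEstablished() {
	log.Print("session established")
}

func main() {
	t := &churnTracker{}
	t.SessionEstablished()
	t.SessionError(nil)
	_ = t.SessionClosed()
}
```

A value like this would be passed as the SessionTracker field of the Config struct above.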
2  vendor/github.com/docker/swarmkit/agent/session.go  generated vendored
|
@@ -409,7 +409,7 @@ func (s *session) sendError(err error) {
 }
 
 // close closing session. It should be called only in <-session.errs branch
-// of event loop.
+// of event loop, or when cleaning up the agent.
 func (s *session) close() error {
 	s.closeOnce.Do(func() {
 		s.cancel()
|
||||
|
|
111  vendor/github.com/docker/swarmkit/api/genericresource/helpers.go  generated vendored  Normal file
|
@ -0,0 +1,111 @@
|
|||
package genericresource
|
||||
|
||||
import (
|
||||
"github.com/docker/swarmkit/api"
|
||||
)
|
||||
|
||||
// NewSet creates a set object
|
||||
func NewSet(key string, vals ...string) []*api.GenericResource {
|
||||
rs := make([]*api.GenericResource, 0, len(vals))
|
||||
|
||||
for _, v := range vals {
|
||||
rs = append(rs, NewString(key, v))
|
||||
}
|
||||
|
||||
return rs
|
||||
}
|
||||
|
||||
// NewString creates a String resource
|
||||
func NewString(key, val string) *api.GenericResource {
|
||||
return &api.GenericResource{
|
||||
Resource: &api.GenericResource_Str{
|
||||
Str: &api.GenericString{
|
||||
Kind: key,
|
||||
Value: val,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewDiscrete creates a Discrete resource
|
||||
func NewDiscrete(key string, val int64) *api.GenericResource {
|
||||
return &api.GenericResource{
|
||||
Resource: &api.GenericResource_Discrete{
|
||||
Discrete: &api.GenericDiscrete{
|
||||
Kind: key,
|
||||
Value: val,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetResource returns resources from the "resources" parameter matching the kind key
|
||||
func GetResource(kind string, resources []*api.GenericResource) []*api.GenericResource {
|
||||
var res []*api.GenericResource
|
||||
|
||||
for _, r := range resources {
|
||||
if Kind(r) != kind {
|
||||
continue
|
||||
}
|
||||
|
||||
res = append(res, r)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// ConsumeNodeResources removes "res" from nodeAvailableResources
|
||||
func ConsumeNodeResources(nodeAvailableResources *[]*api.GenericResource, res []*api.GenericResource) {
|
||||
if nodeAvailableResources == nil {
|
||||
return
|
||||
}
|
||||
|
||||
w := 0
|
||||
|
||||
loop:
|
||||
for _, na := range *nodeAvailableResources {
|
||||
for _, r := range res {
|
||||
if Kind(na) != Kind(r) {
|
||||
continue
|
||||
}
|
||||
|
||||
if remove(na, r) {
|
||||
continue loop
|
||||
}
|
||||
// If this wasn't the right element then
|
||||
// we need to continue
|
||||
}
|
||||
|
||||
(*nodeAvailableResources)[w] = na
|
||||
w++
|
||||
}
|
||||
|
||||
*nodeAvailableResources = (*nodeAvailableResources)[:w]
|
||||
}
|
||||
|
||||
// Returns true if the element is to be removed from the list
|
||||
func remove(na, r *api.GenericResource) bool {
|
||||
switch tr := r.Resource.(type) {
|
||||
case *api.GenericResource_Discrete:
|
||||
if na.GetDiscrete() == nil {
|
||||
return false // Type change, ignore
|
||||
}
|
||||
|
||||
na.GetDiscrete().Value -= tr.Discrete.Value
|
||||
if na.GetDiscrete().Value <= 0 {
|
||||
return true
|
||||
}
|
||||
case *api.GenericResource_Str:
|
||||
if na.GetStr() == nil {
|
||||
return false // Type change, ignore
|
||||
}
|
||||
|
||||
if tr.Str.Value != na.GetStr().Value {
|
||||
return false // not the right item, ignore
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
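A short sketch of how these helpers compose; the resource kinds and quantities are invented, while the functions and the vendored import path are the ones introduced in this file:

```go
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/api/genericresource"
)

func main() {
	// A node advertising four discrete "apple" units and two named "orange" units.
	available := []*api.GenericResource{genericresource.NewDiscrete("apple", 4)}
	available = append(available, genericresource.NewSet("orange", "blue", "red")...)

	// A task consumes three "apple" and the "blue" orange.
	used := []*api.GenericResource{
		genericresource.NewDiscrete("apple", 3),
		genericresource.NewString("orange", "blue"),
	}
	genericresource.ConsumeNodeResources(&available, used)

	// One "apple" unit is left; the "red" orange is untouched.
	for _, r := range genericresource.GetResource("apple", available) {
		fmt.Println(genericresource.Kind(r), r.GetDiscrete().Value) // apple 1
	}
}
```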
47  vendor/github.com/docker/swarmkit/api/genericresource/parse.go  generated vendored  Normal file
|
@ -0,0 +1,47 @@
|
|||
package genericresource
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/swarmkit/api"
|
||||
)
|
||||
|
||||
func newParseError(format string, args ...interface{}) error {
|
||||
return fmt.Errorf("could not parse GenericResource: "+format, args...)
|
||||
}
|
||||
|
||||
// Parse parses the GenericResource resources given by the arguments
|
||||
func Parse(cmd string) ([]*api.GenericResource, error) {
|
||||
var rs []*api.GenericResource
|
||||
|
||||
for _, term := range strings.Split(cmd, ";") {
|
||||
kva := strings.Split(term, "=")
|
||||
if len(kva) != 2 {
|
||||
return nil, newParseError("incorrect term %s, missing '=' or malformed expr", term)
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(kva[0])
|
||||
val := strings.TrimSpace(kva[1])
|
||||
|
||||
u, err := strconv.ParseInt(val, 10, 64)
|
||||
if err == nil {
|
||||
if u < 0 {
|
||||
return nil, newParseError("cannot ask for negative resource %s", key)
|
||||
}
|
||||
rs = append(rs, NewDiscrete(key, u))
|
||||
continue
|
||||
}
|
||||
|
||||
if len(val) > 2 && val[0] == '{' && val[len(val)-1] == '}' {
|
||||
val = val[1 : len(val)-1]
|
||||
rs = append(rs, NewSet(key, strings.Split(val, ",")...)...)
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, newParseError("could not parse expression '%s'", term)
|
||||
}
|
||||
|
||||
return rs, nil
|
||||
}
|
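Parse accepts the syntax hinted at in the Resources comment later in this diff (e.g. bananas=2;apple={red,yellow,green}): ';'-separated terms whose value is either a non-negative integer, yielding a Discrete resource, or a brace-delimited set of strings, yielding one String resource per element. A sketch of calling it, with invented values:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/swarmkit/api/genericresource"
)

func main() {
	res, err := genericresource.Parse("bananas=2;apple={red,yellow,green}")
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range res {
		fmt.Println(genericresource.Kind(r), genericresource.Value(r))
	}
	// Yields one discrete "bananas" resource (2) and three "apple" string resources.
}
```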
202  vendor/github.com/docker/swarmkit/api/genericresource/resource_management.go  generated vendored  Normal file
|
@ -0,0 +1,202 @@
|
|||
package genericresource
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/docker/swarmkit/api"
|
||||
)
|
||||
|
||||
// Claim assigns GenericResources to a task by taking them from the
|
||||
// node's GenericResource list and storing them in the task's available list
|
||||
func Claim(nodeAvailableResources, taskAssigned *[]*api.GenericResource,
|
||||
taskReservations []*api.GenericResource) error {
|
||||
var resSelected []*api.GenericResource
|
||||
|
||||
for _, res := range taskReservations {
|
||||
tr := res.GetDiscrete()
|
||||
if tr == nil {
|
||||
return fmt.Errorf("task should only hold Discrete type")
|
||||
}
|
||||
|
||||
// Select the resources
|
||||
nrs, err := selectNodeResources(*nodeAvailableResources, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resSelected = append(resSelected, nrs...)
|
||||
}
|
||||
|
||||
ClaimResources(nodeAvailableResources, taskAssigned, resSelected)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ClaimResources adds the specified resources to the task's list
|
||||
// and removes them from the node's generic resource list
|
||||
func ClaimResources(nodeAvailableResources, taskAssigned *[]*api.GenericResource,
|
||||
resSelected []*api.GenericResource) {
|
||||
*taskAssigned = append(*taskAssigned, resSelected...)
|
||||
ConsumeNodeResources(nodeAvailableResources, resSelected)
|
||||
}
|
||||
|
||||
func selectNodeResources(nodeRes []*api.GenericResource,
|
||||
tr *api.GenericDiscrete) ([]*api.GenericResource, error) {
|
||||
var nrs []*api.GenericResource
|
||||
|
||||
for _, res := range nodeRes {
|
||||
if Kind(res) != tr.Kind {
|
||||
continue
|
||||
}
|
||||
|
||||
switch nr := res.Resource.(type) {
|
||||
case *api.GenericResource_Discrete:
|
||||
if nr.Discrete.Value >= tr.Value && tr.Value != 0 {
|
||||
nrs = append(nrs, NewDiscrete(tr.Kind, tr.Value))
|
||||
}
|
||||
|
||||
return nrs, nil
|
||||
case *api.GenericResource_Str:
|
||||
nrs = append(nrs, res.Copy())
|
||||
|
||||
if int64(len(nrs)) == tr.Value {
|
||||
return nrs, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(nrs) == 0 {
|
||||
return nil, fmt.Errorf("not enough resources available for task reservations: %+v", tr)
|
||||
}
|
||||
|
||||
return nrs, nil
|
||||
}
|
||||
|
||||
// Reclaim adds the resources taken by the task to the node's store
|
||||
func Reclaim(nodeAvailableResources *[]*api.GenericResource, taskAssigned, nodeRes []*api.GenericResource) error {
|
||||
err := reclaimResources(nodeAvailableResources, taskAssigned)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sanitize(nodeRes, nodeAvailableResources)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func reclaimResources(nodeAvailableResources *[]*api.GenericResource, taskAssigned []*api.GenericResource) error {
|
||||
// The node could have been updated
|
||||
if nodeAvailableResources == nil {
|
||||
return fmt.Errorf("node no longer has any resources")
|
||||
}
|
||||
|
||||
for _, res := range taskAssigned {
|
||||
switch tr := res.Resource.(type) {
|
||||
case *api.GenericResource_Discrete:
|
||||
nrs := GetResource(tr.Discrete.Kind, *nodeAvailableResources)
|
||||
|
||||
// If the resource went down to 0 it's no longer in the
|
||||
// available list
|
||||
if len(nrs) == 0 {
|
||||
*nodeAvailableResources = append(*nodeAvailableResources, res.Copy())
|
||||
}
|
||||
|
||||
if len(nrs) != 1 {
|
||||
continue // Type change
|
||||
}
|
||||
|
||||
nr := nrs[0].GetDiscrete()
|
||||
if nr == nil {
|
||||
continue // Type change
|
||||
}
|
||||
|
||||
nr.Value += tr.Discrete.Value
|
||||
case *api.GenericResource_Str:
|
||||
*nodeAvailableResources = append(*nodeAvailableResources, res.Copy())
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sanitize checks that nodeAvailableResources does not add resources unknown
|
||||
// to the nodeSpec (nodeRes) or goes over the integer bound specified
|
||||
// by the spec.
|
||||
// Note this is because the user is able to update a node's resources
|
||||
func sanitize(nodeRes []*api.GenericResource, nodeAvailableResources *[]*api.GenericResource) {
|
||||
// - We add the sanitized resources at the end, after
|
||||
// having removed the elements from the list
|
||||
|
||||
// - When a set changes to a Discrete we also need
|
||||
// to make sure that we don't add the Discrete multiple
|
||||
// time hence, the need of a map to remember that
|
||||
var sanitized []*api.GenericResource
|
||||
kindSanitized := make(map[string]struct{})
|
||||
w := 0
|
||||
|
||||
for _, na := range *nodeAvailableResources {
|
||||
ok, nrs := sanitizeResource(nodeRes, na)
|
||||
if !ok {
|
||||
if _, ok = kindSanitized[Kind(na)]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
kindSanitized[Kind(na)] = struct{}{}
|
||||
sanitized = append(sanitized, nrs...)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
(*nodeAvailableResources)[w] = na
|
||||
w++
|
||||
}
|
||||
|
||||
*nodeAvailableResources = (*nodeAvailableResources)[:w]
|
||||
*nodeAvailableResources = append(*nodeAvailableResources, sanitized...)
|
||||
}
|
||||
|
||||
// Returns true if the element is in nodeRes and "sane"
|
||||
// Returns false if the element isn't in nodeRes and "sane" and the element(s) that should be replacing it
|
||||
func sanitizeResource(nodeRes []*api.GenericResource, res *api.GenericResource) (ok bool, nrs []*api.GenericResource) {
|
||||
switch na := res.Resource.(type) {
|
||||
case *api.GenericResource_Discrete:
|
||||
nrs := GetResource(na.Discrete.Kind, nodeRes)
|
||||
|
||||
// Type change or removed: reset
|
||||
if len(nrs) != 1 {
|
||||
return false, nrs
|
||||
}
|
||||
|
||||
// Type change: reset
|
||||
nr := nrs[0].GetDiscrete()
|
||||
if nr == nil {
|
||||
return false, nrs
|
||||
}
|
||||
|
||||
// Amount change: reset
|
||||
if na.Discrete.Value > nr.Value {
|
||||
return false, nrs
|
||||
}
|
||||
case *api.GenericResource_Str:
|
||||
nrs := GetResource(na.Str.Kind, nodeRes)
|
||||
|
||||
// Type change
|
||||
if len(nrs) == 0 {
|
||||
return false, nrs
|
||||
}
|
||||
|
||||
for _, nr := range nrs {
|
||||
// Type change: reset
|
||||
if nr.GetDiscrete() != nil {
|
||||
return false, nrs
|
||||
}
|
||||
|
||||
if na.Str.Value == nr.GetStr().Value {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Removed
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
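Claim and Reclaim are the two scheduler-facing entry points here: Claim selects resources from the node's available list and records them on a task, Reclaim returns them and re-sanitizes the list against the node spec. A round-trip sketch with invented quantities:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/api/genericresource"
)

func main() {
	// What the node spec declares, and what is currently free.
	nodeSpec := []*api.GenericResource{genericresource.NewDiscrete("apple", 5)}
	available := []*api.GenericResource{genericresource.NewDiscrete("apple", 5)}

	// The task reserves two "apple"; reservations must be Discrete.
	reservations := []*api.GenericResource{genericresource.NewDiscrete("apple", 2)}

	var assigned []*api.GenericResource
	if err := genericresource.Claim(&available, &assigned, reservations); err != nil {
		log.Fatal(err)
	}
	fmt.Println(genericresource.Value(available[0])) // 3

	// When the task stops, the resources go back to the node.
	if err := genericresource.Reclaim(&available, assigned, nodeSpec); err != nil {
		log.Fatal(err)
	}
	fmt.Println(genericresource.Value(available[0])) // 5
}
```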
54  vendor/github.com/docker/swarmkit/api/genericresource/string.go  generated vendored  Normal file
|
@ -0,0 +1,54 @@
|
|||
package genericresource
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/swarmkit/api"
|
||||
)
|
||||
|
||||
func discreteToString(d *api.GenericResource_Discrete) string {
|
||||
return strconv.FormatInt(d.Discrete.Value, 10)
|
||||
}
|
||||
|
||||
// Kind returns the kind key as a string
|
||||
func Kind(res *api.GenericResource) string {
|
||||
switch r := res.Resource.(type) {
|
||||
case *api.GenericResource_Discrete:
|
||||
return r.Discrete.Kind
|
||||
case *api.GenericResource_Str:
|
||||
return r.Str.Kind
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// Value returns the value key as a string
|
||||
func Value(res *api.GenericResource) string {
|
||||
switch res := res.Resource.(type) {
|
||||
case *api.GenericResource_Discrete:
|
||||
return discreteToString(res)
|
||||
case *api.GenericResource_Str:
|
||||
return res.Str.Value
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// EnvFormat returns the environment string version of the resource
|
||||
func EnvFormat(res []*api.GenericResource, prefix string) []string {
|
||||
envs := make(map[string][]string)
|
||||
for _, v := range res {
|
||||
key := Kind(v)
|
||||
val := Value(v)
|
||||
envs[key] = append(envs[key], val)
|
||||
}
|
||||
|
||||
env := make([]string, 0, len(res))
|
||||
for k, v := range envs {
|
||||
k = strings.ToUpper(prefix + "_" + k)
|
||||
env = append(env, k+"="+strings.Join(v, ","))
|
||||
}
|
||||
|
||||
return env
|
||||
}
|
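EnvFormat flattens a list of assigned resources into one environment variable per kind, upper-casing prefix_kind and comma-joining the values. A sketch; the DOCKER_RESOURCE prefix and the resource names are illustrative, not taken from this diff:

```go
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/api/genericresource"
)

func main() {
	assigned := []*api.GenericResource{
		genericresource.NewString("gpu", "UUID-1"),
		genericresource.NewString("gpu", "UUID-2"),
		genericresource.NewDiscrete("apple", 3),
	}
	for _, e := range genericresource.EnvFormat(assigned, "DOCKER_RESOURCE") {
		fmt.Println(e)
	}
	// Prints (in map order):
	//   DOCKER_RESOURCE_GPU=UUID-1,UUID-2
	//   DOCKER_RESOURCE_APPLE=3
}
```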
84  vendor/github.com/docker/swarmkit/api/genericresource/validate.go  generated vendored  Normal file
|
@ -0,0 +1,84 @@
|
|||
package genericresource
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/docker/swarmkit/api"
|
||||
)
|
||||
|
||||
// ValidateTask validates that the task only uses integers
|
||||
// for generic resources
|
||||
func ValidateTask(resources *api.Resources) error {
|
||||
for _, v := range resources.Generic {
|
||||
if v.GetDiscrete() != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
return fmt.Errorf("invalid argument for resource %s", Kind(v))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasEnough returns true if node can satisfy the task's GenericResource request
|
||||
func HasEnough(nodeRes []*api.GenericResource, taskRes *api.GenericResource) (bool, error) {
|
||||
t := taskRes.GetDiscrete()
|
||||
if t == nil {
|
||||
return false, fmt.Errorf("task should only hold Discrete type")
|
||||
}
|
||||
|
||||
if nodeRes == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
nrs := GetResource(t.Kind, nodeRes)
|
||||
if len(nrs) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
switch nr := nrs[0].Resource.(type) {
|
||||
case *api.GenericResource_Discrete:
|
||||
if t.Value > nr.Discrete.Value {
|
||||
return false, nil
|
||||
}
|
||||
case *api.GenericResource_Str:
|
||||
if t.Value > int64(len(nrs)) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// HasResource checks if there is enough "res" in the "resources" argument
|
||||
func HasResource(res *api.GenericResource, resources []*api.GenericResource) bool {
|
||||
for _, r := range resources {
|
||||
if Kind(res) != Kind(r) {
|
||||
continue
|
||||
}
|
||||
|
||||
switch rtype := r.Resource.(type) {
|
||||
case *api.GenericResource_Discrete:
|
||||
if res.GetDiscrete() == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if res.GetDiscrete().Value < rtype.Discrete.Value {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
case *api.GenericResource_Str:
|
||||
if res.GetStr() == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if res.GetStr().Value != rtype.Str.Value {
|
||||
continue
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
248  vendor/github.com/docker/swarmkit/api/objects.pb.go  generated vendored
|
@ -198,7 +198,8 @@ type Task struct {
|
|||
// such a cluster default or policy-based value.
|
||||
//
|
||||
// If not present, the daemon's default will be used.
|
||||
LogDriver *Driver `protobuf:"bytes,13,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"`
|
||||
LogDriver *Driver `protobuf:"bytes,13,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"`
|
||||
AssignedGenericResources []*GenericResource `protobuf:"bytes,15,rep,name=assigned_generic_resources,json=assignedGenericResources" json:"assigned_generic_resources,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Task) Reset() { *m = Task{} }
|
||||
|
@ -529,6 +530,14 @@ func (m *Task) CopyFrom(src interface{}) {
|
|||
m.LogDriver = &Driver{}
|
||||
github_com_docker_swarmkit_api_deepcopy.Copy(m.LogDriver, o.LogDriver)
|
||||
}
|
||||
if o.AssignedGenericResources != nil {
|
||||
m.AssignedGenericResources = make([]*GenericResource, len(o.AssignedGenericResources))
|
||||
for i := range m.AssignedGenericResources {
|
||||
m.AssignedGenericResources[i] = &GenericResource{}
|
||||
github_com_docker_swarmkit_api_deepcopy.Copy(m.AssignedGenericResources[i], o.AssignedGenericResources[i])
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (m *NetworkAttachment) Copy() *NetworkAttachment {
|
||||
|
@ -1140,6 +1149,18 @@ func (m *Task) MarshalTo(dAtA []byte) (int, error) {
|
|||
}
|
||||
i += n26
|
||||
}
|
||||
if len(m.AssignedGenericResources) > 0 {
|
||||
for _, msg := range m.AssignedGenericResources {
|
||||
dAtA[i] = 0x7a
|
||||
i++
|
||||
i = encodeVarintObjects(dAtA, i, uint64(msg.Size()))
|
||||
n, err := msg.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
|
@ -1771,6 +1792,12 @@ func (m *Task) Size() (n int) {
|
|||
l = m.SpecVersion.Size()
|
||||
n += 1 + l + sovObjects(uint64(l))
|
||||
}
|
||||
if len(m.AssignedGenericResources) > 0 {
|
||||
for _, e := range m.AssignedGenericResources {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovObjects(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
|
@ -4419,6 +4446,7 @@ func (this *Task) String() string {
|
|||
`Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`,
|
||||
`LogDriver:` + strings.Replace(fmt.Sprintf("%v", this.LogDriver), "Driver", "Driver", 1) + `,`,
|
||||
`SpecVersion:` + strings.Replace(fmt.Sprintf("%v", this.SpecVersion), "Version", "Version", 1) + `,`,
|
||||
`AssignedGenericResources:` + strings.Replace(fmt.Sprintf("%v", this.AssignedGenericResources), "GenericResource", "GenericResource", 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
@ -6001,6 +6029,37 @@ func (m *Task) Unmarshal(dAtA []byte) error {
|
|||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 15:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field AssignedGenericResources", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowObjects
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthObjects
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.AssignedGenericResources = append(m.AssignedGenericResources, &GenericResource{})
|
||||
if err := m.AssignedGenericResources[len(m.AssignedGenericResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipObjects(dAtA[iNdEx:])
|
||||
|
@ -7630,96 +7689,99 @@ var (
|
|||
func init() { proto.RegisterFile("objects.proto", fileDescriptorObjects) }
|
||||
|
||||
var fileDescriptorObjects = []byte{
-	// 1451 bytes of a gzipped FileDescriptorProto
+	// 1491 bytes of a gzipped FileDescriptorProto
	// (raw gzipped descriptor bytes not reproduced in this listing)
}
|
||||
|
|
2  vendor/github.com/docker/swarmkit/api/objects.proto  generated vendored
|
@@ -239,6 +239,8 @@ message Task {
 	//
 	// If not present, the daemon's default will be used.
 	Driver log_driver = 13;
+
+	repeated GenericResource assigned_generic_resources = 15;
 }
 
 // NetworkAttachment specifies the network parameters of attachment to
|
||||
|
|
1714  vendor/github.com/docker/swarmkit/api/types.pb.go  generated vendored
File diff suppressed because it is too large
20  vendor/github.com/docker/swarmkit/api/types.proto  generated vendored
|
@@ -30,12 +30,32 @@ message Annotations {
 	repeated IndexEntry indices = 4 [(gogoproto.nullable) = false];
 }
 
+message GenericString {
+	string kind = 1;
+	string value = 2;
+}
+
+message GenericDiscrete {
+	string kind = 1;
+	int64 value = 2;
+}
+
+message GenericResource {
+	oneof resource {
+		GenericString str = 1;
+		GenericDiscrete discrete = 2;
+	}
+}
+
 message Resources {
 	// Amount of CPUs (e.g. 2000000000 = 2 CPU cores)
 	int64 nano_cpus = 1 [(gogoproto.customname) = "NanoCPUs"];
 
 	// Amount of memory in bytes.
 	int64 memory_bytes = 2;
+
+	// User specified resource (e.g: bananas=2;apple={red,yellow,green})
+	repeated GenericResource generic = 3;
 }
 
 message ResourceRequirements {
|
||||
|
|
2  vendor/github.com/docker/swarmkit/ca/certificates.go  generated vendored
|
@@ -871,7 +871,7 @@ func GetRemoteSignedCertificate(ctx context.Context, csr []byte, rootCAPool *x50
 		caClient = api.NewNodeCAClient(conn.ClientConn)
 
 	// If there was no deadline exceeded error, and the certificate was issued, return
-	case err == nil && statusResponse.Status.State == api.IssuanceStateIssued:
+	case err == nil && (statusResponse.Status.State == api.IssuanceStateIssued || statusResponse.Status.State == api.IssuanceStateRotate):
 		if statusResponse.Certificate == nil {
 			conn.Close(false)
 			return nil, errors.New("no certificate in CertificateStatus response")
|
||||
|
|
51  vendor/github.com/docker/swarmkit/ca/config.go  generated vendored
|
@ -89,6 +89,39 @@ type CertificateUpdate struct {
|
|||
Err error
|
||||
}
|
||||
|
||||
func validateRootCAAndTLSCert(rootCA *RootCA, externalCARootPool *x509.CertPool, tlsKeyPair *tls.Certificate) error {
|
||||
var (
|
||||
leafCert *x509.Certificate
|
||||
intermediatePool *x509.CertPool
|
||||
)
|
||||
for i, derBytes := range tlsKeyPair.Certificate {
|
||||
parsed, err := x509.ParseCertificate(derBytes)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not validate new root certificates due to parse error")
|
||||
}
|
||||
if i == 0 {
|
||||
leafCert = parsed
|
||||
} else {
|
||||
if intermediatePool == nil {
|
||||
intermediatePool = x509.NewCertPool()
|
||||
}
|
||||
intermediatePool.AddCert(parsed)
|
||||
}
|
||||
}
|
||||
opts := x509.VerifyOptions{
|
||||
Roots: rootCA.Pool,
|
||||
Intermediates: intermediatePool,
|
||||
}
|
||||
if _, err := leafCert.Verify(opts); err != nil {
|
||||
return errors.Wrap(err, "new root CA does not match existing TLS credentials")
|
||||
}
|
||||
opts.Roots = externalCARootPool
|
||||
if _, err := leafCert.Verify(opts); err != nil {
|
||||
return errors.Wrap(err, "new external root pool does not match existing TLS credentials")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewSecurityConfig initializes and returns a new SecurityConfig.
|
||||
func NewSecurityConfig(rootCA *RootCA, krw *KeyReadWriter, tlsKeyPair *tls.Certificate, issuerInfo *IssuerInfo) (*SecurityConfig, error) {
|
||||
// Create the Server TLS Credentials for this node. These will not be used by workers.
|
||||
|
@ -155,8 +188,15 @@ func (s *SecurityConfig) UpdateRootCA(rootCA *RootCA, externalCARootPool *x509.C
|
|||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// refuse to update the root CA if the current TLS credentials do not validate against it
|
||||
if err := validateRootCAAndTLSCert(rootCA, externalCARootPool, s.certificate); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.rootCA = rootCA
|
||||
s.externalCAClientRootPool = externalCARootPool
|
||||
s.externalCA.UpdateRootCA(rootCA)
|
||||
|
||||
return s.updateTLSCredentials(s.certificate, s.issuerInfo)
|
||||
}
|
||||
|
||||
|
@ -215,6 +255,13 @@ func (s *SecurityConfig) updateTLSCredentials(certificate *tls.Certificate, issu
|
|||
return nil
|
||||
}
|
||||
|
||||
// UpdateTLSCredentials updates the security config with an updated TLS certificate and issuer info
|
||||
func (s *SecurityConfig) UpdateTLSCredentials(certificate *tls.Certificate, issuerInfo *IssuerInfo) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.updateTLSCredentials(certificate, issuerInfo)
|
||||
}
|
||||
|
||||
// SigningPolicy creates a policy used by the signer to ensure that the only fields
|
||||
// from the remote CSRs we trust are: PublicKey, PublicKeyAlgorithm and SignatureAlgorithm.
|
||||
// It receives the duration a certificate will be valid for
|
||||
|
@ -470,9 +517,7 @@ func RenewTLSConfigNow(ctx context.Context, s *SecurityConfig, connBroker *conne
|
|||
return err
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.updateTLSCredentials(tlsKeyPair, issuerInfo)
|
||||
return s.UpdateTLSCredentials(tlsKeyPair, issuerInfo)
|
||||
}
|
||||
|
||||
// calculateRandomExpiry returns a random duration between 50% and 80% of the
|
||||
|
|
20  vendor/github.com/docker/swarmkit/ca/external.go  generated vendored
|
@ -18,11 +18,15 @@ import (
|
|||
"github.com/cloudflare/cfssl/config"
|
||||
"github.com/cloudflare/cfssl/csr"
|
||||
"github.com/cloudflare/cfssl/signer"
|
||||
"github.com/docker/swarmkit/log"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/context/ctxhttp"
|
||||
)
|
||||
|
||||
// ExternalCrossSignProfile is the profile that we will be sending cross-signing CSR sign requests with
|
||||
const ExternalCrossSignProfile = "CA"
|
||||
|
||||
// ErrNoExternalCAURLs is an error used it indicate that an ExternalCA is
|
||||
// configured with no URLs to which it can proxy certificate signing requests.
|
||||
var ErrNoExternalCAURLs = errors.New("no external CA URLs")
|
||||
|
@ -79,8 +83,7 @@ func (eca *ExternalCA) UpdateTLSConfig(tlsConfig *tls.Config) {
|
|||
}
|
||||
}
|
||||
|
||||
// UpdateURLs updates the list of CSR API endpoints by setting it to the given
|
||||
// urls.
|
||||
// UpdateURLs updates the list of CSR API endpoints by setting it to the given urls.
|
||||
func (eca *ExternalCA) UpdateURLs(urls ...string) {
|
||||
eca.mu.Lock()
|
||||
defer eca.mu.Unlock()
|
||||
|
@ -88,6 +91,13 @@ func (eca *ExternalCA) UpdateURLs(urls ...string) {
|
|||
eca.urls = urls
|
||||
}
|
||||
|
||||
// UpdateRootCA changes the root CA used to append intermediates
|
||||
func (eca *ExternalCA) UpdateRootCA(rca *RootCA) {
|
||||
eca.mu.Lock()
|
||||
eca.rootCA = rca
|
||||
eca.mu.Unlock()
|
||||
}
|
||||
|
||||
// Sign signs a new certificate by proxying the given certificate signing
|
||||
// request to an external CFSSL API server.
|
||||
func (eca *ExternalCA) Sign(ctx context.Context, req signer.SignRequest) (cert []byte, err error) {
|
||||
|
@ -96,6 +106,7 @@ func (eca *ExternalCA) Sign(ctx context.Context, req signer.SignRequest) (cert [
|
|||
eca.mu.Lock()
|
||||
urls := eca.urls
|
||||
client := eca.client
|
||||
intermediates := eca.rootCA.Intermediates
|
||||
eca.mu.Unlock()
|
||||
|
||||
if len(urls) == 0 {
|
||||
|
@ -114,9 +125,9 @@ func (eca *ExternalCA) Sign(ctx context.Context, req signer.SignRequest) (cert [
|
|||
cert, err = makeExternalSignRequest(requestCtx, client, url, csrJSON)
|
||||
cancel()
|
||||
if err == nil {
|
||||
return append(cert, eca.rootCA.Intermediates...), err
|
||||
return append(cert, intermediates...), err
|
||||
}
|
||||
logrus.Debugf("unable to proxy certificate signing request to %s: %s", url, err)
|
||||
log.G(ctx).Debugf("unable to proxy certificate signing request to %s: %s", url, err)
|
||||
}
|
||||
|
||||
return nil, err
|
||||
|
@ -157,6 +168,7 @@ func (eca *ExternalCA) CrossSignRootCA(ctx context.Context, rca RootCA) ([]byte,
|
|||
CN: rootCert.Subject.CommonName,
|
||||
Names: cfCSRObj.Names,
|
||||
},
|
||||
Profile: ExternalCrossSignProfile,
|
||||
}
|
||||
// cfssl actually ignores non subject alt name extensions in the CSR, so we have to add the CA extension in the signing
|
||||
// request as well
|
||||
|
|
6  vendor/github.com/docker/swarmkit/ca/reconciler.go  generated vendored
|
@@ -99,7 +99,7 @@ func (r *rootRotationReconciler) UpdateRootCA(newRootCA *api.RootCA) {
 	if newRootCA.RootRotation != nil {
 		var nodes []*api.Node
 		r.store.View(func(tx store.ReadTx) {
-			nodes, err = store.FindNodes(tx, store.ByMembership(api.NodeMembershipAccepted))
+			nodes, err = store.FindNodes(tx, store.All)
 		})
 		if err != nil {
 			log.G(r.ctx).WithError(err).Error("unable to list nodes, so unable to process the current root CA")
@@ -132,8 +132,8 @@ func (r *rootRotationReconciler) UpdateRootCA(newRootCA *api.RootCA) {
 func (r *rootRotationReconciler) UpdateNode(node *api.Node) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
-	// if we're not in the middle of a root rotation, or if this node does not have membership, ignore it
-	if r.currentRootCA == nil || r.currentRootCA.RootRotation == nil || node.Spec.Membership != api.NodeMembershipAccepted {
+	// if we're not in the middle of a root rotation ignore the update
+	if r.currentRootCA == nil || r.currentRootCA.RootRotation == nil {
 		return
 	}
 	if hasIssuer(node, &r.currentIssuer) {
|
||||
|
|
80  vendor/github.com/docker/swarmkit/manager/allocator/allocator.go  generated vendored
|
@ -51,13 +51,6 @@ type taskBallot struct {
|
|||
|
||||
// allocActor controls the various phases in the lifecycle of one kind of allocator.
|
||||
type allocActor struct {
|
||||
// Channel through which the allocator gets all the events
|
||||
// that it is interested in.
|
||||
ch chan events.Event
|
||||
|
||||
// cancel unregisters the watcher.
|
||||
cancel func()
|
||||
|
||||
// Task voter identity of the allocator.
|
||||
taskVoter string
|
||||
|
||||
|
@ -90,7 +83,10 @@ func New(store *store.MemoryStore, pg plugingetter.PluginGetter) (*Allocator, er
|
|||
func (a *Allocator) Run(ctx context.Context) error {
|
||||
// Setup cancel context for all goroutines to use.
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
var wg sync.WaitGroup
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
actors []func() error
|
||||
)
|
||||
|
||||
defer func() {
|
||||
cancel()
|
||||
|
@ -98,26 +94,8 @@ func (a *Allocator) Run(ctx context.Context) error {
|
|||
close(a.doneChan)
|
||||
}()
|
||||
|
||||
var actors []func() error
|
||||
watch, watchCancel := state.Watch(a.store.WatchQueue(),
|
||||
api.EventCreateNetwork{},
|
||||
api.EventDeleteNetwork{},
|
||||
api.EventCreateService{},
|
||||
api.EventUpdateService{},
|
||||
api.EventDeleteService{},
|
||||
api.EventCreateTask{},
|
||||
api.EventUpdateTask{},
|
||||
api.EventDeleteTask{},
|
||||
api.EventCreateNode{},
|
||||
api.EventUpdateNode{},
|
||||
api.EventDeleteNode{},
|
||||
state.EventCommit{},
|
||||
)
|
||||
|
||||
for _, aa := range []allocActor{
|
||||
{
|
||||
ch: watch,
|
||||
cancel: watchCancel,
|
||||
taskVoter: networkVoter,
|
||||
init: a.doNetworkInit,
|
||||
action: a.doNetworkAlloc,
|
||||
|
@ -127,8 +105,8 @@ func (a *Allocator) Run(ctx context.Context) error {
|
|||
a.registerToVote(aa.taskVoter)
|
||||
}
|
||||
|
||||
// Copy the iterated value for variable capture.
|
||||
aaCopy := aa
|
||||
// Assign a pointer for variable capture
|
||||
aaPtr := &aa
|
||||
actor := func() error {
|
||||
wg.Add(1)
|
||||
defer wg.Done()
|
||||
|
@ -136,19 +114,19 @@ func (a *Allocator) Run(ctx context.Context) error {
|
|||
// init might return an allocator specific context
|
||||
// which is a child of the passed in context to hold
|
||||
// allocator specific state
|
||||
if err := aaCopy.init(ctx); err != nil {
|
||||
// Stop the watches for this allocator
|
||||
// if we are failing in the init of
|
||||
// this allocator.
|
||||
aa.cancel()
|
||||
watch, watchCancel, err := a.init(ctx, aaPtr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
a.run(ctx, aaCopy)
|
||||
}()
|
||||
go func(watch <-chan events.Event, watchCancel func()) {
|
||||
defer func() {
|
||||
wg.Done()
|
||||
watchCancel()
|
||||
}()
|
||||
a.run(ctx, *aaPtr, watch)
|
||||
}(watch, watchCancel)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -172,10 +150,34 @@ func (a *Allocator) Stop() {
|
|||
<-a.doneChan
|
||||
}
|
||||
|
||||
func (a *Allocator) run(ctx context.Context, aa allocActor) {
|
||||
func (a *Allocator) init(ctx context.Context, aa *allocActor) (<-chan events.Event, func(), error) {
|
||||
watch, watchCancel := state.Watch(a.store.WatchQueue(),
|
||||
api.EventCreateNetwork{},
|
||||
api.EventDeleteNetwork{},
|
||||
api.EventCreateService{},
|
||||
api.EventUpdateService{},
|
||||
api.EventDeleteService{},
|
||||
api.EventCreateTask{},
|
||||
api.EventUpdateTask{},
|
||||
api.EventDeleteTask{},
|
||||
api.EventCreateNode{},
|
||||
api.EventUpdateNode{},
|
||||
api.EventDeleteNode{},
|
||||
state.EventCommit{},
|
||||
)
|
||||
|
||||
if err := aa.init(ctx); err != nil {
|
||||
watchCancel()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return watch, watchCancel, nil
|
||||
}
|
||||
|
||||
func (a *Allocator) run(ctx context.Context, aa allocActor, watch <-chan events.Event) {
|
||||
for {
|
||||
select {
|
||||
case ev, ok := <-aa.ch:
|
||||
case ev, ok := <-watch:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
|
|
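Note on the allocator.go hunks above: Run no longer gives each allocActor its own event channel; a new Allocator.init helper creates the store watch, runs the actor's init, and hands the watch plus its cancel func to the goroutine that calls run, which now owns the cancel. A minimal standalone sketch of that init/run split, using only the standard library (actor, initActor, runActor and the string events are illustrative stand-ins, not swarmkit types):

package main

import (
	"context"
	"fmt"
	"sync"
)

// actor stands in for swarmkit's allocActor: an init phase plus an event loop.
type actor struct {
	name string
	init func(ctx context.Context) error
}

// initActor mirrors Allocator.init in the diff: create the watch, run the
// actor's init, and tear the watch down again if init fails.
func initActor(ctx context.Context, a *actor) (<-chan string, func(), error) {
	events := make(chan string, 2)
	// toy, pre-filled producer standing in for the memory store's watch queue
	events <- "EventCreateNetwork"
	events <- "EventCommit"
	close(events)

	cancel := func() { /* would unregister the store watch here */ }
	if err := a.init(ctx); err != nil {
		cancel()
		return nil, nil, err
	}
	return events, cancel, nil
}

// runActor mirrors Allocator.run: it consumes the watch created by init.
func runActor(ctx context.Context, a actor, watch <-chan string) {
	for {
		select {
		case ev, ok := <-watch:
			if !ok {
				return
			}
			fmt.Println(a.name, "handling", ev)
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx := context.Background()
	var wg sync.WaitGroup

	a := actor{name: "network", init: func(context.Context) error { return nil }}
	watch, watchCancel, err := initActor(ctx, &a)
	if err != nil {
		return
	}

	wg.Add(1)
	go func(watch <-chan string, watchCancel func()) {
		defer func() {
			wg.Done()
			watchCancel() // the goroutine, not the caller, now owns the cancel, as in the new code
		}()
		runActor(ctx, a, watch)
	}(watch, watchCancel)

	wg.Wait()
}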
@ -1,8 +1,9 @@
|
|||
package networkallocator
|
||||
package cnmallocator
|
||||
|
||||
import (
|
||||
"github.com/docker/libnetwork/drivers/overlay/ovmanager"
|
||||
"github.com/docker/libnetwork/drivers/remote"
|
||||
"github.com/docker/swarmkit/manager/allocator/networkallocator"
|
||||
)
|
||||
|
||||
var initializers = []initializer{
|
||||
|
@ -11,6 +12,6 @@ var initializers = []initializer{
|
|||
}
|
||||
|
||||
// PredefinedNetworks returns the list of predefined network structures
|
||||
func PredefinedNetworks() []PredefinedNetworkData {
|
||||
func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
|
||||
return nil
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
package networkallocator
|
||||
package cnmallocator
|
||||
|
||||
import (
|
||||
"github.com/docker/libnetwork/drvregistry"
|
|
@ -1,4 +1,4 @@
|
|||
package networkallocator
|
||||
package cnmallocator
|
||||
|
||||
import (
|
||||
"github.com/docker/libnetwork/drivers/bridge/brmanager"
|
||||
|
@ -7,6 +7,7 @@ import (
|
|||
"github.com/docker/libnetwork/drivers/macvlan/mvmanager"
|
||||
"github.com/docker/libnetwork/drivers/overlay/ovmanager"
|
||||
"github.com/docker/libnetwork/drivers/remote"
|
||||
"github.com/docker/swarmkit/manager/allocator/networkallocator"
|
||||
)
|
||||
|
||||
var initializers = []initializer{
|
||||
|
@ -19,9 +20,9 @@ var initializers = []initializer{
|
|||
}
|
||||
|
||||
// PredefinedNetworks returns the list of predefined network structures
|
||||
func PredefinedNetworks() []PredefinedNetworkData {
|
||||
return []PredefinedNetworkData{
|
||||
{"bridge", "bridge"},
|
||||
{"host", "host"},
|
||||
func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
|
||||
return []networkallocator.PredefinedNetworkData{
|
||||
{Name: "bridge", Driver: "bridge"},
|
||||
{Name: "host", Driver: "host"},
|
||||
}
|
||||
}
|
|
@ -1,8 +1,9 @@
|
|||
package networkallocator
|
||||
package cnmallocator
|
||||
|
||||
import (
|
||||
"github.com/docker/libnetwork/drivers/overlay/ovmanager"
|
||||
"github.com/docker/libnetwork/drivers/remote"
|
||||
"github.com/docker/swarmkit/manager/allocator/networkallocator"
|
||||
)
|
||||
|
||||
var initializers = []initializer{
|
||||
|
@ -11,6 +12,6 @@ var initializers = []initializer{
|
|||
}
|
||||
|
||||
// PredefinedNetworks returns the list of predefined network structures
|
||||
func PredefinedNetworks() []PredefinedNetworkData {
|
||||
func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
|
||||
return nil
|
||||
}
|
14
vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/drivers_unsupported.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
// +build !linux,!darwin,!windows

package cnmallocator

import (
	"github.com/docker/swarmkit/manager/allocator/networkallocator"
)

const initializers = nil

// PredefinedNetworks returns the list of predefined network structures
func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
	return nil
}
|
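The driver setup files above move from package networkallocator into the new cnmallocator package, and PredefinedNetworks now returns networkallocator.PredefinedNetworkData values (Name/Driver pairs) rather than a package-local type. A rough standalone sketch of how a caller such as the manager consumes that list; the type and loop here are simplified copies for illustration, not imports of the real packages:

package main

import "fmt"

// PredefinedNetworkData mirrors the struct the allocator packages share:
// a network name and the driver that backs it.
type PredefinedNetworkData struct {
	Name   string
	Driver string
}

// predefinedNetworks plays the role of cnmallocator.PredefinedNetworks on Linux,
// where the node-local "bridge" and "host" networks are announced.
func predefinedNetworks() []PredefinedNetworkData {
	return []PredefinedNetworkData{
		{Name: "bridge", Driver: "bridge"},
		{Name: "host", Driver: "host"},
	}
}

func main() {
	// The manager iterates the list when it becomes leader and creates one
	// store object per predefined network (store.CreateNetwork in the real code).
	for _, p := range predefinedNetworks() {
		fmt.Printf("creating predefined network %s (driver %s)\n", p.Name, p.Driver)
	}
}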
957
vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go
generated
vendored
Normal file
|
@ -0,0 +1,957 @@
|
|||
package cnmallocator
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
"github.com/docker/libnetwork/datastore"
|
||||
"github.com/docker/libnetwork/driverapi"
|
||||
"github.com/docker/libnetwork/drvregistry"
|
||||
"github.com/docker/libnetwork/ipamapi"
|
||||
"github.com/docker/libnetwork/netlabel"
|
||||
"github.com/docker/swarmkit/api"
|
||||
"github.com/docker/swarmkit/log"
|
||||
"github.com/docker/swarmkit/manager/allocator/networkallocator"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultDriver defines the name of the driver to be used by
|
||||
// default if a network without any driver name specified is
|
||||
// created.
|
||||
DefaultDriver = "overlay"
|
||||
)
|
||||
|
||||
// cnmNetworkAllocator acts as the controller for all network related operations
|
||||
// like managing network and IPAM drivers and also creating and
|
||||
// deleting networks and the associated resources.
|
||||
type cnmNetworkAllocator struct {
|
||||
// The driver register which manages all internal and external
|
||||
// IPAM and network drivers.
|
||||
drvRegistry *drvregistry.DrvRegistry
|
||||
|
||||
// The port allocator instance for allocating node ports
|
||||
portAllocator *portAllocator
|
||||
|
||||
// Local network state used by cnmNetworkAllocator to do network management.
|
||||
networks map[string]*network
|
||||
|
||||
// Allocator state to indicate if allocation has been
|
||||
// successfully completed for this service.
|
||||
services map[string]struct{}
|
||||
|
||||
// Allocator state to indicate if allocation has been
|
||||
// successfully completed for this task.
|
||||
tasks map[string]struct{}
|
||||
|
||||
// Allocator state to indicate if allocation has been
|
||||
// successfully completed for this node.
|
||||
nodes map[string]struct{}
|
||||
}
|
||||
|
||||
// Local in-memory state related to network that need to be tracked by cnmNetworkAllocator
|
||||
type network struct {
|
||||
// A local cache of the store object.
|
||||
nw *api.Network
|
||||
|
||||
// pools is used to save the internal poolIDs needed when
|
||||
// releasing the pool.
|
||||
pools map[string]string
|
||||
|
||||
// endpoints is a map of endpoint IP to the poolID from which it
|
||||
// was allocated.
|
||||
endpoints map[string]string
|
||||
|
||||
// isNodeLocal indicates whether the scope of the network's resources
|
||||
// is local to the node. If true, it means the resources can only be
|
||||
// allocated locally by the node where the network will be deployed.
|
||||
// In this the swarm manager will skip the allocations.
|
||||
isNodeLocal bool
|
||||
}
|
||||
|
||||
type networkDriver struct {
|
||||
driver driverapi.Driver
|
||||
name string
|
||||
capability *driverapi.Capability
|
||||
}
|
||||
|
||||
type initializer struct {
|
||||
fn drvregistry.InitFunc
|
||||
ntype string
|
||||
}
|
||||
|
||||
// New returns a new NetworkAllocator handle
|
||||
func New(pg plugingetter.PluginGetter) (networkallocator.NetworkAllocator, error) {
|
||||
na := &cnmNetworkAllocator{
|
||||
networks: make(map[string]*network),
|
||||
services: make(map[string]struct{}),
|
||||
tasks: make(map[string]struct{}),
|
||||
nodes: make(map[string]struct{}),
|
||||
}
|
||||
|
||||
// There are no driver configurations and notification
|
||||
// functions as of now.
|
||||
reg, err := drvregistry.New(nil, nil, nil, nil, pg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := initializeDrivers(reg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = initIPAMDrivers(reg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pa, err := newPortAllocator()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
na.portAllocator = pa
|
||||
na.drvRegistry = reg
|
||||
return na, nil
|
||||
}
|
||||
|
||||
// Allocate allocates all the necessary resources both general
|
||||
// and driver-specific which may be specified in the NetworkSpec
|
||||
func (na *cnmNetworkAllocator) Allocate(n *api.Network) error {
|
||||
if _, ok := na.networks[n.ID]; ok {
|
||||
return fmt.Errorf("network %s already allocated", n.ID)
|
||||
}
|
||||
|
||||
d, err := na.resolveDriver(n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nw := &network{
|
||||
nw: n,
|
||||
endpoints: make(map[string]string),
|
||||
isNodeLocal: d.capability.DataScope == datastore.LocalScope,
|
||||
}
|
||||
|
||||
// No swarm-level allocation can be provided by the network driver for
|
||||
// node-local networks. Only thing needed is populating the driver's name
|
||||
// in the driver's state.
|
||||
if nw.isNodeLocal {
|
||||
n.DriverState = &api.Driver{
|
||||
Name: d.name,
|
||||
}
|
||||
} else {
|
||||
nw.pools, err = na.allocatePools(n)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed allocating pools and gateway IP for network %s", n.ID)
|
||||
}
|
||||
|
||||
if err := na.allocateDriverState(n); err != nil {
|
||||
na.freePools(n, nw.pools)
|
||||
return errors.Wrapf(err, "failed while allocating driver state for network %s", n.ID)
|
||||
}
|
||||
}
|
||||
|
||||
na.networks[n.ID] = nw
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (na *cnmNetworkAllocator) getNetwork(id string) *network {
|
||||
return na.networks[id]
|
||||
}
|
||||
|
||||
// Deallocate frees all the general and driver specific resources
|
||||
// which were assigned to the passed network.
|
||||
func (na *cnmNetworkAllocator) Deallocate(n *api.Network) error {
|
||||
localNet := na.getNetwork(n.ID)
|
||||
if localNet == nil {
|
||||
return fmt.Errorf("could not get networker state for network %s", n.ID)
|
||||
}
|
||||
|
||||
// No swarm-level resource deallocation needed for node-local networks
|
||||
if localNet.isNodeLocal {
|
||||
delete(na.networks, n.ID)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := na.freeDriverState(n); err != nil {
|
||||
return errors.Wrapf(err, "failed to free driver state for network %s", n.ID)
|
||||
}
|
||||
|
||||
delete(na.networks, n.ID)
|
||||
|
||||
return na.freePools(n, localNet.pools)
|
||||
}
|
||||
|
||||
// AllocateService allocates all the network resources such as virtual
|
||||
// IP and ports needed by the service.
|
||||
func (na *cnmNetworkAllocator) AllocateService(s *api.Service) (err error) {
|
||||
if err = na.portAllocator.serviceAllocatePorts(s); err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
na.DeallocateService(s)
|
||||
}
|
||||
}()
|
||||
|
||||
if s.Endpoint == nil {
|
||||
s.Endpoint = &api.Endpoint{}
|
||||
}
|
||||
s.Endpoint.Spec = s.Spec.Endpoint.Copy()
|
||||
|
||||
// If ResolutionMode is DNSRR do not try allocating VIPs, but
|
||||
// free any VIP from previous state.
|
||||
if s.Spec.Endpoint != nil && s.Spec.Endpoint.Mode == api.ResolutionModeDNSRoundRobin {
|
||||
for _, vip := range s.Endpoint.VirtualIPs {
|
||||
if err := na.deallocateVIP(vip); err != nil {
|
||||
// don't bail here, deallocate as many as possible.
|
||||
log.L.WithError(err).
|
||||
WithField("vip.network", vip.NetworkID).
|
||||
WithField("vip.addr", vip.Addr).Error("error deallocating vip")
|
||||
}
|
||||
}
|
||||
|
||||
s.Endpoint.VirtualIPs = nil
|
||||
|
||||
delete(na.services, s.ID)
|
||||
return nil
|
||||
}
|
||||
|
||||
specNetworks := serviceNetworks(s)
|
||||
|
||||
// Allocate VIPs for all the pre-populated endpoint attachments
|
||||
eVIPs := s.Endpoint.VirtualIPs[:0]
|
||||
|
||||
vipLoop:
|
||||
for _, eAttach := range s.Endpoint.VirtualIPs {
|
||||
if na.IsVIPOnIngressNetwork(eAttach) && networkallocator.IsIngressNetworkNeeded(s) {
|
||||
if err = na.allocateVIP(eAttach); err != nil {
|
||||
return err
|
||||
}
|
||||
eVIPs = append(eVIPs, eAttach)
|
||||
continue vipLoop
|
||||
|
||||
}
|
||||
for _, nAttach := range specNetworks {
|
||||
if nAttach.Target == eAttach.NetworkID {
|
||||
if err = na.allocateVIP(eAttach); err != nil {
|
||||
return err
|
||||
}
|
||||
eVIPs = append(eVIPs, eAttach)
|
||||
continue vipLoop
|
||||
}
|
||||
}
|
||||
// If the network of the VIP is not part of the service spec,
|
||||
// deallocate the vip
|
||||
na.deallocateVIP(eAttach)
|
||||
}
|
||||
|
||||
networkLoop:
|
||||
for _, nAttach := range specNetworks {
|
||||
for _, vip := range s.Endpoint.VirtualIPs {
|
||||
if vip.NetworkID == nAttach.Target {
|
||||
continue networkLoop
|
||||
}
|
||||
}
|
||||
|
||||
vip := &api.Endpoint_VirtualIP{NetworkID: nAttach.Target}
|
||||
if err = na.allocateVIP(vip); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
eVIPs = append(eVIPs, vip)
|
||||
}
|
||||
|
||||
if len(eVIPs) > 0 {
|
||||
na.services[s.ID] = struct{}{}
|
||||
} else {
|
||||
delete(na.services, s.ID)
|
||||
}
|
||||
|
||||
s.Endpoint.VirtualIPs = eVIPs
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeallocateService de-allocates all the network resources such as
|
||||
// virtual IP and ports associated with the service.
|
||||
func (na *cnmNetworkAllocator) DeallocateService(s *api.Service) error {
|
||||
if s.Endpoint == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, vip := range s.Endpoint.VirtualIPs {
|
||||
if err := na.deallocateVIP(vip); err != nil {
|
||||
// don't bail here, deallocate as many as possible.
|
||||
log.L.WithError(err).
|
||||
WithField("vip.network", vip.NetworkID).
|
||||
WithField("vip.addr", vip.Addr).Error("error deallocating vip")
|
||||
}
|
||||
}
|
||||
s.Endpoint.VirtualIPs = nil
|
||||
|
||||
na.portAllocator.serviceDeallocatePorts(s)
|
||||
delete(na.services, s.ID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsAllocated returns if the passed network has been allocated or not.
|
||||
func (na *cnmNetworkAllocator) IsAllocated(n *api.Network) bool {
|
||||
_, ok := na.networks[n.ID]
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsTaskAllocated returns if the passed task has its network resources allocated or not.
|
||||
func (na *cnmNetworkAllocator) IsTaskAllocated(t *api.Task) bool {
|
||||
// If the task is not found in the allocated set, then it is
|
||||
// not allocated.
|
||||
if _, ok := na.tasks[t.ID]; !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// If Networks is empty there is no way this Task is allocated.
|
||||
if len(t.Networks) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// To determine whether the task has its resources allocated,
|
||||
// we just need to look at one global scope network (in case of
|
||||
// multi-network attachment). This is because we make sure we
|
||||
// allocate for every network or we allocate for none.
|
||||
|
||||
// Find the first global scope network
|
||||
for _, nAttach := range t.Networks {
|
||||
// If the network is not allocated, the task cannot be allocated.
|
||||
localNet, ok := na.networks[nAttach.Network.ID]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// Nothing else to check for local scope network
|
||||
if localNet.isNodeLocal {
|
||||
continue
|
||||
}
|
||||
|
||||
// Addresses empty. Task is not allocated.
|
||||
if len(nAttach.Addresses) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// The allocated IP address not found in local endpoint state. Not allocated.
|
||||
if _, ok := localNet.endpoints[nAttach.Addresses[0]]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// HostPublishPortsNeedUpdate returns true if the passed service needs
|
||||
// allocations for its published ports in host (non ingress) mode
|
||||
func (na *cnmNetworkAllocator) HostPublishPortsNeedUpdate(s *api.Service) bool {
|
||||
return na.portAllocator.hostPublishPortsNeedUpdate(s)
|
||||
}
|
||||
|
||||
// IsServiceAllocated returns false if the passed service needs to have network resources allocated/updated.
|
||||
func (na *cnmNetworkAllocator) IsServiceAllocated(s *api.Service, flags ...func(*networkallocator.ServiceAllocationOpts)) bool {
|
||||
var options networkallocator.ServiceAllocationOpts
|
||||
for _, flag := range flags {
|
||||
flag(&options)
|
||||
}
|
||||
|
||||
specNetworks := serviceNetworks(s)
|
||||
|
||||
// If endpoint mode is VIP and allocator does not have the
|
||||
// service in VIP allocated set then it needs to be allocated.
|
||||
if len(specNetworks) != 0 &&
|
||||
(s.Spec.Endpoint == nil ||
|
||||
s.Spec.Endpoint.Mode == api.ResolutionModeVirtualIP) {
|
||||
|
||||
if _, ok := na.services[s.ID]; !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if s.Endpoint == nil || len(s.Endpoint.VirtualIPs) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// If the spec has networks which don't have a corresponding VIP,
|
||||
// the service needs to be allocated.
|
||||
networkLoop:
|
||||
for _, net := range specNetworks {
|
||||
for _, vip := range s.Endpoint.VirtualIPs {
|
||||
if vip.NetworkID == net.Target {
|
||||
continue networkLoop
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// If the spec no longer has networks attached and has a vip allocated
|
||||
// from previous spec the service needs to allocated.
|
||||
if s.Endpoint != nil {
|
||||
vipLoop:
|
||||
for _, vip := range s.Endpoint.VirtualIPs {
|
||||
if na.IsVIPOnIngressNetwork(vip) && networkallocator.IsIngressNetworkNeeded(s) {
|
||||
continue vipLoop
|
||||
}
|
||||
for _, net := range specNetworks {
|
||||
if vip.NetworkID == net.Target {
|
||||
continue vipLoop
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// If the endpoint mode is DNSRR and allocator has the service
|
||||
// in VIP allocated set then we return to be allocated to make
|
||||
// sure the allocator triggers networkallocator to free up the
|
||||
// resources if any.
|
||||
if s.Spec.Endpoint != nil && s.Spec.Endpoint.Mode == api.ResolutionModeDNSRoundRobin {
|
||||
if _, ok := na.services[s.ID]; ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if (s.Spec.Endpoint != nil && len(s.Spec.Endpoint.Ports) != 0) ||
|
||||
(s.Endpoint != nil && len(s.Endpoint.Ports) != 0) {
|
||||
return na.portAllocator.isPortsAllocatedOnInit(s, options.OnInit)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// IsNodeAllocated returns if the passed node has its network resources allocated or not.
|
||||
func (na *cnmNetworkAllocator) IsNodeAllocated(node *api.Node) bool {
|
||||
// If the node is not found in the allocated set, then it is
|
||||
// not allocated.
|
||||
if _, ok := na.nodes[node.ID]; !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// If no attachment, not allocated.
|
||||
if node.Attachment == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// If the network is not allocated, the node cannot be allocated.
|
||||
localNet, ok := na.networks[node.Attachment.Network.ID]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// Addresses empty, not allocated.
|
||||
if len(node.Attachment.Addresses) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// The allocated IP address not found in local endpoint state. Not allocated.
|
||||
if _, ok := localNet.endpoints[node.Attachment.Addresses[0]]; !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// AllocateNode allocates the IP addresses for the network to which
|
||||
// the node is attached.
|
||||
func (na *cnmNetworkAllocator) AllocateNode(node *api.Node) error {
|
||||
if err := na.allocateNetworkIPs(node.Attachment); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
na.nodes[node.ID] = struct{}{}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeallocateNode deallocates the IP addresses for the network to
|
||||
// which the node is attached.
|
||||
func (na *cnmNetworkAllocator) DeallocateNode(node *api.Node) error {
|
||||
delete(na.nodes, node.ID)
|
||||
return na.releaseEndpoints([]*api.NetworkAttachment{node.Attachment})
|
||||
}
|
||||
|
||||
// AllocateTask allocates all the endpoint resources for all the
|
||||
// networks that a task is attached to.
|
||||
func (na *cnmNetworkAllocator) AllocateTask(t *api.Task) error {
|
||||
for i, nAttach := range t.Networks {
|
||||
if localNet := na.getNetwork(nAttach.Network.ID); localNet != nil && localNet.isNodeLocal {
|
||||
continue
|
||||
}
|
||||
if err := na.allocateNetworkIPs(nAttach); err != nil {
|
||||
if err := na.releaseEndpoints(t.Networks[:i]); err != nil {
|
||||
log.G(context.TODO()).WithError(err).Errorf("Failed to release IP addresses while rolling back allocation for task %s network %s", t.ID, nAttach.Network.ID)
|
||||
}
|
||||
return errors.Wrapf(err, "failed to allocate network IP for task %s network %s", t.ID, nAttach.Network.ID)
|
||||
}
|
||||
}
|
||||
|
||||
na.tasks[t.ID] = struct{}{}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeallocateTask releases all the endpoint resources for all the
|
||||
// networks that a task is attached to.
|
||||
func (na *cnmNetworkAllocator) DeallocateTask(t *api.Task) error {
|
||||
delete(na.tasks, t.ID)
|
||||
return na.releaseEndpoints(t.Networks)
|
||||
}
|
||||
|
||||
func (na *cnmNetworkAllocator) releaseEndpoints(networks []*api.NetworkAttachment) error {
|
||||
for _, nAttach := range networks {
|
||||
localNet := na.getNetwork(nAttach.Network.ID)
|
||||
if localNet == nil {
|
||||
return fmt.Errorf("could not find network allocator state for network %s", nAttach.Network.ID)
|
||||
}
|
||||
|
||||
if localNet.isNodeLocal {
|
||||
continue
|
||||
}
|
||||
|
||||
ipam, _, _, err := na.resolveIPAM(nAttach.Network)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to resolve IPAM while releasing")
|
||||
}
|
||||
|
||||
// Do not fail and bail out if we fail to release IP
|
||||
// address here. Keep going and try releasing as many
|
||||
// addresses as possible.
|
||||
for _, addr := range nAttach.Addresses {
|
||||
// Retrieve the poolID and immediately nuke
|
||||
// out the mapping.
|
||||
poolID := localNet.endpoints[addr]
|
||||
delete(localNet.endpoints, addr)
|
||||
|
||||
ip, _, err := net.ParseCIDR(addr)
|
||||
if err != nil {
|
||||
log.G(context.TODO()).Errorf("Could not parse IP address %s while releasing", addr)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := ipam.ReleaseAddress(poolID, ip); err != nil {
|
||||
log.G(context.TODO()).WithError(err).Errorf("IPAM failure while releasing IP address %s", addr)
|
||||
}
|
||||
}
|
||||
|
||||
// Clear out the address list when we are done with
|
||||
// this network.
|
||||
nAttach.Addresses = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// allocate virtual IP for a single endpoint attachment of the service.
|
||||
func (na *cnmNetworkAllocator) allocateVIP(vip *api.Endpoint_VirtualIP) error {
|
||||
localNet := na.getNetwork(vip.NetworkID)
|
||||
if localNet == nil {
|
||||
return errors.New("networkallocator: could not find local network state")
|
||||
}
|
||||
|
||||
if localNet.isNodeLocal {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If this IP is already allocated in memory we don't need to
|
||||
// do anything.
|
||||
if _, ok := localNet.endpoints[vip.Addr]; ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
ipam, _, _, err := na.resolveIPAM(localNet.nw)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to resolve IPAM while allocating")
|
||||
}
|
||||
|
||||
var addr net.IP
|
||||
if vip.Addr != "" {
|
||||
var err error
|
||||
|
||||
addr, _, err = net.ParseCIDR(vip.Addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, poolID := range localNet.pools {
|
||||
ip, _, err := ipam.RequestAddress(poolID, addr, nil)
|
||||
if err != nil && err != ipamapi.ErrNoAvailableIPs && err != ipamapi.ErrIPOutOfRange {
|
||||
return errors.Wrap(err, "could not allocate VIP from IPAM")
|
||||
}
|
||||
|
||||
// If we got an address then we are done.
|
||||
if err == nil {
|
||||
ipStr := ip.String()
|
||||
localNet.endpoints[ipStr] = poolID
|
||||
vip.Addr = ipStr
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return errors.New("could not find an available IP while allocating VIP")
|
||||
}
|
||||
|
||||
func (na *cnmNetworkAllocator) deallocateVIP(vip *api.Endpoint_VirtualIP) error {
|
||||
localNet := na.getNetwork(vip.NetworkID)
|
||||
if localNet == nil {
|
||||
return errors.New("networkallocator: could not find local network state")
|
||||
}
|
||||
if localNet.isNodeLocal {
|
||||
return nil
|
||||
}
|
||||
ipam, _, _, err := na.resolveIPAM(localNet.nw)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to resolve IPAM while allocating")
|
||||
}
|
||||
|
||||
// Retrieve the poolID and immediately nuke
|
||||
// out the mapping.
|
||||
poolID := localNet.endpoints[vip.Addr]
|
||||
delete(localNet.endpoints, vip.Addr)
|
||||
|
||||
ip, _, err := net.ParseCIDR(vip.Addr)
|
||||
if err != nil {
|
||||
log.G(context.TODO()).Errorf("Could not parse VIP address %s while releasing", vip.Addr)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ipam.ReleaseAddress(poolID, ip); err != nil {
|
||||
log.G(context.TODO()).WithError(err).Errorf("IPAM failure while releasing VIP address %s", vip.Addr)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// allocate the IP addresses for a single network attachment of the task.
|
||||
func (na *cnmNetworkAllocator) allocateNetworkIPs(nAttach *api.NetworkAttachment) error {
|
||||
var ip *net.IPNet
|
||||
|
||||
ipam, _, _, err := na.resolveIPAM(nAttach.Network)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to resolve IPAM while allocating")
|
||||
}
|
||||
|
||||
localNet := na.getNetwork(nAttach.Network.ID)
|
||||
if localNet == nil {
|
||||
return fmt.Errorf("could not find network allocator state for network %s", nAttach.Network.ID)
|
||||
}
|
||||
|
||||
addresses := nAttach.Addresses
|
||||
if len(addresses) == 0 {
|
||||
addresses = []string{""}
|
||||
}
|
||||
|
||||
for i, rawAddr := range addresses {
|
||||
var addr net.IP
|
||||
if rawAddr != "" {
|
||||
var err error
|
||||
addr, _, err = net.ParseCIDR(rawAddr)
|
||||
if err != nil {
|
||||
addr = net.ParseIP(rawAddr)
|
||||
|
||||
if addr == nil {
|
||||
return errors.Wrapf(err, "could not parse address string %s", rawAddr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, poolID := range localNet.pools {
|
||||
var err error
|
||||
|
||||
ip, _, err = ipam.RequestAddress(poolID, addr, nil)
|
||||
if err != nil && err != ipamapi.ErrNoAvailableIPs && err != ipamapi.ErrIPOutOfRange {
|
||||
return errors.Wrap(err, "could not allocate IP from IPAM")
|
||||
}
|
||||
|
||||
// If we got an address then we are done.
|
||||
if err == nil {
|
||||
ipStr := ip.String()
|
||||
localNet.endpoints[ipStr] = poolID
|
||||
addresses[i] = ipStr
|
||||
nAttach.Addresses = addresses
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return errors.New("could not find an available IP")
|
||||
}
|
||||
|
||||
func (na *cnmNetworkAllocator) freeDriverState(n *api.Network) error {
|
||||
d, err := na.resolveDriver(n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return d.driver.NetworkFree(n.ID)
|
||||
}
|
||||
|
||||
func (na *cnmNetworkAllocator) allocateDriverState(n *api.Network) error {
|
||||
d, err := na.resolveDriver(n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := make(map[string]string)
|
||||
// reconcile the driver specific options from the network spec
|
||||
// and from the operational state retrieved from the store
|
||||
if n.Spec.DriverConfig != nil {
|
||||
for k, v := range n.Spec.DriverConfig.Options {
|
||||
options[k] = v
|
||||
}
|
||||
}
|
||||
if n.DriverState != nil {
|
||||
for k, v := range n.DriverState.Options {
|
||||
options[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// Construct IPAM data for driver consumption.
|
||||
ipv4Data := make([]driverapi.IPAMData, 0, len(n.IPAM.Configs))
|
||||
for _, ic := range n.IPAM.Configs {
|
||||
if ic.Family == api.IPAMConfig_IPV6 {
|
||||
continue
|
||||
}
|
||||
|
||||
_, subnet, err := net.ParseCIDR(ic.Subnet)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error parsing subnet %s while allocating driver state", ic.Subnet)
|
||||
}
|
||||
|
||||
gwIP := net.ParseIP(ic.Gateway)
|
||||
gwNet := &net.IPNet{
|
||||
IP: gwIP,
|
||||
Mask: subnet.Mask,
|
||||
}
|
||||
|
||||
data := driverapi.IPAMData{
|
||||
Pool: subnet,
|
||||
Gateway: gwNet,
|
||||
}
|
||||
|
||||
ipv4Data = append(ipv4Data, data)
|
||||
}
|
||||
|
||||
ds, err := d.driver.NetworkAllocate(n.ID, options, ipv4Data, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update network object with the obtained driver state.
|
||||
n.DriverState = &api.Driver{
|
||||
Name: d.name,
|
||||
Options: ds,
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resolve network driver
|
||||
func (na *cnmNetworkAllocator) resolveDriver(n *api.Network) (*networkDriver, error) {
|
||||
dName := DefaultDriver
|
||||
if n.Spec.DriverConfig != nil && n.Spec.DriverConfig.Name != "" {
|
||||
dName = n.Spec.DriverConfig.Name
|
||||
}
|
||||
|
||||
d, drvcap := na.drvRegistry.Driver(dName)
|
||||
if d == nil {
|
||||
var err error
|
||||
err = na.loadDriver(dName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d, drvcap = na.drvRegistry.Driver(dName)
|
||||
if d == nil {
|
||||
return nil, fmt.Errorf("could not resolve network driver %s", dName)
|
||||
}
|
||||
}
|
||||
|
||||
return &networkDriver{driver: d, capability: drvcap, name: dName}, nil
|
||||
}
|
||||
|
||||
func (na *cnmNetworkAllocator) loadDriver(name string) error {
|
||||
pg := na.drvRegistry.GetPluginGetter()
|
||||
if pg == nil {
|
||||
return errors.New("plugin store is uninitialized")
|
||||
}
|
||||
_, err := pg.Get(name, driverapi.NetworkPluginEndpointType, plugingetter.Lookup)
|
||||
return err
|
||||
}
|
||||
|
||||
// Resolve the IPAM driver
|
||||
func (na *cnmNetworkAllocator) resolveIPAM(n *api.Network) (ipamapi.Ipam, string, map[string]string, error) {
|
||||
dName := ipamapi.DefaultIPAM
|
||||
if n.Spec.IPAM != nil && n.Spec.IPAM.Driver != nil && n.Spec.IPAM.Driver.Name != "" {
|
||||
dName = n.Spec.IPAM.Driver.Name
|
||||
}
|
||||
|
||||
var dOptions map[string]string
|
||||
if n.Spec.IPAM != nil && n.Spec.IPAM.Driver != nil && len(n.Spec.IPAM.Driver.Options) != 0 {
|
||||
dOptions = n.Spec.IPAM.Driver.Options
|
||||
}
|
||||
|
||||
ipam, _ := na.drvRegistry.IPAM(dName)
|
||||
if ipam == nil {
|
||||
return nil, "", nil, fmt.Errorf("could not resolve IPAM driver %s", dName)
|
||||
}
|
||||
|
||||
return ipam, dName, dOptions, nil
|
||||
}
|
||||
|
||||
func (na *cnmNetworkAllocator) freePools(n *api.Network, pools map[string]string) error {
|
||||
ipam, _, _, err := na.resolveIPAM(n)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to resolve IPAM while freeing pools for network %s", n.ID)
|
||||
}
|
||||
|
||||
releasePools(ipam, n.IPAM.Configs, pools)
|
||||
return nil
|
||||
}
|
||||
|
||||
func releasePools(ipam ipamapi.Ipam, icList []*api.IPAMConfig, pools map[string]string) {
|
||||
for _, ic := range icList {
|
||||
if err := ipam.ReleaseAddress(pools[ic.Subnet], net.ParseIP(ic.Gateway)); err != nil {
|
||||
log.G(context.TODO()).WithError(err).Errorf("Failed to release address %s", ic.Subnet)
|
||||
}
|
||||
}
|
||||
|
||||
for k, p := range pools {
|
||||
if err := ipam.ReleasePool(p); err != nil {
|
||||
log.G(context.TODO()).WithError(err).Errorf("Failed to release pool %s", k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (na *cnmNetworkAllocator) allocatePools(n *api.Network) (map[string]string, error) {
|
||||
ipam, dName, dOptions, err := na.resolveIPAM(n)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We don't support user defined address spaces yet so just
|
||||
// retrieve default address space names for the driver.
|
||||
_, asName, err := na.drvRegistry.IPAMDefaultAddressSpaces(dName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pools := make(map[string]string)
|
||||
|
||||
var ipamConfigs []*api.IPAMConfig
|
||||
|
||||
// If there is non-nil IPAM state always prefer those subnet
|
||||
// configs over Spec configs.
|
||||
if n.IPAM != nil {
|
||||
ipamConfigs = n.IPAM.Configs
|
||||
} else if n.Spec.IPAM != nil {
|
||||
ipamConfigs = make([]*api.IPAMConfig, len(n.Spec.IPAM.Configs))
|
||||
copy(ipamConfigs, n.Spec.IPAM.Configs)
|
||||
}
|
||||
|
||||
// Append an empty slot for subnet allocation if there are no
|
||||
// IPAM configs from either spec or state.
|
||||
if len(ipamConfigs) == 0 {
|
||||
ipamConfigs = append(ipamConfigs, &api.IPAMConfig{Family: api.IPAMConfig_IPV4})
|
||||
}
|
||||
|
||||
// Update the runtime IPAM configurations with initial state
|
||||
n.IPAM = &api.IPAMOptions{
|
||||
Driver: &api.Driver{Name: dName, Options: dOptions},
|
||||
Configs: ipamConfigs,
|
||||
}
|
||||
|
||||
for i, ic := range ipamConfigs {
|
||||
poolID, poolIP, meta, err := ipam.RequestPool(asName, ic.Subnet, ic.Range, dOptions, false)
|
||||
if err != nil {
|
||||
// Rollback by releasing all the resources allocated so far.
|
||||
releasePools(ipam, ipamConfigs[:i], pools)
|
||||
return nil, err
|
||||
}
|
||||
pools[poolIP.String()] = poolID
|
||||
|
||||
// The IPAM contract allows the IPAM driver to autonomously
|
||||
// provide a network gateway in response to the pool request.
|
||||
// But if the network spec contains a gateway, we will allocate
|
||||
// it irrespective of whether the ipam driver returned one already.
|
||||
// If none of the above is true, we need to allocate one now, and
|
||||
// let the driver know this request is for the network gateway.
|
||||
var (
|
||||
gwIP *net.IPNet
|
||||
ip net.IP
|
||||
)
|
||||
if gws, ok := meta[netlabel.Gateway]; ok {
|
||||
if ip, gwIP, err = net.ParseCIDR(gws); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse gateway address (%v) returned by ipam driver: %v", gws, err)
|
||||
}
|
||||
gwIP.IP = ip
|
||||
}
|
||||
if ic.Gateway != "" || gwIP == nil {
|
||||
gwIP, _, err = ipam.RequestAddress(poolID, net.ParseIP(ic.Gateway), map[string]string{ipamapi.RequestAddressType: netlabel.Gateway})
|
||||
if err != nil {
|
||||
// Rollback by releasing all the resources allocated so far.
|
||||
releasePools(ipam, ipamConfigs[:i], pools)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if ic.Subnet == "" {
|
||||
ic.Subnet = poolIP.String()
|
||||
}
|
||||
|
||||
if ic.Gateway == "" {
|
||||
ic.Gateway = gwIP.IP.String()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return pools, nil
|
||||
}
|
||||
|
||||
func initializeDrivers(reg *drvregistry.DrvRegistry) error {
|
||||
for _, i := range initializers {
|
||||
if err := reg.AddDriver(i.ntype, i.fn, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func serviceNetworks(s *api.Service) []*api.NetworkAttachmentConfig {
|
||||
// Always prefer NetworkAttachmentConfig in the TaskSpec
|
||||
if len(s.Spec.Task.Networks) == 0 && len(s.Spec.Networks) != 0 {
|
||||
return s.Spec.Networks
|
||||
}
|
||||
return s.Spec.Task.Networks
|
||||
}
|
||||
|
||||
// IsVIPOnIngressNetwork check if the vip is in ingress network
|
||||
func (na *cnmNetworkAllocator) IsVIPOnIngressNetwork(vip *api.Endpoint_VirtualIP) bool {
|
||||
if vip == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
localNet := na.getNetwork(vip.NetworkID)
|
||||
if localNet != nil && localNet.nw != nil {
|
||||
return networkallocator.IsIngressNetwork(localNet.nw)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsBuiltInDriver returns whether the passed driver is an internal network driver
|
||||
func IsBuiltInDriver(name string) bool {
|
||||
n := strings.ToLower(name)
|
||||
for _, d := range initializers {
|
||||
if n == d.ntype {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
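The new cnmallocator/networkallocator.go above keeps the CNM-specific allocation logic behind the networkallocator.NetworkAllocator interface (New returns the interface, and the struct tracks networks, services, tasks and nodes in maps). One detail worth noting is the rollback in Allocate: for non node-local networks the pools are allocated first, and if allocating driver state then fails, the pools are freed before the error is returned. A simplified standalone sketch of that allocate-then-rollback shape; allocatePools, allocateDriverState and freePools here are toy stand-ins, not the real functions:

package main

import (
	"errors"
	"fmt"
)

type network struct {
	id    string
	pools map[string]string
}

func allocatePools(id string) (map[string]string, error) {
	return map[string]string{"10.0.0.0/24": "pool-1"}, nil
}

func allocateDriverState(id string) error {
	return errors.New("driver rejected the allocation") // force the rollback path
}

func freePools(pools map[string]string) {
	for subnet := range pools {
		fmt.Println("released pool for", subnet)
	}
}

// allocate mirrors cnmNetworkAllocator.Allocate for non node-local networks:
// reserve pools, then driver state, and undo the pools if the second step fails.
func allocate(id string) (*network, error) {
	pools, err := allocatePools(id)
	if err != nil {
		return nil, err
	}
	if err := allocateDriverState(id); err != nil {
		freePools(pools)
		return nil, fmt.Errorf("failed while allocating driver state for network %s: %v", id, err)
	}
	return &network{id: id, pools: pools}, nil
}

func main() {
	if _, err := allocate("nw1"); err != nil {
		fmt.Println(err)
	}
}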
|
@ -1,4 +1,4 @@
|
|||
package networkallocator
|
||||
package cnmallocator
|
||||
|
||||
import (
|
||||
"fmt"
|
286
vendor/github.com/docker/swarmkit/manager/allocator/network.go
generated
vendored
|
@ -7,6 +7,7 @@ import (
|
|||
"github.com/docker/go-events"
|
||||
"github.com/docker/swarmkit/api"
|
||||
"github.com/docker/swarmkit/log"
|
||||
"github.com/docker/swarmkit/manager/allocator/cnmallocator"
|
||||
"github.com/docker/swarmkit/manager/allocator/networkallocator"
|
||||
"github.com/docker/swarmkit/manager/state"
|
||||
"github.com/docker/swarmkit/manager/state/store"
|
||||
|
@ -34,7 +35,7 @@ type networkContext struct {
|
|||
ingressNetwork *api.Network
|
||||
// Instance of the low-level network allocator which performs
|
||||
// the actual network allocation.
|
||||
nwkAllocator *networkallocator.NetworkAllocator
|
||||
nwkAllocator networkallocator.NetworkAllocator
|
||||
|
||||
// A set of tasks which are ready to be allocated as a batch. This is
|
||||
// distinct from "unallocatedTasks" which are tasks that failed to
|
||||
|
@ -61,7 +62,7 @@ type networkContext struct {
|
|||
}
|
||||
|
||||
func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
|
||||
na, err := networkallocator.New(a.pluginGetter)
|
||||
na, err := cnmallocator.New(a.pluginGetter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -145,110 +146,31 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
|
|||
log.G(ctx).WithError(err).Error("failed committing allocation of networks during init")
|
||||
}
|
||||
|
||||
// Allocate nodes in the store so far before we process watched events,
|
||||
// if the ingress network is present.
|
||||
// First, allocate objects that already have addresses associated with
|
||||
// them, to reserve these IP addresses in internal state.
|
||||
if nc.ingressNetwork != nil {
|
||||
if err := a.allocateNodes(ctx); err != nil {
|
||||
if err := a.allocateNodes(ctx, true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Allocate services in the store so far before we process watched events.
|
||||
var services []*api.Service
|
||||
a.store.View(func(tx store.ReadTx) {
|
||||
services, err = store.FindServices(tx, store.All)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error listing all services in store while trying to allocate during init")
|
||||
if err := a.allocateServices(ctx, true); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := a.allocateTasks(ctx, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var allocatedServices []*api.Service
|
||||
for _, s := range services {
|
||||
if !nc.nwkAllocator.ServiceNeedsAllocation(s, networkallocator.OnInit) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := a.allocateService(ctx, s); err != nil {
|
||||
log.G(ctx).WithError(err).Errorf("failed allocating service %s during init", s.ID)
|
||||
continue
|
||||
}
|
||||
allocatedServices = append(allocatedServices, s)
|
||||
}
|
||||
|
||||
if err := a.store.Batch(func(batch *store.Batch) error {
|
||||
for _, s := range allocatedServices {
|
||||
if err := a.commitAllocatedService(ctx, batch, s); err != nil {
|
||||
log.G(ctx).WithError(err).Errorf("failed committing allocation of service %s during init", s.ID)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.G(ctx).WithError(err).Error("failed committing allocation of services during init")
|
||||
}
|
||||
|
||||
// Allocate tasks in the store so far before we started watching.
|
||||
var (
|
||||
tasks []*api.Task
|
||||
allocatedTasks []*api.Task
|
||||
)
|
||||
a.store.View(func(tx store.ReadTx) {
|
||||
tasks, err = store.FindTasks(tx, store.All)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error listing all tasks in store while trying to allocate during init")
|
||||
}
|
||||
|
||||
for _, t := range tasks {
|
||||
if t.Status.State > api.TaskStateRunning {
|
||||
continue
|
||||
}
|
||||
|
||||
var s *api.Service
|
||||
if t.ServiceID != "" {
|
||||
a.store.View(func(tx store.ReadTx) {
|
||||
s = store.GetService(tx, t.ServiceID)
|
||||
})
|
||||
}
|
||||
|
||||
// Populate network attachments in the task
|
||||
// based on service spec.
|
||||
a.taskCreateNetworkAttachments(t, s)
|
||||
|
||||
if taskReadyForNetworkVote(t, s, nc) {
|
||||
if t.Status.State >= api.TaskStatePending {
|
||||
continue
|
||||
}
|
||||
|
||||
if a.taskAllocateVote(networkVoter, t.ID) {
|
||||
// If the task is not attached to any network, network
|
||||
// allocators job is done. Immediately cast a vote so
|
||||
// that the task can be moved to the PENDING state as
|
||||
// soon as possible.
|
||||
updateTaskStatus(t, api.TaskStatePending, allocatedStatusMessage)
|
||||
allocatedTasks = append(allocatedTasks, t)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err := a.allocateTask(ctx, t)
|
||||
if err == nil {
|
||||
allocatedTasks = append(allocatedTasks, t)
|
||||
} else if err != errNoChanges {
|
||||
log.G(ctx).WithError(err).Errorf("failed allocating task %s during init", t.ID)
|
||||
nc.unallocatedTasks[t.ID] = t
|
||||
// Now allocate objects that don't have addresses yet.
|
||||
if nc.ingressNetwork != nil {
|
||||
if err := a.allocateNodes(ctx, false); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := a.store.Batch(func(batch *store.Batch) error {
|
||||
for _, t := range allocatedTasks {
|
||||
if err := a.commitAllocatedTask(ctx, batch, t); err != nil {
|
||||
log.G(ctx).WithError(err).Errorf("failed committing allocation of task %s during init", t.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.G(ctx).WithError(err).Error("failed committing allocation of tasks during init")
|
||||
if err := a.allocateServices(ctx, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := a.allocateTasks(ctx, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
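The rewritten doNetworkInit above replaces the single init-time sweep with two passes over each object kind: first with existingAddressesOnly set, so objects that already carry addresses get their reservations re-recorded in the allocator's state, and then again for everything that still needs fresh addresses. A small standalone sketch of that two-pass idea over a toy task list; the types and the skip rule are simplified illustrations, not the swarmkit code:

package main

import "fmt"

type task struct {
	id        string
	addresses []string // addresses already assigned in a previous run, if any
}

// allocateTasks mirrors the existingAddressesOnly flag in the diff: the first
// pass only touches tasks that already have addresses, the second handles the rest.
func allocateTasks(tasks []task, existingAddressesOnly bool, seen map[string]bool) {
	for _, t := range tasks {
		if seen[t.id] {
			continue
		}
		if existingAddressesOnly && len(t.addresses) == 0 {
			continue
		}
		seen[t.id] = true
		fmt.Printf("allocated %s (pre-existing addresses: %v)\n", t.id, t.addresses)
	}
}

func main() {
	tasks := []task{
		{id: "t1", addresses: []string{"10.0.0.5/24"}},
		{id: "t2"},
	}
	seen := map[string]bool{}
	allocateTasks(tasks, true, seen)  // reserve what is already allocated
	allocateTasks(tasks, false, seen) // then allocate the rest
}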
@ -283,7 +205,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
|||
|
||||
if IsIngressNetwork(n) {
|
||||
nc.ingressNetwork = n
|
||||
err := a.allocateNodes(ctx)
|
||||
err := a.allocateNodes(ctx, false)
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).Error(err)
|
||||
}
|
||||
|
@ -317,7 +239,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
|||
break
|
||||
}
|
||||
|
||||
if !nc.nwkAllocator.ServiceNeedsAllocation(s) {
|
||||
if nc.nwkAllocator.IsServiceAllocated(s) {
|
||||
break
|
||||
}
|
||||
|
||||
|
@ -345,7 +267,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
|||
break
|
||||
}
|
||||
|
||||
if !nc.nwkAllocator.ServiceNeedsAllocation(s) {
|
||||
if nc.nwkAllocator.IsServiceAllocated(s) {
|
||||
if !nc.nwkAllocator.HostPublishPortsNeedUpdate(s) {
|
||||
break
|
||||
}
|
||||
|
@ -368,7 +290,7 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
|
|||
case api.EventDeleteService:
|
||||
s := v.Service.Copy()
|
||||
|
||||
if err := nc.nwkAllocator.ServiceDeallocate(s); err != nil {
|
||||
if err := nc.nwkAllocator.DeallocateService(s); err != nil {
|
||||
log.G(ctx).WithError(err).Errorf("Failed deallocation during delete of service %s", s.ID)
|
||||
}
|
||||
|
||||
|
@ -455,7 +377,7 @@ func (a *Allocator) doNodeAlloc(ctx context.Context, ev events.Event) {
|
|||
}
|
||||
}
|
||||
|
||||
func (a *Allocator) allocateNodes(ctx context.Context) error {
|
||||
func (a *Allocator) allocateNodes(ctx context.Context, existingAddressesOnly bool) error {
|
||||
// Allocate nodes in the store so far before we process watched events.
|
||||
var (
|
||||
allocatedNodes []*api.Node
|
||||
|
@ -480,6 +402,10 @@ func (a *Allocator) allocateNodes(ctx context.Context) error {
|
|||
node.Attachment = &api.NetworkAttachment{}
|
||||
}
|
||||
|
||||
if existingAddressesOnly && len(node.Attachment.Addresses) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
node.Attachment.Network = nc.ingressNetwork.Copy()
|
||||
if err := a.allocateNode(ctx, node); err != nil {
|
||||
log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s", node.ID)
|
||||
|
@ -534,6 +460,138 @@ func (a *Allocator) deallocateNodes(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// allocateServices allocates services in the store so far before we process
|
||||
// watched events.
|
||||
func (a *Allocator) allocateServices(ctx context.Context, existingAddressesOnly bool) error {
|
||||
var (
|
||||
nc = a.netCtx
|
||||
services []*api.Service
|
||||
err error
|
||||
)
|
||||
a.store.View(func(tx store.ReadTx) {
|
||||
services, err = store.FindServices(tx, store.All)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error listing all services in store while trying to allocate during init")
|
||||
}
|
||||
|
||||
var allocatedServices []*api.Service
|
||||
for _, s := range services {
|
||||
if nc.nwkAllocator.IsServiceAllocated(s, networkallocator.OnInit) {
|
||||
continue
|
||||
}
|
||||
|
||||
if existingAddressesOnly &&
|
||||
(s.Endpoint == nil ||
|
||||
len(s.Endpoint.VirtualIPs) == 0) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := a.allocateService(ctx, s); err != nil {
|
||||
log.G(ctx).WithError(err).Errorf("failed allocating service %s during init", s.ID)
|
||||
continue
|
||||
}
|
||||
allocatedServices = append(allocatedServices, s)
|
||||
}
|
||||
|
||||
if err := a.store.Batch(func(batch *store.Batch) error {
|
||||
for _, s := range allocatedServices {
|
||||
if err := a.commitAllocatedService(ctx, batch, s); err != nil {
|
||||
log.G(ctx).WithError(err).Errorf("failed committing allocation of service %s during init", s.ID)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.G(ctx).WithError(err).Error("failed committing allocation of services during init")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// allocateTasks allocates tasks in the store so far before we started watching.
|
||||
func (a *Allocator) allocateTasks(ctx context.Context, existingAddressesOnly bool) error {
|
||||
var (
|
||||
nc = a.netCtx
|
||||
tasks []*api.Task
|
||||
allocatedTasks []*api.Task
|
||||
err error
|
||||
)
|
||||
a.store.View(func(tx store.ReadTx) {
|
||||
tasks, err = store.FindTasks(tx, store.All)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error listing all tasks in store while trying to allocate during init")
|
||||
}
|
||||
|
||||
for _, t := range tasks {
|
||||
if t.Status.State > api.TaskStateRunning {
|
||||
continue
|
||||
}
|
||||
|
||||
if existingAddressesOnly {
|
||||
hasAddresses := false
|
||||
for _, nAttach := range t.Networks {
|
||||
if len(nAttach.Addresses) != 0 {
|
||||
hasAddresses = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasAddresses {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var s *api.Service
|
||||
if t.ServiceID != "" {
|
||||
a.store.View(func(tx store.ReadTx) {
|
||||
s = store.GetService(tx, t.ServiceID)
|
||||
})
|
||||
}
|
||||
|
||||
// Populate network attachments in the task
|
||||
// based on service spec.
|
||||
a.taskCreateNetworkAttachments(t, s)
|
||||
|
||||
if taskReadyForNetworkVote(t, s, nc) {
|
||||
if t.Status.State >= api.TaskStatePending {
|
||||
continue
|
||||
}
|
||||
|
||||
if a.taskAllocateVote(networkVoter, t.ID) {
|
||||
// If the task is not attached to any network, network
|
||||
// allocators job is done. Immediately cast a vote so
|
||||
// that the task can be moved to the PENDING state as
|
||||
// soon as possible.
|
||||
updateTaskStatus(t, api.TaskStatePending, allocatedStatusMessage)
|
||||
allocatedTasks = append(allocatedTasks, t)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err := a.allocateTask(ctx, t)
|
||||
if err == nil {
|
||||
allocatedTasks = append(allocatedTasks, t)
|
||||
} else if err != errNoChanges {
|
||||
log.G(ctx).WithError(err).Errorf("failed allocating task %s during init", t.ID)
|
||||
nc.unallocatedTasks[t.ID] = t
|
||||
}
|
||||
}
|
||||
|
||||
if err := a.store.Batch(func(batch *store.Batch) error {
|
||||
for _, t := range allocatedTasks {
|
||||
if err := a.commitAllocatedTask(ctx, batch, t); err != nil {
|
||||
log.G(ctx).WithError(err).Errorf("failed committing allocation of task %s during init", t.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.G(ctx).WithError(err).Error("failed committing allocation of tasks during init")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// taskReadyForNetworkVote checks if the task is ready for a network
|
||||
// vote to move it to PENDING state.
|
||||
func taskReadyForNetworkVote(t *api.Task, s *api.Service, nc *networkContext) bool {
|
||||
|
@ -544,7 +602,7 @@ func taskReadyForNetworkVote(t *api.Task, s *api.Service, nc *networkContext) bo
|
|||
// network configured or service endpoints have been
|
||||
// allocated.
|
||||
return (len(t.Networks) == 0 || nc.nwkAllocator.IsTaskAllocated(t)) &&
|
||||
(s == nil || !nc.nwkAllocator.ServiceNeedsAllocation(s))
|
||||
(s == nil || nc.nwkAllocator.IsServiceAllocated(s))
|
||||
}
|
||||
|
||||
func taskUpdateNetworks(t *api.Task, networks []*api.NetworkAttachment) {
|
||||
|
@ -774,12 +832,12 @@ func (a *Allocator) allocateService(ctx context.Context, s *api.Service) error {
|
|||
} else if s.Endpoint != nil {
|
||||
// service has no user-defined endpoints while has already allocated network resources,
|
||||
// need deallocated.
|
||||
if err := nc.nwkAllocator.ServiceDeallocate(s); err != nil {
|
||||
if err := nc.nwkAllocator.DeallocateService(s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := nc.nwkAllocator.ServiceAllocate(s); err != nil {
|
||||
if err := nc.nwkAllocator.AllocateService(s); err != nil {
|
||||
nc.unallocatedServices[s.ID] = s
|
||||
return err
|
||||
}
|
||||
|
@ -814,7 +872,7 @@ func (a *Allocator) commitAllocatedService(ctx context.Context, batch *store.Bat
|
|||
|
||||
return errors.Wrapf(err, "failed updating state in store transaction for service %s", s.ID)
|
||||
}); err != nil {
|
||||
if err := a.netCtx.nwkAllocator.ServiceDeallocate(s); err != nil {
|
||||
if err := a.netCtx.nwkAllocator.DeallocateService(s); err != nil {
|
||||
log.G(ctx).WithError(err).Errorf("failed rolling back allocation of service %s", s.ID)
|
||||
}
|
||||
|
||||
|
@ -869,7 +927,7 @@ func (a *Allocator) allocateTask(ctx context.Context, t *api.Task) (err error) {
|
|||
return
|
||||
}
|
||||
|
||||
if nc.nwkAllocator.ServiceNeedsAllocation(s) {
|
||||
if !nc.nwkAllocator.IsServiceAllocated(s) {
|
||||
err = fmt.Errorf("service %s to which this task %s belongs has pending allocations", s.ID, t.ID)
|
||||
return
|
||||
}
|
||||
|
@ -986,7 +1044,7 @@ func (a *Allocator) procUnallocatedServices(ctx context.Context) {
|
|||
nc := a.netCtx
|
||||
var allocatedServices []*api.Service
|
||||
for _, s := range nc.unallocatedServices {
|
||||
if nc.nwkAllocator.ServiceNeedsAllocation(s) {
|
||||
if !nc.nwkAllocator.IsServiceAllocated(s) {
|
||||
if err := a.allocateService(ctx, s); err != nil {
|
||||
log.G(ctx).WithError(err).Debugf("Failed allocation of unallocated service %s", s.ID)
|
||||
continue
|
||||
|
@ -1071,6 +1129,16 @@ func (a *Allocator) procTasksNetwork(ctx context.Context, onRetry bool) {
|
|||
}
|
||||
}
|
||||
|
||||
// IsBuiltInNetworkDriver returns whether the passed driver is an internal network driver
|
||||
func IsBuiltInNetworkDriver(name string) bool {
|
||||
return cnmallocator.IsBuiltInDriver(name)
|
||||
}
|
||||
|
||||
// PredefinedNetworks returns the list of predefined network structures for a given network model
|
||||
func PredefinedNetworks() []networkallocator.PredefinedNetworkData {
|
||||
return cnmallocator.PredefinedNetworks()
|
||||
}
|
||||
|
||||
// updateTaskStatus sets TaskStatus and updates timestamp.
|
||||
func updateTaskStatus(t *api.Task, newStatus api.TaskState, message string) {
|
||||
t.Status.State = newStatus
|
||||
|
|
|
@ -1,10 +0,0 @@
|
|||
// +build !linux,!darwin,!windows
|
||||
|
||||
package networkallocator
|
||||
|
||||
const initializers = nil
|
||||
|
||||
// PredefinedNetworks returns the list of predefined network structures
|
||||
func PredefinedNetworks() []PredefinedNetworkData {
|
||||
return nil
|
||||
}
|
1018
vendor/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go
generated
vendored
File diff suppressed because it is too large
8
vendor/github.com/docker/swarmkit/manager/controlapi/common.go
generated
vendored
|
@ -5,9 +5,10 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
"github.com/docker/libnetwork/driverapi"
|
||||
"github.com/docker/libnetwork/ipamapi"
|
||||
"github.com/docker/swarmkit/api"
|
||||
"github.com/docker/swarmkit/manager/allocator/networkallocator"
|
||||
"github.com/docker/swarmkit/manager/allocator"
|
||||
"github.com/docker/swarmkit/manager/state/store"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
|
@ -110,10 +111,11 @@ func validateDriver(driver *api.Driver, pg plugingetter.PluginGetter, pluginType
|
|||
if strings.ToLower(driver.Name) == ipamapi.DefaultIPAM {
|
||||
return nil
|
||||
}
|
||||
default:
|
||||
if networkallocator.IsBuiltInDriver(driver.Name) {
|
||||
case driverapi.NetworkPluginEndpointType:
|
||||
if allocator.IsBuiltInNetworkDriver(driver.Name) {
|
||||
return nil
|
||||
}
|
||||
default:
|
||||
}
|
||||
|
||||
if pg == nil {
|
||||
|
|
4
vendor/github.com/docker/swarmkit/manager/controlapi/service.go
generated
vendored
|
@ -9,6 +9,7 @@ import (
|
|||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/swarmkit/api"
|
||||
"github.com/docker/swarmkit/api/defaults"
|
||||
"github.com/docker/swarmkit/api/genericresource"
|
||||
"github.com/docker/swarmkit/api/naming"
|
||||
"github.com/docker/swarmkit/identity"
|
||||
"github.com/docker/swarmkit/manager/allocator"
|
||||
|
@ -42,6 +43,9 @@ func validateResources(r *api.Resources) error {
|
|||
if r.MemoryBytes != 0 && r.MemoryBytes < 4*1024*1024 {
|
||||
return grpc.Errorf(codes.InvalidArgument, "invalid memory value %d: Must be at least 4MiB", r.MemoryBytes)
|
||||
}
|
||||
if err := genericresource.ValidateTask(r); err != nil {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
12
vendor/github.com/docker/swarmkit/manager/manager.go
generated
vendored
|
@ -217,7 +217,6 @@ func New(config *Config) (*Manager, error) {
|
|||
|
||||
m := &Manager{
|
||||
config: *config,
|
||||
collector: metrics.NewCollector(raftNode.MemoryStore()),
|
||||
caserver: ca.NewServer(raftNode.MemoryStore(), config.SecurityConfig, config.RootCAPaths),
|
||||
dispatcher: dispatcher.New(raftNode, dispatcher.DefaultConfig()),
|
||||
logbroker: logbroker.New(raftNode.MemoryStore()),
|
||||
|
@ -502,12 +501,16 @@ func (m *Manager) Run(parent context.Context) error {
|
|||
healthServer.SetServingStatus("Raft", api.HealthCheckResponse_SERVING)
|
||||
|
||||
if err := m.raftNode.JoinAndStart(ctx); err != nil {
|
||||
// Don't block future calls to Stop.
|
||||
close(m.started)
|
||||
return errors.Wrap(err, "can't initialize raft node")
|
||||
}
|
||||
|
||||
localHealthServer.SetServingStatus("ControlAPI", api.HealthCheckResponse_SERVING)
|
||||
|
||||
// Start metrics collection.
|
||||
|
||||
m.collector = metrics.NewCollector(m.raftNode.MemoryStore())
|
||||
go func(collector *metrics.Collector) {
|
||||
if err := collector.Run(ctx); err != nil {
|
||||
log.G(ctx).WithError(err).Error("collector failed with an error")
|
||||
|
@ -590,7 +593,10 @@ func (m *Manager) Stop(ctx context.Context, clearData bool) {
|
|||
|
||||
m.raftNode.Cancel()
|
||||
|
||||
m.collector.Stop()
|
||||
if m.collector != nil {
|
||||
m.collector.Stop()
|
||||
}
|
||||
|
||||
m.dispatcher.Stop()
|
||||
m.logbroker.Stop()
|
||||
m.caserver.Stop()
|
||||
|
@ -944,7 +950,7 @@ func (m *Manager) becomeLeader(ctx context.Context) {
// in order to allow running services on the predefined docker
// networks like `bridge` and `host`.
log.G(ctx).Info("Creating node-local predefined networks")
for _, p := range networkallocator.PredefinedNetworks() {
for _, p := range allocator.PredefinedNetworks() {
if err := store.CreateNetwork(tx, newPredefinedNetwork(p.Name, p.Driver)); err != nil {
log.G(ctx).WithError(err).Error("failed to create predefined network " + p.Name)
}
@ -4,6 +4,7 @@ import (
"time"

"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/genericresource"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/manager/constraint"
"github.com/docker/swarmkit/manager/state"
@ -83,10 +84,11 @@ func (ce *ConstraintEnforcer) rejectNoncompliantTasks(node *api.Node) {
log.L.WithError(err).Errorf("failed to list tasks for node ID %s", node.ID)
}

var availableMemoryBytes, availableNanoCPUs int64
available := &api.Resources{}
var fakeStore []*api.GenericResource

if node.Description != nil && node.Description.Resources != nil {
availableMemoryBytes = node.Description.Resources.MemoryBytes
availableNanoCPUs = node.Description.Resources.NanoCPUs
available = node.Description.Resources.Copy()
}

removeTasks := make(map[string]*api.Task)
@ -97,6 +99,7 @@ func (ce *ConstraintEnforcer) rejectNoncompliantTasks(node *api.Node) {
// a separate pass over the tasks for each type of
// resource, and sort by the size of the reservation
// to remove the most resource-intensive tasks.
loop:
for _, t := range tasks {
if t.DesiredState < api.TaskStateAssigned || t.DesiredState > api.TaskStateRunning {
continue
@ -115,16 +118,27 @@ func (ce *ConstraintEnforcer) rejectNoncompliantTasks(node *api.Node) {
// Ensure that the task assigned to the node
// still satisfies the resource limits.
if t.Spec.Resources != nil && t.Spec.Resources.Reservations != nil {
if t.Spec.Resources.Reservations.MemoryBytes > availableMemoryBytes {
if t.Spec.Resources.Reservations.MemoryBytes > available.MemoryBytes {
removeTasks[t.ID] = t
continue
}
if t.Spec.Resources.Reservations.NanoCPUs > availableNanoCPUs {
if t.Spec.Resources.Reservations.NanoCPUs > available.NanoCPUs {
removeTasks[t.ID] = t
continue
}
availableMemoryBytes -= t.Spec.Resources.Reservations.MemoryBytes
availableNanoCPUs -= t.Spec.Resources.Reservations.NanoCPUs
for _, ta := range t.AssignedGenericResources {
// Type change or no longer available
if genericresource.HasResource(ta, available.Generic) {
removeTasks[t.ID] = t
break loop
}
}

available.MemoryBytes -= t.Spec.Resources.Reservations.MemoryBytes
available.NanoCPUs -= t.Spec.Resources.Reservations.NanoCPUs

genericresource.ClaimResources(&available.Generic,
&fakeStore, t.AssignedGenericResources)
}
}
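Note (illustrative, not part of the vendored diff): the constraint enforcer changes above replace two raw int64 counters with a copy of the node's api.Resources so that named generic resources (e.g. GPUs) are tracked alongside memory and CPU. A minimal, self-contained sketch of that bookkeeping pattern, with hypothetical local types standing in for the swarmkit API:

package main

import "fmt"

// resources is a stand-in for api.Resources: memory/CPU plus named generic resources.
type resources struct {
	MemoryBytes int64
	NanoCPUs    int64
	Generic     map[string]int64 // e.g. "GPU" -> count
}

// fits reports whether a reservation still fits in what is available.
func fits(available, reservation resources) bool {
	if reservation.MemoryBytes > available.MemoryBytes || reservation.NanoCPUs > available.NanoCPUs {
		return false
	}
	for name, want := range reservation.Generic {
		if available.Generic[name] < want {
			return false
		}
	}
	return true
}

// claim subtracts a reservation from the available pool (caller has already checked fits).
func claim(available *resources, reservation resources) {
	available.MemoryBytes -= reservation.MemoryBytes
	available.NanoCPUs -= reservation.NanoCPUs
	for name, want := range reservation.Generic {
		available.Generic[name] -= want
	}
}

func main() {
	node := resources{MemoryBytes: 8 << 30, NanoCPUs: 4e9, Generic: map[string]int64{"GPU": 2}}
	task := resources{MemoryBytes: 2 << 30, NanoCPUs: 1e9, Generic: map[string]int64{"GPU": 1}}
	if fits(node, task) {
		claim(&node, task)
	}
	fmt.Printf("remaining: %+v\n", node)
}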
2
vendor/github.com/docker/swarmkit/manager/orchestrator/task.go
generated
vendored
@ -63,7 +63,7 @@ func IsTaskDirty(s *api.Service, t *api.Task) bool {
// If the spec version matches, we know the task is not dirty. However,
// if it does not match, that doesn't mean the task is dirty, since
// only a portion of the spec is included in the comparison.
if t.SpecVersion != nil && *s.SpecVersion == *t.SpecVersion {
if t.SpecVersion != nil && s.SpecVersion != nil && *s.SpecVersion == *t.SpecVersion {
return false
}
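Note (illustrative, not part of the vendored diff): the one-line fix above guards against a nil s.SpecVersion before dereferencing it. A minimal illustration of the pattern with a hypothetical local Version type:

package main

import "fmt"

// Version stands in for api.Version; either side may legitimately be nil.
type Version struct{ Index uint64 }

// sameVersion only compares when both pointers are non-nil, so a nil
// service-side version can never cause a panic.
func sameVersion(a, b *Version) bool {
	return a != nil && b != nil && *a == *b
}

func main() {
	v1 := &Version{Index: 7}
	fmt.Println(sameVersion(v1, &Version{Index: 7})) // true
	fmt.Println(sameVersion(nil, v1))                // false, no panic
}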
19
vendor/github.com/docker/swarmkit/manager/scheduler/filter.go
generated
vendored
@ -5,6 +5,7 @@ import (
"strings"

"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/genericresource"
"github.com/docker/swarmkit/manager/constraint"
)
@ -61,9 +62,12 @@ func (f *ResourceFilter) SetTask(t *api.Task) bool {
if r == nil || r.Reservations == nil {
return false
}
if r.Reservations.NanoCPUs == 0 && r.Reservations.MemoryBytes == 0 {

res := r.Reservations
if res.NanoCPUs == 0 && res.MemoryBytes == 0 && len(res.Generic) == 0 {
return false
}

f.reservations = r.Reservations
return true
}
@ -78,6 +82,13 @@ func (f *ResourceFilter) Check(n *NodeInfo) bool {
return false
}

for _, v := range f.reservations.Generic {
enough, err := genericresource.HasEnough(n.AvailableResources.Generic, v)
if err != nil || !enough {
return false
}
}

return true
}
@ -128,6 +139,12 @@ func (f *PluginFilter) SetTask(t *api.Task) bool {
// Check returns true if the task can be scheduled into the given node.
// TODO(amitshukla): investigate storing Plugins as a map so it can be easily probed
func (f *PluginFilter) Check(n *NodeInfo) bool {
if n.Description == nil || n.Description.Engine == nil {
// If the node is not running Engine, plugins are not
// supported.
return true
}

// Get list of plugins on the node
nodePlugins := n.Description.Engine.Plugins
35
vendor/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go
generated
vendored
@ -4,6 +4,7 @@ import (
"time"

"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/genericresource"
"github.com/docker/swarmkit/log"
"golang.org/x/net/context"
)
@ -20,7 +21,7 @@ type NodeInfo struct {
Tasks map[string]*api.Task
ActiveTasksCount int
ActiveTasksCountByService map[string]int
AvailableResources api.Resources
AvailableResources *api.Resources
usedHostPorts map[hostPortSpec]struct{}

// recentFailures is a map from service ID to the timestamps of the
@ -36,7 +37,7 @@ func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api
Node: n,
Tasks: make(map[string]*api.Task),
ActiveTasksCountByService: make(map[string]int),
AvailableResources: availableResources,
AvailableResources: availableResources.Copy(),
usedHostPorts: make(map[hostPortSpec]struct{}),
recentFailures: make(map[string][]time.Time),
}
@ -44,6 +45,7 @@ func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api
for _, t := range tasks {
nodeInfo.addTask(t)
}

return nodeInfo
}
@ -62,8 +64,20 @@ func (nodeInfo *NodeInfo) removeTask(t *api.Task) bool {
}

reservations := taskReservations(t.Spec)
nodeInfo.AvailableResources.MemoryBytes += reservations.MemoryBytes
nodeInfo.AvailableResources.NanoCPUs += reservations.NanoCPUs
resources := nodeInfo.AvailableResources

resources.MemoryBytes += reservations.MemoryBytes
resources.NanoCPUs += reservations.NanoCPUs

if nodeInfo.Description == nil || nodeInfo.Description.Resources == nil ||
nodeInfo.Description.Resources.Generic == nil {
return true
}

taskAssigned := t.AssignedGenericResources
nodeAvailableResources := &resources.Generic
nodeRes := nodeInfo.Description.Resources.Generic
genericresource.Reclaim(nodeAvailableResources, taskAssigned, nodeRes)

if t.Endpoint != nil {
for _, port := range t.Endpoint.Ports {
@ -97,9 +111,18 @@ func (nodeInfo *NodeInfo) addTask(t *api.Task) bool {
}

nodeInfo.Tasks[t.ID] = t

reservations := taskReservations(t.Spec)
nodeInfo.AvailableResources.MemoryBytes -= reservations.MemoryBytes
nodeInfo.AvailableResources.NanoCPUs -= reservations.NanoCPUs
resources := nodeInfo.AvailableResources

resources.MemoryBytes -= reservations.MemoryBytes
resources.NanoCPUs -= reservations.NanoCPUs

// minimum size required
t.AssignedGenericResources = make([]*api.GenericResource, 0, len(resources.Generic))
taskAssigned := &t.AssignedGenericResources

genericresource.Claim(&resources.Generic, taskAssigned, reservations.Generic)

if t.Endpoint != nil {
for _, port := range t.Endpoint.Ports {
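Note (illustrative, not part of the vendored diff): AvailableResources becomes a pointer in the NodeInfo changes above, which is why newNodeInfo and createOrUpdateNode now call Copy() on the node's declared resources before subtracting reservations. A self-contained illustration of the aliasing hazard the copy avoids, using a local stand-in type:

package main

import "fmt"

// res is a stand-in for api.Resources.
type res struct {
	MemoryBytes int64
	NanoCPUs    int64
}

// copyRes mimics api.Resources.Copy(): a fresh value the caller can mutate freely.
func copyRes(r *res) *res {
	c := *r
	return &c
}

func main() {
	described := &res{MemoryBytes: 8 << 30, NanoCPUs: 4e9} // node's declared capacity

	// Sharing the pointer lets scheduling bookkeeping corrupt the description:
	aliased := described
	aliased.MemoryBytes -= 1 << 30
	fmt.Println(described.MemoryBytes) // declared capacity now appears reduced, which is wrong

	// Copying first keeps the declared capacity intact while tracking what is free:
	described = &res{MemoryBytes: 8 << 30, NanoCPUs: 4e9}
	available := copyRes(described)
	available.MemoryBytes -= 1 << 30
	fmt.Println(described.MemoryBytes, available.MemoryBytes) // 8589934592 7516192768
}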
11
vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go
generated
vendored
@ -4,6 +4,7 @@ import (
"time"

"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/genericresource"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/manager/state"
"github.com/docker/swarmkit/manager/state/store"
@ -297,15 +298,21 @@ func (s *Scheduler) deleteTask(ctx context.Context, t *api.Task) bool {

func (s *Scheduler) createOrUpdateNode(n *api.Node) {
nodeInfo, _ := s.nodeSet.nodeInfo(n.ID)
var resources api.Resources
var resources *api.Resources
if n.Description != nil && n.Description.Resources != nil {
resources = *n.Description.Resources
resources = n.Description.Resources.Copy()
// reconcile resources by looping over all tasks in this node
for _, task := range nodeInfo.Tasks {
reservations := taskReservations(task.Spec)

resources.MemoryBytes -= reservations.MemoryBytes
resources.NanoCPUs -= reservations.NanoCPUs

genericresource.ConsumeNodeResources(&resources.Generic,
task.AssignedGenericResources)
}
} else {
resources = &api.Resources{}
}
nodeInfo.Node = n
nodeInfo.AvailableResources = resources
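Note (illustrative, not part of the vendored diff): createOrUpdateNode now rebuilds availability from scratch whenever the node record changes: copy the declared capacity, then subtract the reservations of every task already assigned. A minimal stand-alone sketch of that reconcile loop (local types, not the swarmkit API):

package main

import "fmt"

// capacity is a stand-in for api.Resources.
type capacity struct {
	MemoryBytes int64
	NanoCPUs    int64
}

// reconcile returns what is still free on a node: declared capacity minus
// the reservations of the tasks already assigned to it.
func reconcile(declared capacity, reservations []capacity) capacity {
	free := declared // work on a copy, never on the declared capacity itself
	for _, r := range reservations {
		free.MemoryBytes -= r.MemoryBytes
		free.NanoCPUs -= r.NanoCPUs
	}
	return free
}

func main() {
	node := capacity{MemoryBytes: 16 << 30, NanoCPUs: 8e9}
	tasks := []capacity{
		{MemoryBytes: 4 << 30, NanoCPUs: 2e9},
		{MemoryBytes: 1 << 30, NanoCPUs: 5e8},
	}
	fmt.Printf("free: %+v\n", reconcile(node, tasks))
}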
2
vendor/github.com/docker/swarmkit/manager/state/raft/raft.go
generated
vendored
@ -361,7 +361,7 @@ func (n *Node) JoinAndStart(ctx context.Context) (err error) {
if err != nil {
n.stopMu.Lock()
// to shutdown transport
close(n.stopped)
n.cancelFunc()
n.stopMu.Unlock()
n.done()
} else {
59
vendor/github.com/docker/swarmkit/node/node.go
generated
vendored
@ -10,6 +10,7 @@ import (
"path/filepath"
"reflect"
"sort"
"strings"
"sync"
"time"
@ -33,6 +34,7 @@ import (
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
)
@ -344,14 +346,14 @@ func (n *Node) run(ctx context.Context) (err error) {
log.G(ctx).WithError(err).Error("invalid new root certificate from the dispatcher")
continue
}
if err := ca.SaveRootCA(newRootCA, paths.RootCA); err != nil {
log.G(ctx).WithError(err).Error("could not save new root certificate from the dispatcher")
continue
}
if err := securityConfig.UpdateRootCA(&newRootCA, newRootCA.Pool); err != nil {
log.G(ctx).WithError(err).Error("could not use new root CA from dispatcher")
continue
}
if err := ca.SaveRootCA(newRootCA, paths.RootCA); err != nil {
log.G(ctx).WithError(err).Error("could not save new root certificate from the dispatcher")
continue
}
}
}
}
@ -432,10 +434,10 @@ func (n *Node) run(ctx context.Context) (err error) {
}()

wg.Wait()
if managerErr != nil && managerErr != context.Canceled {
if managerErr != nil && errors.Cause(managerErr) != context.Canceled {
return managerErr
}
if agentErr != nil && agentErr != context.Canceled {
if agentErr != nil && errors.Cause(agentErr) != context.Canceled {
return agentErr
}
return err
@ -516,7 +518,7 @@ waitPeer:
rootCA := securityConfig.RootCA()
issuer := securityConfig.IssuerInfo()

a, err := agent.New(&agent.Config{
agentConfig := &agent.Config{
Hostname: n.config.Hostname,
ConnBroker: n.connBroker,
Executor: n.config.Executor,
@ -529,7 +531,14 @@ waitPeer:
CertIssuerPublicKey: issuer.PublicKey,
CertIssuerSubject: issuer.Subject,
},
})
}
// if a join address has been specified, then if the agent fails to connect due to a TLS error, fail fast - don't
// keep re-trying to join
if n.config.JoinAddr != "" {
agentConfig.SessionTracker = &firstSessionErrorTracker{}
}

a, err := agent.New(agentConfig)
if err != nil {
return err
}
@ -1055,3 +1064,37 @@ func (sp sortablePeers) Less(i, j int) bool { return sp[i].NodeID < sp[j].NodeID
func (sp sortablePeers) Len() int { return len(sp) }

func (sp sortablePeers) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] }

// firstSessionErrorTracker is a utility that helps determine whether the agent should exit after
// a TLS failure on establishing the first session. This should only happen if a join address
// is specified. If establishing the first session succeeds, but later on some session fails
// because of a TLS error, we don't want to exit the agent because a previously successful
// session indicates that the TLS error may be a transient issue.
type firstSessionErrorTracker struct {
mu sync.Mutex
pastFirstSession bool
err error
}

func (fs *firstSessionErrorTracker) SessionEstablished() {
fs.mu.Lock()
fs.pastFirstSession = true
fs.mu.Unlock()
}

func (fs *firstSessionErrorTracker) SessionError(err error) {
fs.mu.Lock()
fs.err = err
fs.mu.Unlock()
}

func (fs *firstSessionErrorTracker) SessionClosed() error {
fs.mu.Lock()
defer fs.mu.Unlock()
// unfortunately grpc connection errors are type grpc.rpcError, which are not exposed, and we can't get at the underlying error type
if !fs.pastFirstSession && grpc.Code(fs.err) == codes.Internal &&
strings.HasPrefix(grpc.ErrorDesc(fs.err), "connection error") && strings.Contains(grpc.ErrorDesc(fs.err), "transport: x509:") {
return fs.err
}
return nil
}
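Note (illustrative, not part of the vendored diff): firstSessionErrorTracker above is one implementation of the agent's session hooks (SessionEstablished, SessionError, SessionClosed). A self-contained sketch of another, hypothetical tracker that merely logs failures and never asks the agent to exit; the interface is declared locally here so the example compiles on its own:

package main

import (
	"errors"
	"log"
	"sync"
)

// sessionTracker mirrors the three hooks the agent calls on its configured
// SessionTracker, per the diff above.
type sessionTracker interface {
	SessionEstablished()
	SessionError(err error)
	SessionClosed() error
}

// loggingTracker records how often sessions fail but never makes the agent exit
// (SessionClosed always returns nil).
type loggingTracker struct {
	mu       sync.Mutex
	failures int
}

func (l *loggingTracker) SessionEstablished() {
	l.mu.Lock()
	l.failures = 0 // a successful session resets the failure streak
	l.mu.Unlock()
}

func (l *loggingTracker) SessionError(err error) {
	l.mu.Lock()
	l.failures++
	log.Printf("session error #%d: %v", l.failures, err)
	l.mu.Unlock()
}

func (l *loggingTracker) SessionClosed() error {
	return nil // a non-nil error here would make the agent exit
}

func main() {
	var t sessionTracker = &loggingTracker{}
	t.SessionError(errors.New("connection error: transport closed"))
	t.SessionEstablished()
	if err := t.SessionClosed(); err != nil {
		log.Fatal(err)
	}
}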
7
vendor/github.com/docker/swarmkit/vendor.conf
generated
vendored
@ -1,8 +1,9 @@
# grpc and protobuf
google.golang.org/grpc v1.0.4
google.golang.org/grpc v1.3.0
github.com/gogo/protobuf v0.4
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
github.com/matttproud/golang_protobuf_extensions v1.0.0
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944

# metrics
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
@ -50,7 +51,7 @@ github.com/spf13/cobra 8e91712f174ced10270cf66615e0a9127e7c4de5
github.com/spf13/pflag 7f60f83a2c81bc3c3c0d5297f61ddfa68da9d3b7
github.com/stretchr/testify v1.1.4
golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2
golang.org/x/net c427ad74c6d7a814201695e9ffde0c5d400a7674
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 5eaf0df67e70d6997a9fe0ed24383fa1b01638d3
golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb