Merge pull request #30843 from vdemeester/daemon-duplication-refactor

Few daemon duplication refactor
Vincent Demeester authored on 2017-02-28 13:14:07 +01:00; committed by GitHub
commit 98a1d9a712
17 changed files with 452 additions and 586 deletions


@@ -89,8 +89,7 @@ var errSwarmCertificatesExpired = errors.New("Swarm certificates have expired. T
 // NetworkSubnetsProvider exposes functions for retrieving the subnets
 // of networks managed by Docker, so they can be filtered.
 type NetworkSubnetsProvider interface {
-	V4Subnets() []net.IPNet
-	V6Subnets() []net.IPNet
+	Subnets() ([]net.IPNet, []net.IPNet)
 }

 // Config provides values for Cluster.
@@ -387,3 +386,18 @@ func detectLockedError(err error) error {
 	}
 	return err
 }
+
+func (c *Cluster) lockedManagerAction(fn func(ctx context.Context, state nodeState) error) error {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	state := c.currentNodeState()
+	if !state.IsActiveManager() {
+		return c.errNoManager(state)
+	}
+
+	ctx, cancel := c.getRequestContext()
+	defer cancel()
+
+	return fn(ctx, state)
+}
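Every manager-only entry point rewritten in the hunks below collapses onto this helper. Endpoints that must hand a result back capture it in a variable assigned inside the closure, since lockedManagerAction itself only returns an error. A minimal sketch of that shape (getFoo, the Foo types, and FooFromGRPC are hypothetical placeholders, not code from this commit):

	func (c *Cluster) GetFoo(input string) (types.Foo, error) {
		var foo *swarmapi.Foo
		if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
			// the closure runs with c.mu read-locked, a verified manager state,
			// and a request context that is cancelled on return
			f, err := getFoo(ctx, state.controlClient, input)
			if err != nil {
				return err
			}
			foo = f // capture the result for use after the helper returns
			return nil
		}); err != nil {
			return types.Foo{}, err
		}
		return convert.FooFromGRPC(*foo), nil
	}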


@@ -89,13 +89,7 @@ func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec {
 		endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String()))

 		for _, portState := range es.Ports {
-			endpointSpec.Ports = append(endpointSpec.Ports, types.PortConfig{
-				Name:          portState.Name,
-				Protocol:      types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])),
-				PublishMode:   types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portState.PublishMode)])),
-				TargetPort:    portState.TargetPort,
-				PublishedPort: portState.PublishedPort,
-			})
+			endpointSpec.Ports = append(endpointSpec.Ports, swarmPortConfigToAPIPortConfig(portState))
 		}
 	}
 	return endpointSpec
@@ -109,13 +103,7 @@ func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint {
 	}

 	for _, portState := range e.Ports {
-		endpoint.Ports = append(endpoint.Ports, types.PortConfig{
-			Name:          portState.Name,
-			Protocol:      types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])),
-			PublishMode:   types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portState.PublishMode)])),
-			TargetPort:    portState.TargetPort,
-			PublishedPort: portState.PublishedPort,
-		})
+		endpoint.Ports = append(endpoint.Ports, swarmPortConfigToAPIPortConfig(portState))
 	}

 	for _, v := range e.VirtualIPs {
@@ -129,6 +117,16 @@ func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint {
 	return endpoint
 }

+func swarmPortConfigToAPIPortConfig(portConfig *swarmapi.PortConfig) types.PortConfig {
+	return types.PortConfig{
+		Name:          portConfig.Name,
+		Protocol:      types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portConfig.Protocol)])),
+		PublishMode:   types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portConfig.PublishMode)])),
+		TargetPort:    portConfig.TargetPort,
+		PublishedPort: portConfig.PublishedPort,
+	}
+}
+
 // BasicNetworkFromGRPC converts a grpc Network to a NetworkResource.
 func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource {
 	spec := n.Spec
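Both loops above now route through a single converter instead of two copies of the same five-field literal. A sketch of the call shape, with portsToAPI as a hypothetical helper in the same package:

	func portsToAPI(ports []*swarmapi.PortConfig) []types.PortConfig {
		apiPorts := make([]types.PortConfig, 0, len(ports))
		for _, p := range ports {
			// one call per entry replaces the duplicated struct literal
			apiPorts = append(apiPorts, swarmPortConfigToAPIPortConfig(p))
		}
		return apiPorts
	}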


@@ -162,8 +162,7 @@ func (c *Cluster) resolveSystemAddrViaSubnetCheck() (net.IP, error) {
 	var systemInterface string

 	// List Docker-managed subnets
-	v4Subnets := c.config.NetworkSubnetsProvider.V4Subnets()
-	v6Subnets := c.config.NetworkSubnetsProvider.V6Subnets()
+	v4Subnets, v6Subnets := c.config.NetworkSubnetsProvider.Subnets()

 ifaceLoop:
 	for _, intf := range interfaces {


@@ -48,19 +48,16 @@ func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]
 // GetNetwork returns a cluster network by an ID.
 func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return apitypes.NetworkResource{}, c.errNoManager(state)
-	}
+	var network *swarmapi.Network

-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	network, err := getNetwork(ctx, state.controlClient, input)
-	if err != nil {
+	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		n, err := getNetwork(ctx, state.controlClient, input)
+		if err != nil {
+			return err
+		}
+		network = n
+		return nil
+	}); err != nil {
 		return apitypes.NetworkResource{}, err
 	}
 	return convert.BasicNetworkFromGRPC(*network), nil
@@ -224,51 +221,38 @@ func (c *Cluster) DetachNetwork(target string, containerID string) error {
 // CreateNetwork creates a new cluster managed network.
 func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return "", c.errNoManager(state)
-	}
-
 	if runconfig.IsPreDefinedNetwork(s.Name) {
 		err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name)
 		return "", apierrors.NewRequestForbiddenError(err)
 	}

-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	networkSpec := convert.BasicNetworkCreateToGRPC(s)
-	r, err := state.controlClient.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec})
-	if err != nil {
+	var resp *swarmapi.CreateNetworkResponse
+	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		networkSpec := convert.BasicNetworkCreateToGRPC(s)
+		r, err := state.controlClient.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec})
+		if err != nil {
+			return err
+		}
+		resp = r
+		return nil
+	}); err != nil {
 		return "", err
 	}
-
-	return r.Network.ID, nil
+	return resp.Network.ID, nil
 }

 // RemoveNetwork removes a cluster network.
 func (c *Cluster) RemoveNetwork(input string) error {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	network, err := getNetwork(ctx, state.controlClient, input)
-	if err != nil {
-		return err
-	}
-
-	_, err = state.controlClient.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID})
-	return err
+	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		network, err := getNetwork(ctx, state.controlClient, input)
+		if err != nil {
+			return err
+		}
+
+		_, err = state.controlClient.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID})
+		return err
+	})
 }

 func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error {


@@ -6,6 +6,7 @@ import (
 	types "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/daemon/cluster/convert"
 	swarmapi "github.com/docker/swarmkit/api"
+	"golang.org/x/net/context"
 )

 // GetNodes returns a list of all nodes known to a cluster.
@@ -43,78 +44,61 @@ func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, erro
 // GetNode returns a node based on an ID.
 func (c *Cluster) GetNode(input string) (types.Node, error) {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return types.Node{}, c.errNoManager(state)
-	}
+	var node *swarmapi.Node

-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	node, err := getNode(ctx, state.controlClient, input)
-	if err != nil {
+	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		n, err := getNode(ctx, state.controlClient, input)
+		if err != nil {
+			return err
+		}
+		node = n
+		return nil
+	}); err != nil {
 		return types.Node{}, err
 	}
 	return convert.NodeFromGRPC(*node), nil
 }

 // UpdateNode updates existing nodes properties.
 func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec) error {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return c.errNoManager(state)
-	}
-
-	nodeSpec, err := convert.NodeSpecToGRPC(spec)
-	if err != nil {
-		return apierrors.NewBadRequestError(err)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	currentNode, err := getNode(ctx, state.controlClient, input)
-	if err != nil {
-		return err
-	}
-
-	_, err = state.controlClient.UpdateNode(
-		ctx,
-		&swarmapi.UpdateNodeRequest{
-			NodeID: currentNode.ID,
-			Spec:   &nodeSpec,
-			NodeVersion: &swarmapi.Version{
-				Index: version,
-			},
-		},
-	)
-	return err
+	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		nodeSpec, err := convert.NodeSpecToGRPC(spec)
+		if err != nil {
+			return apierrors.NewBadRequestError(err)
+		}
+
+		currentNode, err := getNode(ctx, state.controlClient, input)
+		if err != nil {
+			return err
+		}
+
+		_, err = state.controlClient.UpdateNode(
+			ctx,
+			&swarmapi.UpdateNodeRequest{
+				NodeID: currentNode.ID,
+				Spec:   &nodeSpec,
+				NodeVersion: &swarmapi.Version{
+					Index: version,
+				},
+			},
+		)
+		return err
+	})
 }

 // RemoveNode removes a node from a cluster
 func (c *Cluster) RemoveNode(input string, force bool) error {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	node, err := getNode(ctx, state.controlClient, input)
-	if err != nil {
-		return err
-	}
-
-	_, err = state.controlClient.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force})
-	return err
+	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		node, err := getNode(ctx, state.controlClient, input)
+		if err != nil {
+			return err
+		}
+
+		_, err = state.controlClient.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force})
+		return err
+	})
 }


@@ -5,23 +5,21 @@ import (
 	types "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/daemon/cluster/convert"
 	swarmapi "github.com/docker/swarmkit/api"
+	"golang.org/x/net/context"
 )

 // GetSecret returns a secret from a managed swarm cluster
 func (c *Cluster) GetSecret(input string) (types.Secret, error) {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
+	var secret *swarmapi.Secret

-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return types.Secret{}, c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	secret, err := getSecret(ctx, state.controlClient, input)
-	if err != nil {
+	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		s, err := getSecret(ctx, state.controlClient, input)
+		if err != nil {
+			return err
+		}
+		secret = s
+		return nil
+	}); err != nil {
 		return types.Secret{}, err
 	}
 	return convert.SecretFromGRPC(secret), nil
@@ -61,77 +59,54 @@ func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret
 // CreateSecret creates a new secret in a managed swarm cluster.
 func (c *Cluster) CreateSecret(s types.SecretSpec) (string, error) {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return "", c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	secretSpec := convert.SecretSpecToGRPC(s)
-
-	r, err := state.controlClient.CreateSecret(ctx,
-		&swarmapi.CreateSecretRequest{Spec: &secretSpec})
-	if err != nil {
+	var resp *swarmapi.CreateSecretResponse
+	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		secretSpec := convert.SecretSpecToGRPC(s)
+
+		r, err := state.controlClient.CreateSecret(ctx,
+			&swarmapi.CreateSecretRequest{Spec: &secretSpec})
+		if err != nil {
+			return err
+		}
+		resp = r
+		return nil
+	}); err != nil {
 		return "", err
 	}
-
-	return r.Secret.ID, nil
+	return resp.Secret.ID, nil
 }

 // RemoveSecret removes a secret from a managed swarm cluster.
 func (c *Cluster) RemoveSecret(input string) error {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	secret, err := getSecret(ctx, state.controlClient, input)
-	if err != nil {
-		return err
-	}
-
-	req := &swarmapi.RemoveSecretRequest{
-		SecretID: secret.ID,
-	}
-
-	_, err = state.controlClient.RemoveSecret(ctx, req)
-	return err
+	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		secret, err := getSecret(ctx, state.controlClient, input)
+		if err != nil {
+			return err
+		}
+
+		req := &swarmapi.RemoveSecretRequest{
+			SecretID: secret.ID,
+		}
+
+		_, err = state.controlClient.RemoveSecret(ctx, req)
+		return err
+	})
 }

 // UpdateSecret updates a secret in a managed swarm cluster.
 // Note: this is not exposed to the CLI but is available from the API only
 func (c *Cluster) UpdateSecret(id string, version uint64, spec types.SecretSpec) error {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	secretSpec := convert.SecretSpecToGRPC(spec)
-
-	_, err := state.controlClient.UpdateSecret(ctx,
-		&swarmapi.UpdateSecretRequest{
-			SecretID: id,
-			SecretVersion: &swarmapi.Version{
-				Index: version,
-			},
-			Spec: &secretSpec,
-		})
-	return err
+	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		secretSpec := convert.SecretSpecToGRPC(spec)
+
+		_, err := state.controlClient.UpdateSecret(ctx,
+			&swarmapi.UpdateSecretRequest{
+				SecretID: id,
+				SecretVersion: &swarmapi.Version{
+					Index: version,
+				},
+				Spec: &secretSpec,
+			})
+		return err
+	})
 }


@@ -59,19 +59,15 @@ func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Serv
 // GetService returns a service based on an ID or name.
 func (c *Cluster) GetService(input string) (types.Service, error) {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return types.Service{}, c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	service, err := getService(ctx, state.controlClient, input)
-	if err != nil {
+	var service *swarmapi.Service
+	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		s, err := getService(ctx, state.controlClient, input)
+		if err != nil {
+			return err
+		}
+		service = s
+		return nil
+	}); err != nil {
 		return types.Service{}, err
 	}
 	return convert.ServiceFromGRPC(*service), nil
@@ -79,187 +75,165 @@ func (c *Cluster) GetService(input string) (types.Service, error) {
 // CreateService creates a new service in a managed swarm cluster.
 func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (*apitypes.ServiceCreateResponse, error) {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return nil, c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	err := c.populateNetworkID(ctx, state.controlClient, &s)
-	if err != nil {
-		return nil, err
-	}
-
-	serviceSpec, err := convert.ServiceSpecToGRPC(s)
-	if err != nil {
-		return nil, apierrors.NewBadRequestError(err)
-	}
-
-	ctnr := serviceSpec.Task.GetContainer()
-	if ctnr == nil {
-		return nil, errors.New("service does not use container tasks")
-	}
-
-	if encodedAuth != "" {
-		ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
-	}
-
-	// retrieve auth config from encoded auth
-	authConfig := &apitypes.AuthConfig{}
-	if encodedAuth != "" {
-		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
-			logrus.Warnf("invalid authconfig: %v", err)
-		}
-	}
-
-	resp := &apitypes.ServiceCreateResponse{}
-
-	// pin image by digest
-	if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
-		digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
-		if err != nil {
-			logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
-			resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()))
-		} else if ctnr.Image != digestImage {
-			logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
-			ctnr.Image = digestImage
-		} else {
-			logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image)
-		}
-	}
-
-	r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
-	if err != nil {
-		return nil, err
-	}
-
-	resp.ID = r.Service.ID
-	return resp, nil
+	var resp *apitypes.ServiceCreateResponse
+	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		err := c.populateNetworkID(ctx, state.controlClient, &s)
+		if err != nil {
+			return err
+		}
+
+		serviceSpec, err := convert.ServiceSpecToGRPC(s)
+		if err != nil {
+			return apierrors.NewBadRequestError(err)
+		}
+
+		ctnr := serviceSpec.Task.GetContainer()
+		if ctnr == nil {
+			return errors.New("service does not use container tasks")
+		}
+
+		if encodedAuth != "" {
+			ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
+		}
+
+		// retrieve auth config from encoded auth
+		authConfig := &apitypes.AuthConfig{}
+		if encodedAuth != "" {
+			if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
+				logrus.Warnf("invalid authconfig: %v", err)
+			}
+		}
+
+		resp = &apitypes.ServiceCreateResponse{}
+
+		// pin image by digest
+		if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
+			digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
+			if err != nil {
+				logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
+				resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()))
+			} else if ctnr.Image != digestImage {
+				logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
+				ctnr.Image = digestImage
+			} else {
+				logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image)
+			}
+		}
+
+		r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
+		if err != nil {
+			return err
+		}
+
+		resp.ID = r.Service.ID
+		return nil
+	})
+	return resp, err
 }

 // UpdateService updates existing service to match new properties.
 func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, encodedAuth string, registryAuthFrom string) (*apitypes.ServiceUpdateResponse, error) {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return nil, c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	err := c.populateNetworkID(ctx, state.controlClient, &spec)
-	if err != nil {
-		return nil, err
-	}
-
-	serviceSpec, err := convert.ServiceSpecToGRPC(spec)
-	if err != nil {
-		return nil, apierrors.NewBadRequestError(err)
-	}
-
-	currentService, err := getService(ctx, state.controlClient, serviceIDOrName)
-	if err != nil {
-		return nil, err
-	}
-
-	newCtnr := serviceSpec.Task.GetContainer()
-	if newCtnr == nil {
-		return nil, errors.New("service does not use container tasks")
-	}
-
-	if encodedAuth != "" {
-		newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
-	} else {
-		// this is needed because if the encodedAuth isn't being updated then we
-		// shouldn't lose it, and continue to use the one that was already present
-		var ctnr *swarmapi.ContainerSpec
-		switch registryAuthFrom {
-		case apitypes.RegistryAuthFromSpec, "":
-			ctnr = currentService.Spec.Task.GetContainer()
-		case apitypes.RegistryAuthFromPreviousSpec:
-			if currentService.PreviousSpec == nil {
-				return nil, errors.New("service does not have a previous spec")
-			}
-			ctnr = currentService.PreviousSpec.Task.GetContainer()
-		default:
-			return nil, errors.New("unsupported registryAuthFrom value")
-		}
-		if ctnr == nil {
-			return nil, errors.New("service does not use container tasks")
-		}
-		newCtnr.PullOptions = ctnr.PullOptions
-		// update encodedAuth so it can be used to pin image by digest
-		if ctnr.PullOptions != nil {
-			encodedAuth = ctnr.PullOptions.RegistryAuth
-		}
-	}
-
-	// retrieve auth config from encoded auth
-	authConfig := &apitypes.AuthConfig{}
-	if encodedAuth != "" {
-		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
-			logrus.Warnf("invalid authconfig: %v", err)
-		}
-	}
-
-	resp := &apitypes.ServiceUpdateResponse{}
-
-	// pin image by digest
-	if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
-		digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
-		if err != nil {
-			logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
-			resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()))
-		} else if newCtnr.Image != digestImage {
-			logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
-			newCtnr.Image = digestImage
-		} else {
-			logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
-		}
-	}
-
-	_, err = state.controlClient.UpdateService(
-		ctx,
-		&swarmapi.UpdateServiceRequest{
-			ServiceID: currentService.ID,
-			Spec:      &serviceSpec,
-			ServiceVersion: &swarmapi.Version{
-				Index: version,
-			},
-		},
-	)
+	var resp *apitypes.ServiceUpdateResponse
+
+	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		err := c.populateNetworkID(ctx, state.controlClient, &spec)
+		if err != nil {
+			return err
+		}
+
+		serviceSpec, err := convert.ServiceSpecToGRPC(spec)
+		if err != nil {
+			return apierrors.NewBadRequestError(err)
+		}
+
+		currentService, err := getService(ctx, state.controlClient, serviceIDOrName)
+		if err != nil {
+			return err
+		}
+
+		newCtnr := serviceSpec.Task.GetContainer()
+		if newCtnr == nil {
+			return errors.New("service does not use container tasks")
+		}
+
+		if encodedAuth != "" {
+			newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
+		} else {
+			// this is needed because if the encodedAuth isn't being updated then we
+			// shouldn't lose it, and continue to use the one that was already present
+			var ctnr *swarmapi.ContainerSpec
+			switch registryAuthFrom {
+			case apitypes.RegistryAuthFromSpec, "":
+				ctnr = currentService.Spec.Task.GetContainer()
+			case apitypes.RegistryAuthFromPreviousSpec:
+				if currentService.PreviousSpec == nil {
+					return errors.New("service does not have a previous spec")
+				}
+				ctnr = currentService.PreviousSpec.Task.GetContainer()
+			default:
+				return errors.New("unsupported registryAuthFrom value")
+			}
+			if ctnr == nil {
+				return errors.New("service does not use container tasks")
+			}
+			newCtnr.PullOptions = ctnr.PullOptions
+			// update encodedAuth so it can be used to pin image by digest
+			if ctnr.PullOptions != nil {
+				encodedAuth = ctnr.PullOptions.RegistryAuth
+			}
+		}
+
+		// retrieve auth config from encoded auth
+		authConfig := &apitypes.AuthConfig{}
+		if encodedAuth != "" {
+			if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
+				logrus.Warnf("invalid authconfig: %v", err)
+			}
+		}
+
+		resp = &apitypes.ServiceUpdateResponse{}
+
+		// pin image by digest
+		if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
+			digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
+			if err != nil {
+				logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
+				resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()))
+			} else if newCtnr.Image != digestImage {
+				logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
+				newCtnr.Image = digestImage
+			} else {
+				logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
+			}
+		}
+
+		_, err = state.controlClient.UpdateService(
+			ctx,
+			&swarmapi.UpdateServiceRequest{
+				ServiceID: currentService.ID,
+				Spec:      &serviceSpec,
+				ServiceVersion: &swarmapi.Version{
+					Index: version,
+				},
+			},
+		)
+		return err
+	})
 	return resp, err
 }

 // RemoveService removes a service from a managed swarm cluster.
 func (c *Cluster) RemoveService(input string) error {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	service, err := getService(ctx, state.controlClient, input)
-	if err != nil {
-		return err
-	}
-
-	_, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID})
-	return err
+	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		service, err := getService(ctx, state.controlClient, input)
+		if err != nil {
+			return err
+		}
+
+		_, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID})
+		return err
+	})
 }

 // ServiceLogs collects service logs and writes them back to `config.OutStream`


@@ -187,95 +187,75 @@ func (c *Cluster) Join(req types.JoinRequest) error {
 // Inspect retrieves the configuration properties of a managed swarm cluster.
 func (c *Cluster) Inspect() (types.Swarm, error) {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return types.Swarm{}, c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	swarm, err := getSwarm(ctx, state.controlClient)
-	if err != nil {
+	var swarm *swarmapi.Cluster
+	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		s, err := getSwarm(ctx, state.controlClient)
+		if err != nil {
+			return err
+		}
+		swarm = s
+		return nil
+	}); err != nil {
 		return types.Swarm{}, err
 	}
 	return convert.SwarmFromGRPC(*swarm), nil
 }

 // Update updates configuration of a managed swarm cluster.
 func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	swarm, err := getSwarm(ctx, state.controlClient)
-	if err != nil {
-		return err
-	}
-
-	// In update, client should provide the complete spec of the swarm, including
-	// Name and Labels. If a field is specified with 0 or nil, then the default value
-	// will be used to swarmkit.
-	clusterSpec, err := convert.SwarmSpecToGRPC(spec)
-	if err != nil {
-		return apierrors.NewBadRequestError(err)
-	}
-
-	_, err = state.controlClient.UpdateCluster(
-		ctx,
-		&swarmapi.UpdateClusterRequest{
-			ClusterID: swarm.ID,
-			Spec:      &clusterSpec,
-			ClusterVersion: &swarmapi.Version{
-				Index: version,
-			},
-			Rotation: swarmapi.KeyRotation{
-				WorkerJoinToken:  flags.RotateWorkerToken,
-				ManagerJoinToken: flags.RotateManagerToken,
-				ManagerUnlockKey: flags.RotateManagerUnlockKey,
-			},
-		},
-	)
-	return err
+	return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		swarm, err := getSwarm(ctx, state.controlClient)
+		if err != nil {
+			return err
+		}
+
+		// In update, client should provide the complete spec of the swarm, including
+		// Name and Labels. If a field is specified with 0 or nil, then the default value
+		// will be used to swarmkit.
+		clusterSpec, err := convert.SwarmSpecToGRPC(spec)
+		if err != nil {
+			return apierrors.NewBadRequestError(err)
+		}
+
+		_, err = state.controlClient.UpdateCluster(
+			ctx,
+			&swarmapi.UpdateClusterRequest{
+				ClusterID: swarm.ID,
+				Spec:      &clusterSpec,
+				ClusterVersion: &swarmapi.Version{
+					Index: version,
+				},
+				Rotation: swarmapi.KeyRotation{
+					WorkerJoinToken:  flags.RotateWorkerToken,
+					ManagerJoinToken: flags.RotateManagerToken,
+					ManagerUnlockKey: flags.RotateManagerUnlockKey,
+				},
+			},
+		)
+		return err
+	})
 }

 // GetUnlockKey returns the unlock key for the swarm.
 func (c *Cluster) GetUnlockKey() (string, error) {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return "", c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	client := swarmapi.NewCAClient(state.grpcConn)
-
-	r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{})
-	if err != nil {
+	var resp *swarmapi.GetUnlockKeyResponse
+	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		client := swarmapi.NewCAClient(state.grpcConn)
+
+		r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{})
+		if err != nil {
+			return err
+		}
+		resp = r
+		return nil
+	}); err != nil {
 		return "", err
 	}

-	if len(r.UnlockKey) == 0 {
+	if len(resp.UnlockKey) == 0 {
 		// no key
 		return "", nil
 	}
-
-	return encryption.HumanReadableKey(r.UnlockKey), nil
+	return encryption.HumanReadableKey(resp.UnlockKey), nil
 }

 // UnlockSwarm provides a key to decrypt data that is encrypted at rest.


@@ -6,6 +6,7 @@ import (
 	types "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/daemon/cluster/convert"
 	swarmapi "github.com/docker/swarmkit/api"
+	"golang.org/x/net/context"
 )

 // GetTasks returns a list of tasks matching the filter options.
@@ -71,19 +72,15 @@ func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, erro
 // GetTask returns a task by an ID.
 func (c *Cluster) GetTask(input string) (types.Task, error) {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-
-	state := c.currentNodeState()
-	if !state.IsActiveManager() {
-		return types.Task{}, c.errNoManager(state)
-	}
-
-	ctx, cancel := c.getRequestContext()
-	defer cancel()
-
-	task, err := getTask(ctx, state.controlClient, input)
-	if err != nil {
+	var task *swarmapi.Task
+	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
+		t, err := getTask(ctx, state.controlClient, input)
+		if err != nil {
+			return err
+		}
+		task = t
+		return nil
+	}); err != nil {
 		return types.Task{}, err
 	}
 	return convert.TaskFromGRPC(*task), nil


@@ -55,6 +55,16 @@ func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, e
 	return daemon.containers.Get(containerID), nil
 }

+// checkContainer makes sure the specified container validates the specified conditions
+func (daemon *Daemon) checkContainer(container *container.Container, conditions ...func(*container.Container) error) error {
+	for _, condition := range conditions {
+		if err := condition(container); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // Exists returns a true if a container of the specified ID or name exists,
 // false otherwise.
 func (daemon *Daemon) Exists(id string) bool {
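checkContainer threads a container through a variadic list of condition functions and returns the first error it hits; the getIpcContainer/getPidContainer hunk further down supplies containerIsRunning and containerIsNotRestarting as conditions. A sketch of composing it with an extra condition (containerIsNotPaused and canJoinNamespaces are illustrations, not part of this commit):

	func containerIsNotPaused(c *container.Container) error {
		if c.IsPaused() {
			return errors.Errorf("container %s is paused", c.ID)
		}
		return nil
	}

	func (daemon *Daemon) canJoinNamespaces(ctr *container.Container) error {
		// conditions are evaluated in order; the first failure wins
		return daemon.checkContainer(ctr, containerIsRunning, containerIsNotRestarting, containerIsNotPaused)
	}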


@@ -829,16 +829,24 @@ func (daemon *Daemon) disconnectFromNetwork(container *container.Container, n li
 	delete(container.NetworkSettings.Networks, n.Name())

-	if daemon.clusterProvider != nil && n.Info().Dynamic() && !container.Managed {
-		if err := daemon.clusterProvider.DetachNetwork(n.Name(), container.ID); err != nil {
-			logrus.Warnf("error detaching from network %s: %v", n.Name(), err)
-			if err := daemon.clusterProvider.DetachNetwork(n.ID(), container.ID); err != nil {
-				logrus.Warnf("error detaching from network %s: %v", n.ID(), err)
-			}
-		}
-	}
+	daemon.tryDetachContainerFromClusterNetwork(n, container)

 	return nil
 }

+func (daemon *Daemon) tryDetachContainerFromClusterNetwork(network libnetwork.Network, container *container.Container) {
+	if daemon.clusterProvider != nil && network.Info().Dynamic() && !container.Managed {
+		if err := daemon.clusterProvider.DetachNetwork(network.Name(), container.ID); err != nil {
+			logrus.Warnf("error detaching from network %s: %v", network.Name(), err)
+			if err := daemon.clusterProvider.DetachNetwork(network.ID(), container.ID); err != nil {
+				logrus.Warnf("error detaching from network %s: %v", network.ID(), err)
+			}
+		}
+	}
+	attributes := map[string]string{
+		"container": container.ID,
+	}
+	daemon.LogNetworkEventWithAttributes(network, "disconnect", attributes)
+}
+
 func (daemon *Daemon) initializeNetworking(container *container.Container) error {
@@ -931,19 +939,7 @@ func (daemon *Daemon) releaseNetwork(container *container.Container) {
 	}

 	for _, nw := range networks {
-		if daemon.clusterProvider != nil && nw.Info().Dynamic() && !container.Managed {
-			if err := daemon.clusterProvider.DetachNetwork(nw.Name(), container.ID); err != nil {
-				logrus.Warnf("error detaching from network %s: %v", nw.Name(), err)
-				if err := daemon.clusterProvider.DetachNetwork(nw.ID(), container.ID); err != nil {
-					logrus.Warnf("error detaching from network %s: %v", nw.ID(), err)
-				}
-			}
-		}
-
-		attributes := map[string]string{
-			"container": container.ID,
-		}
-		daemon.LogNetworkEventWithAttributes(nw, "disconnect", attributes)
+		daemon.tryDetachContainerFromClusterNetwork(nw, container)
 	}
 	networkActions.WithValues("release").UpdateSince(start)
 }


@@ -58,32 +58,34 @@ func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]s
 func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) {
 	containerID := container.HostConfig.IpcMode.Container()
-	c, err := daemon.GetContainer(containerID)
+	container, err := daemon.GetContainer(containerID)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrapf(err, "cannot join IPC of a non running container: %s", container.ID)
 	}
-	if !c.IsRunning() {
-		return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID)
-	}
-	if c.IsRestarting() {
-		return nil, errContainerIsRestarting(container.ID)
-	}
-	return c, nil
+	return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting)
 }

 func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) {
 	containerID := container.HostConfig.PidMode.Container()
-	c, err := daemon.GetContainer(containerID)
+	container, err := daemon.GetContainer(containerID)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrapf(err, "cannot join PID of a non running container: %s", container.ID)
 	}
-	if !c.IsRunning() {
-		return nil, fmt.Errorf("cannot join PID of a non running container: %s", containerID)
-	}
-	if c.IsRestarting() {
-		return nil, errContainerIsRestarting(container.ID)
-	}
-	return c, nil
+	return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting)
+}
+
+func containerIsRunning(c *container.Container) error {
+	if !c.IsRunning() {
+		return errors.Errorf("container %s is not running", c.ID)
+	}
+	return nil
+}
+
+func containerIsNotRestarting(c *container.Container) error {
+	if c.IsRestarting() {
+		return errContainerIsRestarting(c.ID)
+	}
+	return nil
 }

 func (daemon *Daemon) setupIpcDirs(c *container.Container) error {


@@ -878,40 +878,28 @@ func (daemon *Daemon) Unmount(container *container.Container) error {
 	return nil
 }

-// V4Subnets returns the IPv4 subnets of networks that are managed by Docker.
-func (daemon *Daemon) V4Subnets() []net.IPNet {
-	var subnets []net.IPNet
+// Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
+func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
+	var v4Subnets []net.IPNet
+	var v6Subnets []net.IPNet

 	managedNetworks := daemon.netController.Networks()

 	for _, managedNetwork := range managedNetworks {
-		v4Infos, _ := managedNetwork.Info().IpamInfo()
-		for _, v4Info := range v4Infos {
-			if v4Info.IPAMData.Pool != nil {
-				subnets = append(subnets, *v4Info.IPAMData.Pool)
+		v4infos, v6infos := managedNetwork.Info().IpamInfo()
+		for _, info := range v4infos {
+			if info.IPAMData.Pool != nil {
+				v4Subnets = append(v4Subnets, *info.IPAMData.Pool)
+			}
+		}
+		for _, info := range v6infos {
+			if info.IPAMData.Pool != nil {
+				v6Subnets = append(v6Subnets, *info.IPAMData.Pool)
 			}
 		}
 	}

-	return subnets
-}
-
-// V6Subnets returns the IPv6 subnets of networks that are managed by Docker.
-func (daemon *Daemon) V6Subnets() []net.IPNet {
-	var subnets []net.IPNet
-
-	managedNetworks := daemon.netController.Networks()
-
-	for _, managedNetwork := range managedNetworks {
-		_, v6Infos := managedNetwork.Info().IpamInfo()
-		for _, v6Info := range v6Infos {
-			if v6Info.IPAMData.Pool != nil {
-				subnets = append(subnets, *v6Info.IPAMData.Pool)
-			}
-		}
-	}
-
-	return subnets
+	return v4Subnets, v6Subnets
 }

 // GraphDriverName returns the name of the graph driver used by the layer.Store
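This is the provider side of the NetworkSubnetsProvider change in the first hunk: a single IpamInfo() walk now fills both slices, and callers such as resolveSystemAddrViaSubnetCheck get both families from one call. A sketch of the kind of filtering the interface comment describes, with isDockerManaged as a hypothetical helper:

	func (daemon *Daemon) isDockerManaged(candidate net.IP) bool {
		v4Subnets, v6Subnets := daemon.Subnets()
		for _, subnet := range append(v4Subnets, v6Subnets...) {
			if subnet.Contains(candidate) {
				return true // candidate sits inside a Docker-managed subnet
			}
		}
		return false
	}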


@@ -1276,19 +1276,21 @@ func (daemon *Daemon) initCgroupsPath(path string) error {
 	path = filepath.Join(root, path)
 	sysinfo := sysinfo.New(true)
-	if sysinfo.CPURealtimePeriod && daemon.configStore.CPURealtimePeriod != 0 {
-		if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
-			return err
-		}
-		if err := ioutil.WriteFile(filepath.Join(path, "cpu.rt_period_us"), []byte(strconv.FormatInt(daemon.configStore.CPURealtimePeriod, 10)), 0700); err != nil {
-			return err
-		}
+	if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
+		return err
 	}
-	if sysinfo.CPURealtimeRuntime && daemon.configStore.CPURealtimeRuntime != 0 {
+	if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func maybeCreateCPURealTimeFile(sysinfoPresent bool, configValue int64, file string, path string) error {
+	if sysinfoPresent && configValue != 0 {
 		if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
 			return err
 		}
-		if err := ioutil.WriteFile(filepath.Join(path, "cpu.rt_runtime_us"), []byte(strconv.FormatInt(daemon.configStore.CPURealtimeRuntime, 10)), 0700); err != nil {
+		if err := ioutil.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700); err != nil {
 			return err
 		}
 	}


@@ -24,29 +24,19 @@ func NewRefCounter(c Checker) *RefCounter {
 // Increment increases the ref count for the given id and returns the current count
 func (c *RefCounter) Increment(path string) int {
-	c.mu.Lock()
-	m := c.counts[path]
-	if m == nil {
-		m = &minfo{}
-		c.counts[path] = m
-	}
-	// if we are checking this path for the first time check to make sure
-	// if it was already mounted on the system and make sure we have a correct ref
-	// count if it is mounted as it is in use.
-	if !m.check {
-		m.check = true
-		if c.checker.IsMounted(path) {
-			m.count++
-		}
-	}
-	m.count++
-	count := m.count
-	c.mu.Unlock()
-	return count
+	return c.incdec(path, func(minfo *minfo) {
+		minfo.count++
+	})
 }

 // Decrement decreases the ref count for the given id and returns the current count
 func (c *RefCounter) Decrement(path string) int {
+	return c.incdec(path, func(minfo *minfo) {
+		minfo.count--
+	})
+}
+
+func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int {
 	c.mu.Lock()
 	m := c.counts[path]
 	if m == nil {
@@ -62,7 +52,7 @@ func (c *RefCounter) Decrement(path string) int {
 			m.count++
 		}
 	}
-	m.count--
+	infoOp(m)
 	count := m.count
 	c.mu.Unlock()
 	return count
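Increment and Decrement now differ only in the single mutation applied while the lock is held, so incdec takes that step as a closure and keeps the lazy IsMounted seeding in one place. As an illustration of the design, not something this commit adds, a no-op closure would turn the same path into a plain read:

	// reads the current count without changing it, still applying the
	// first-use IsMounted seeding under the same lock
	func (c *RefCounter) current(path string) int {
		return c.incdec(path, func(m *minfo) {})
	}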


@@ -68,26 +68,14 @@ func (d *graphDriverProxy) String() string {
 }

 func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error {
-	args := &graphDriverRequest{
-		ID:     id,
-		Parent: parent,
-	}
-	if opts != nil {
-		args.MountLabel = opts.MountLabel
-		args.StorageOpt = opts.StorageOpt
-	}
-
-	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
-		return err
-	}
-	if ret.Err != "" {
-		return errors.New(ret.Err)
-	}
-	return nil
+	return d.create("GraphDriver.CreateReadWrite", id, parent, opts)
 }

 func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error {
+	return d.create("GraphDriver.Create", id, parent, opts)
+}
+
+func (d *graphDriverProxy) create(method, id, parent string, opts *CreateOpts) error {
 	args := &graphDriverRequest{
 		ID:     id,
 		Parent: parent,
@@ -97,7 +85,7 @@ func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error {
 		args.StorageOpt = opts.StorageOpt
 	}
 	var ret graphDriverResponse
-	if err := d.p.Client().Call("GraphDriver.Create", args, &ret); err != nil {
+	if err := d.p.Client().Call(method, args, &ret); err != nil {
 		return err
 	}
 	if ret.Err != "" {


@@ -333,49 +333,13 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte
 	}

 	publishFilter := map[nat.Port]bool{}
-	err = psFilters.WalkValues("publish", func(value string) error {
-		if strings.Contains(value, ":") {
-			return fmt.Errorf("filter for 'publish' should not contain ':': %v", value)
-		}
-		//support two formats, original format <portnum>/[<proto>] or <startport-endport>/[<proto>]
-		proto, port := nat.SplitProtoPort(value)
-		start, end, err := nat.ParsePortRange(port)
-		if err != nil {
-			return fmt.Errorf("error while looking up for publish %v: %s", value, err)
-		}
-		for i := start; i <= end; i++ {
-			p, err := nat.NewPort(proto, strconv.FormatUint(i, 10))
-			if err != nil {
-				return fmt.Errorf("error while looking up for publish %v: %s", value, err)
-			}
-			publishFilter[p] = true
-		}
-		return nil
-	})
+	err = psFilters.WalkValues("publish", portOp("publish", publishFilter))
 	if err != nil {
 		return nil, err
 	}

 	exposeFilter := map[nat.Port]bool{}
-	err = psFilters.WalkValues("expose", func(value string) error {
-		if strings.Contains(value, ":") {
-			return fmt.Errorf("filter for 'expose' should not contain ':': %v", value)
-		}
-		//support two formats, original format <portnum>/[<proto>] or <startport-endport>/[<proto>]
-		proto, port := nat.SplitProtoPort(value)
-		start, end, err := nat.ParsePortRange(port)
-		if err != nil {
-			return fmt.Errorf("error while looking up for 'expose' %v: %s", value, err)
-		}
-		for i := start; i <= end; i++ {
-			p, err := nat.NewPort(proto, strconv.FormatUint(i, 10))
-			if err != nil {
-				return fmt.Errorf("error while looking up for 'expose' %v: %s", value, err)
-			}
-			exposeFilter[p] = true
-		}
-		return nil
-	})
+	err = psFilters.WalkValues("expose", portOp("expose", exposeFilter))
 	if err != nil {
 		return nil, err
 	}
@@ -395,6 +359,27 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte
 		names: daemon.nameIndex.GetAll(),
 	}, nil
 }

+func portOp(key string, filter map[nat.Port]bool) func(value string) error {
+	return func(value string) error {
+		if strings.Contains(value, ":") {
+			return fmt.Errorf("filter for '%s' should not contain ':': %s", key, value)
+		}
+		//support two formats, original format <portnum>/[<proto>] or <startport-endport>/[<proto>]
+		proto, port := nat.SplitProtoPort(value)
+		start, end, err := nat.ParsePortRange(port)
+		if err != nil {
+			return fmt.Errorf("error while looking up for %s %s: %s", key, value, err)
+		}
+		for i := start; i <= end; i++ {
+			p, err := nat.NewPort(proto, strconv.FormatUint(i, 10))
+			if err != nil {
+				return fmt.Errorf("error while looking up for %s %s: %s", key, value, err)
+			}
+			filter[p] = true
+		}
+		return nil
+	}
+}
+
 // includeContainerInList decides whether a container should be included in the output or not based in the filter.
 // It also decides if the iteration should be stopped or not.
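portOp returns a closure bound to a filter key and its destination map, so the "publish" and "expose" walks above share one parser for both accepted formats. A sketch of driving the returned closure directly; parsePublishFilter is a hypothetical wrapper, not part of this commit:

	func parsePublishFilter(values []string) (map[nat.Port]bool, error) {
		filter := map[nat.Port]bool{}
		op := portOp("publish", filter)
		for _, v := range values {
			// "8000-8002/tcp" expands to 8000/tcp, 8001/tcp and 8002/tcp
			if err := op(v); err != nil {
				return nil, err
			}
		}
		return filter, nil
	}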