소스 검색

Merge pull request #44704 from thaJeztah/api_hostconfig_RestartPolicyMode_enum

api/types/container: add RestartPolicyMode type and enum
Sebastiaan van Stijn 1 년 전
부모
커밋
70ae5c13ea

+ 9 - 0
api/types/container/errors.go

@@ -0,0 +1,9 @@
+package container
+
+type errInvalidParameter struct{ error }
+
+func (e *errInvalidParameter) InvalidParameter() {}
+
+func (e *errInvalidParameter) Unwrap() error {
+	return e.error
+}

+ 38 - 5
api/types/container/hostconfig.go

@@ -1,6 +1,7 @@
 package container // import "github.com/docker/docker/api/types/container"
 
 import (
+	"fmt"
 	"strings"
 
 	"github.com/docker/docker/api/types/blkiodev"
@@ -271,33 +272,42 @@ type DeviceMapping struct {
 
 // RestartPolicy represents the restart policies of the container.
 type RestartPolicy struct {
-	Name              string
+	Name              RestartPolicyMode
 	MaximumRetryCount int
 }
 
+type RestartPolicyMode string
+
+const (
+	RestartPolicyDisabled      RestartPolicyMode = "no"
+	RestartPolicyAlways        RestartPolicyMode = "always"
+	RestartPolicyOnFailure     RestartPolicyMode = "on-failure"
+	RestartPolicyUnlessStopped RestartPolicyMode = "unless-stopped"
+)
+
 // IsNone indicates whether the container has the "no" restart policy.
 // This means the container will not automatically restart when exiting.
 func (rp *RestartPolicy) IsNone() bool {
-	return rp.Name == "no" || rp.Name == ""
+	return rp.Name == RestartPolicyDisabled || rp.Name == ""
 }
 
 // IsAlways indicates whether the container has the "always" restart policy.
 // This means the container will automatically restart regardless of the exit status.
 func (rp *RestartPolicy) IsAlways() bool {
-	return rp.Name == "always"
+	return rp.Name == RestartPolicyAlways
 }
 
 // IsOnFailure indicates whether the container has the "on-failure" restart policy.
 // This means the container will automatically restart if it exits with a non-zero exit status.
 func (rp *RestartPolicy) IsOnFailure() bool {
-	return rp.Name == "on-failure"
+	return rp.Name == RestartPolicyOnFailure
 }
 
 // IsUnlessStopped indicates whether the container has the
 // "unless-stopped" restart policy. This means the container will
 // automatically restart unless user has put it to stopped state.
 func (rp *RestartPolicy) IsUnlessStopped() bool {
-	return rp.Name == "unless-stopped"
+	return rp.Name == RestartPolicyUnlessStopped
 }
 
 // IsSame compares two RestartPolicy to see if they are the same
@@ -305,6 +315,29 @@ func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
 	return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
 }
 
+// ValidateRestartPolicy validates the given RestartPolicy.
+func ValidateRestartPolicy(policy RestartPolicy) error {
+	switch policy.Name {
+	case RestartPolicyAlways, RestartPolicyUnlessStopped, RestartPolicyDisabled:
+		if policy.MaximumRetryCount != 0 {
+			return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be used with restart policy '%s'", policy.Name)}
+		}
+		return nil
+	case RestartPolicyOnFailure:
+		if policy.MaximumRetryCount < 0 {
+			return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")}
+		}
+		return nil
+	case "":
+		// Versions before v25.0.0 created an empty restart-policy "name" as
+		// default. Allow an empty name with "any" MaximumRetryCount for
+		// backward-compatibility.
+		return nil
+	default:
+		return &errInvalidParameter{fmt.Errorf("invalid restart policy: '%s'", policy.Name)}
+	}
+}
+
 // LogMode is a type to define the available modes for logging
 // These modes affect how logs are handled when log messages start piling up.
 type LogMode string

+ 105 - 0
api/types/container/hostconfig_test.go

@@ -0,0 +1,105 @@
+package container
+
+import (
+	"testing"
+
+	"github.com/docker/docker/errdefs"
+	"gotest.tools/v3/assert"
+	is "gotest.tools/v3/assert/cmp"
+)
+
+func TestValidateRestartPolicy(t *testing.T) {
+	tests := []struct {
+		name        string
+		input       RestartPolicy
+		expectedErr string
+	}{
+		{
+			name:  "empty",
+			input: RestartPolicy{},
+		},
+		{
+			name:        "empty with invalid MaxRestartCount (for backward compatibility)",
+			input:       RestartPolicy{MaximumRetryCount: 123},
+			expectedErr: "", // Allowed for backward compatibility
+		},
+		{
+			name:        "empty with negative MaxRestartCount)",
+			input:       RestartPolicy{MaximumRetryCount: -123},
+			expectedErr: "", // Allowed for backward compatibility
+		},
+		{
+			name:  "always",
+			input: RestartPolicy{Name: RestartPolicyAlways},
+		},
+		{
+			name:        "always with MaxRestartCount",
+			input:       RestartPolicy{Name: RestartPolicyAlways, MaximumRetryCount: 123},
+			expectedErr: "invalid restart policy: maximum retry count cannot be used with restart policy 'always'",
+		},
+		{
+			name:        "always with negative MaxRestartCount",
+			input:       RestartPolicy{Name: RestartPolicyAlways, MaximumRetryCount: -123},
+			expectedErr: "invalid restart policy: maximum retry count cannot be used with restart policy 'always'",
+		},
+		{
+			name:  "unless-stopped",
+			input: RestartPolicy{Name: RestartPolicyUnlessStopped},
+		},
+		{
+			name:        "unless-stopped with MaxRestartCount",
+			input:       RestartPolicy{Name: RestartPolicyUnlessStopped, MaximumRetryCount: 123},
+			expectedErr: "invalid restart policy: maximum retry count cannot be used with restart policy 'unless-stopped'",
+		},
+		{
+			name:        "unless-stopped with negative MaxRestartCount",
+			input:       RestartPolicy{Name: RestartPolicyUnlessStopped, MaximumRetryCount: -123},
+			expectedErr: "invalid restart policy: maximum retry count cannot be used with restart policy 'unless-stopped'",
+		},
+		{
+			name:  "disabled",
+			input: RestartPolicy{Name: RestartPolicyDisabled},
+		},
+		{
+			name:        "disabled with MaxRestartCount",
+			input:       RestartPolicy{Name: RestartPolicyDisabled, MaximumRetryCount: 123},
+			expectedErr: "invalid restart policy: maximum retry count cannot be used with restart policy 'no'",
+		},
+		{
+			name:        "disabled with negative MaxRestartCount",
+			input:       RestartPolicy{Name: RestartPolicyDisabled, MaximumRetryCount: -123},
+			expectedErr: "invalid restart policy: maximum retry count cannot be used with restart policy 'no'",
+		},
+		{
+			name:  "on-failure",
+			input: RestartPolicy{Name: RestartPolicyOnFailure},
+		},
+		{
+			name:  "on-failure with MaxRestartCount",
+			input: RestartPolicy{Name: RestartPolicyOnFailure, MaximumRetryCount: 123},
+		},
+		{
+			name:        "on-failure with negative MaxRestartCount",
+			input:       RestartPolicy{Name: RestartPolicyOnFailure, MaximumRetryCount: -123},
+			expectedErr: "invalid restart policy: maximum retry count cannot be negative",
+		},
+		{
+			name:        "unknown policy",
+			input:       RestartPolicy{Name: "I do not exist"},
+			expectedErr: "invalid restart policy: 'I do not exist'",
+		},
+	}
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			err := ValidateRestartPolicy(tc.input)
+			if tc.expectedErr == "" {
+				assert.Check(t, err)
+			} else {
+				assert.Check(t, is.ErrorType(err, errdefs.IsInvalidParameter))
+				assert.Check(t, is.Error(err, tc.expectedErr))
+			}
+		})
+	}
+}

+ 1 - 1
api/types/container/hostconfig_unix_test.go

@@ -221,7 +221,7 @@ func TestRestartPolicy(t *testing.T) {
 		{Name: "on-failure", MaximumRetryCount: 0}: {none: false, always: false, onFailure: true},
 	}
 	for policy, expected := range policies {
-		t.Run("policy="+policy.Name, func(t *testing.T) {
+		t.Run("policy="+string(policy.Name), func(t *testing.T) {
 			assert.Check(t, is.Equal(policy.IsNone(), expected.none))
 			assert.Check(t, is.Equal(policy.IsAlways(), expected.always))
 			assert.Check(t, is.Equal(policy.IsOnFailure(), expected.onFailure))

+ 1 - 20
daemon/container.go

@@ -298,7 +298,7 @@ func validateHostConfig(hostConfig *containertypes.HostConfig) error {
 	if err := validatePortBindings(hostConfig.PortBindings); err != nil {
 		return err
 	}
-	if err := validateRestartPolicy(hostConfig.RestartPolicy); err != nil {
+	if err := containertypes.ValidateRestartPolicy(hostConfig.RestartPolicy); err != nil {
 		return err
 	}
 	if err := validateCapabilities(hostConfig); err != nil {
@@ -362,25 +362,6 @@ func validatePortBindings(ports nat.PortMap) error {
 	return nil
 }
 
-func validateRestartPolicy(policy containertypes.RestartPolicy) error {
-	switch policy.Name {
-	case "always", "unless-stopped", "no":
-		if policy.MaximumRetryCount != 0 {
-			return errors.Errorf("maximum retry count cannot be used with restart policy '%s'", policy.Name)
-		}
-	case "on-failure":
-		if policy.MaximumRetryCount < 0 {
-			return errors.Errorf("maximum retry count cannot be negative")
-		}
-	case "":
-		// do nothing
-		return nil
-	default:
-		return errors.Errorf("invalid restart policy '%s'", policy.Name)
-	}
-	return nil
-}
-
 // translateWorkingDir translates the working-dir for the target platform,
 // and returns an error if the given path is not an absolute path.
 func translateWorkingDir(config *containertypes.Config) error {

+ 10 - 0
daemon/create.go

@@ -63,6 +63,16 @@ func (daemon *Daemon) containerCreate(ctx context.Context, daemonCfg *configStor
 		return containertypes.CreateResponse{}, errdefs.InvalidParameter(errors.New("Config cannot be empty in order to create a container"))
 	}
 
+	// Normalize some defaults. Doing this "ad-hoc" here for now, as there's
+	// only one field to migrate, but we should consider having a better
+	// location for this (and decide where in the flow would be most appropriate).
+	//
+	// TODO(thaJeztah): we should have a more visible, more canonical location for this.
+	if opts.params.HostConfig != nil && opts.params.HostConfig.RestartPolicy.Name == "" {
+		// Set the default restart-policy ("none") if no restart-policy was set.
+		opts.params.HostConfig.RestartPolicy.Name = containertypes.RestartPolicyDisabled
+	}
+
 	warnings, err := daemon.verifyContainerSettings(daemonCfg, opts.params.HostConfig, opts.params.Config, false)
 	if err != nil {
 		return containertypes.CreateResponse{Warnings: warnings}, errdefs.InvalidParameter(err)

+ 14 - 1
daemon/daemon.go

@@ -338,8 +338,21 @@ func (daemon *Daemon) restore(cfg *configStore) error {
 
 			baseLogger := log.G(context.TODO()).WithField("container", c.ID)
 
+			// Migrate containers that don't have the default ("no") restart-policy set.
+			// The RestartPolicy.Name field may be empty for containers that were
+			// created with versions before v25.0.0.
+			//
+			// We also need to set the MaximumRetryCount to 0, to prevent
+			// validation from failing (MaximumRetryCount is not allowed if
+			// no restart-policy ("none") is set).
+			if c.HostConfig != nil && c.HostConfig.RestartPolicy.Name == "" {
+				baseLogger.WithError(err).Debug("migrated restart-policy")
+				c.HostConfig.RestartPolicy.Name = containertypes.RestartPolicyDisabled
+				c.HostConfig.RestartPolicy.MaximumRetryCount = 0
+			}
+
 			if err := daemon.checkpointAndSave(c); err != nil {
-				baseLogger.WithError(err).Error("error saving backported mountspec to disk")
+				baseLogger.WithError(err).Error("failed to save migrated container config to disk")
 			}
 
 			daemon.setStateCounter(c)

+ 5 - 5
integration/container/daemon_linux_test.go

@@ -10,7 +10,7 @@ import (
 	"time"
 
 	"github.com/docker/docker/api/types"
-	containerapi "github.com/docker/docker/api/types/container"
+	containertypes "github.com/docker/docker/api/types/container"
 	realcontainer "github.com/docker/docker/container"
 	"github.com/docker/docker/integration/internal/container"
 	"github.com/docker/docker/testutil/daemon"
@@ -101,7 +101,7 @@ func TestDaemonRestartIpcMode(t *testing.T) {
 	// check the container is created with private ipc mode as per daemon default
 	cID := container.Run(ctx, t, c,
 		container.WithCmd("top"),
-		container.WithRestartPolicy("always"),
+		container.WithRestartPolicy(containertypes.RestartPolicyAlways),
 	)
 	defer c.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true})
 
@@ -199,7 +199,7 @@ func TestRestartDaemonWithRestartingContainer(t *testing.T) {
 	// Just create the container, no need to start it to be started.
 	// We really want to make sure there is no process running when docker starts back up.
 	// We will manipulate the on disk state later
-	id := container.Create(ctx, t, apiClient, container.WithRestartPolicy("always"), container.WithCmd("/bin/sh", "-c", "exit 1"))
+	id := container.Create(ctx, t, apiClient, container.WithRestartPolicy(containertypes.RestartPolicyAlways), container.WithCmd("/bin/sh", "-c", "exit 1"))
 
 	d.Stop(t)
 
@@ -212,7 +212,7 @@ func TestRestartDaemonWithRestartingContainer(t *testing.T) {
 
 	ctxTimeout, cancel := context.WithTimeout(ctx, 30*time.Second)
 	defer cancel()
-	chOk, chErr := apiClient.ContainerWait(ctxTimeout, id, containerapi.WaitConditionNextExit)
+	chOk, chErr := apiClient.ContainerWait(ctxTimeout, id, containertypes.WaitConditionNextExit)
 	select {
 	case <-chOk:
 	case err := <-chErr:
@@ -284,6 +284,6 @@ func TestHardRestartWhenContainerIsRunning(t *testing.T) {
 		}
 
 		stopTimeout := 0
-		assert.Assert(t, apiClient.ContainerStop(ctx, onFailure, containerapi.StopOptions{Timeout: &stopTimeout}))
+		assert.Assert(t, apiClient.ContainerStop(ctx, onFailure, containertypes.StopOptions{Timeout: &stopTimeout}))
 	})
 }

+ 2 - 1
integration/container/kill_test.go

@@ -6,6 +6,7 @@ import (
 	"testing"
 	"time"
 
+	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration/internal/container"
 	"github.com/docker/docker/testutil/request"
@@ -112,7 +113,7 @@ func TestKillWithStopSignalAndRestartPolicies(t *testing.T) {
 		t.Run(tc.doc, func(t *testing.T) {
 			ctx := context.Background()
 			id := container.Run(ctx, t, apiClient,
-				container.WithRestartPolicy("always"),
+				container.WithRestartPolicy(containertypes.RestartPolicyAlways),
 				func(c *container.TestContainerConfig) {
 					c.Config.StopSignal = tc.stopsignal
 				})

+ 1 - 1
integration/container/stop_test.go

@@ -24,7 +24,7 @@ func TestStopContainerWithRestartPolicyAlways(t *testing.T) {
 		container.Run(ctx, t, apiClient,
 			container.WithName(name),
 			container.WithCmd("false"),
-			container.WithRestartPolicy("always"),
+			container.WithRestartPolicy(containertypes.RestartPolicyAlways),
 		)
 	}
 

+ 8 - 7
integration/daemon/daemon_test.go

@@ -13,6 +13,7 @@ import (
 	"testing"
 
 	"github.com/docker/docker/api/types"
+	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/mount"
 	"github.com/docker/docker/api/types/volume"
 	"github.com/docker/docker/daemon/config"
@@ -380,9 +381,9 @@ func testLiveRestoreVolumeReferences(t *testing.T) {
 	c := d.NewClientT(t)
 	ctx := context.Background()
 
-	runTest := func(t *testing.T, policy string) {
-		t.Run(policy, func(t *testing.T) {
-			volName := "test-live-restore-volume-references-" + policy
+	runTest := func(t *testing.T, policy containertypes.RestartPolicyMode) {
+		t.Run(string(policy), func(t *testing.T) {
+			volName := "test-live-restore-volume-references-" + string(policy)
 			_, err := c.VolumeCreate(ctx, volume.CreateOptions{Name: volName})
 			assert.NilError(t, err)
 
@@ -408,10 +409,10 @@ func testLiveRestoreVolumeReferences(t *testing.T) {
 	}
 
 	t.Run("restartPolicy", func(t *testing.T) {
-		runTest(t, "always")
-		runTest(t, "unless-stopped")
-		runTest(t, "on-failure")
-		runTest(t, "no")
+		runTest(t, containertypes.RestartPolicyAlways)
+		runTest(t, containertypes.RestartPolicyUnlessStopped)
+		runTest(t, containertypes.RestartPolicyOnFailure)
+		runTest(t, containertypes.RestartPolicyDisabled)
 	})
 
 	// Make sure that the local volume driver's mount ref count is restored

+ 1 - 1
integration/internal/container/ops.go

@@ -169,7 +169,7 @@ func WithPidsLimit(limit *int64) func(*TestContainerConfig) {
 }
 
 // WithRestartPolicy sets container's restart policy
-func WithRestartPolicy(policy string) func(c *TestContainerConfig) {
+func WithRestartPolicy(policy container.RestartPolicyMode) func(c *TestContainerConfig) {
 	return func(c *TestContainerConfig) {
 		c.HostConfig.RestartPolicy.Name = policy
 	}