restart_test.go 4.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155
  1. package container // import "github.com/docker/docker/integration/container"
  2. import (
  3. "context"
  4. "fmt"
  5. "testing"
  6. "time"
  7. "github.com/docker/docker/api/types"
  8. "github.com/docker/docker/api/types/container"
  9. "github.com/docker/docker/client"
  10. "github.com/docker/docker/testutil/daemon"
  11. "gotest.tools/v3/assert"
  12. "gotest.tools/v3/poll"
  13. "gotest.tools/v3/skip"
  14. )
// TestDaemonRestartKillContainers verifies which containers are running after
// a daemon restart, across every combination of restart policy, live-restore
// setting, and how the daemon was brought down (killed vs. cleanly stopped).
// For healthcheck-enabled containers it additionally verifies that a fresh
// healthcheck probe runs after the restart.
func TestDaemonRestartKillContainers(t *testing.T) {
	// These scenarios require starting a private daemon instance locally.
	skip.If(t, testEnv.IsRemoteDaemon, "cannot start daemon on remote test run")
	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
	skip.If(t, testEnv.IsRootless, "rootless mode doesn't support live-restore")
	type testCase struct {
		desc       string
		config     *container.Config
		hostConfig *container.HostConfig
		// xRunning: expect the container to be running after restart
		// without live-restore.
		xRunning bool
		// xRunningLiveRestore: expect it to be running after restart
		// with live-restore enabled.
		xRunningLiveRestore bool
		// xStart: start the container before stopping the daemon.
		xStart bool
		// xHealthCheck: expect a new healthcheck probe after restart.
		xHealthCheck bool
	}
	for _, tc := range []testCase{
		{
			// No restart policy: only survives via live-restore.
			desc:                "container without restart policy",
			config:              &container.Config{Image: "busybox", Cmd: []string{"top"}},
			xRunningLiveRestore: true,
			xStart:              true,
		},
		{
			// restart=always: daemon restarts it in both modes.
			desc:                "container with restart=always",
			config:              &container.Config{Image: "busybox", Cmd: []string{"top"}},
			hostConfig:          &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}},
			xRunning:            true,
			xRunningLiveRestore: true,
			xStart:              true,
		},
		{
			// Same as above, plus a healthcheck that must fire again
			// once the daemon is back.
			desc: "container with restart=always and with healthcheck",
			config: &container.Config{Image: "busybox", Cmd: []string{"top"},
				Healthcheck: &container.HealthConfig{
					Test:     []string{"CMD-SHELL", "sleep 1"},
					Interval: time.Second,
				},
			},
			hostConfig:          &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}},
			xRunning:            true,
			xRunningLiveRestore: true,
			xStart:              true,
			xHealthCheck:        true,
		},
		{
			// Created but never started: restart policy must NOT kick in.
			desc:       "container created should not be restarted",
			config:     &container.Config{Image: "busybox", Cmd: []string{"top"}},
			hostConfig: &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}},
		},
	} {
		for _, liveRestoreEnabled := range []bool{false, true} {
			// Exercise both an abrupt kill and a graceful stop of the daemon.
			for fnName, stopDaemon := range map[string]func(*testing.T, *daemon.Daemon){
				"kill-daemon": func(t *testing.T, d *daemon.Daemon) {
					err := d.Kill()
					assert.NilError(t, err)
				},
				"stop-daemon": func(t *testing.T, d *daemon.Daemon) {
					d.Stop(t)
				},
			} {
				t.Run(fmt.Sprintf("live-restore=%v/%s/%s", liveRestoreEnabled, tc.desc, fnName), func(t *testing.T) {
					// Copy the loop variables BEFORE t.Parallel(): these
					// statements run synchronously inside t.Run, so they
					// snapshot the current iteration's values before the
					// outer loops advance (pre-Go-1.22 capture semantics).
					c := tc
					liveRestoreEnabled := liveRestoreEnabled
					stopDaemon := stopDaemon
					t.Parallel()
					d := daemon.New(t)
					client := d.NewClientT(t)
					// --iptables=false avoids conflicting firewall setup
					// between parallel daemons.
					args := []string{"--iptables=false"}
					if liveRestoreEnabled {
						args = append(args, "--live-restore")
					}
					d.StartWithBusybox(t, args...)
					defer d.Stop(t)
					ctx := context.Background()
					resp, err := client.ContainerCreate(ctx, c.config, c.hostConfig, nil, nil, "")
					assert.NilError(t, err)
					// Best-effort cleanup; error intentionally ignored.
					defer client.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
					if c.xStart {
						err = client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
						assert.NilError(t, err)
					}
					// Bring the daemon down (kill or stop), then restart it
					// with the same flags and check the container's state.
					stopDaemon(t, d)
					d.Start(t, args...)
					expected := c.xRunning
					if liveRestoreEnabled {
						expected = c.xRunningLiveRestore
					}
					// Poll up to ~60s (30 x 2s) for the container to reach
					// the expected running state; restart can be slow.
					var running bool
					for i := 0; i < 30; i++ {
						inspect, err := client.ContainerInspect(ctx, resp.ID)
						assert.NilError(t, err)
						running = inspect.State.Running
						if running == expected {
							break
						}
						time.Sleep(2 * time.Second)
					}
					assert.Equal(t, expected, running, "got unexpected running state, expected %v, got: %v", expected, running)
					if c.xHealthCheck {
						// A probe started after startTime proves the restarted
						// daemon is running healthchecks, not a stale entry.
						startTime := time.Now()
						ctxPoll, cancel := context.WithTimeout(ctx, 30*time.Second)
						defer cancel()
						poll.WaitOn(t, pollForNewHealthCheck(ctxPoll, client, startTime, resp.ID), poll.WithDelay(100*time.Millisecond))
					}
					// TODO(cpuguy83): test pause states... this seems to be rather undefined currently
				})
			}
		}
	}
}
  123. func pollForNewHealthCheck(ctx context.Context, client *client.Client, startTime time.Time, containerID string) func(log poll.LogT) poll.Result {
  124. return func(log poll.LogT) poll.Result {
  125. inspect, err := client.ContainerInspect(ctx, containerID)
  126. if err != nil {
  127. return poll.Error(err)
  128. }
  129. healthChecksTotal := len(inspect.State.Health.Log)
  130. if healthChecksTotal > 0 {
  131. if inspect.State.Health.Log[healthChecksTotal-1].Start.After(startTime) {
  132. return poll.Success()
  133. }
  134. }
  135. return poll.Continue("waiting for a new container healthcheck")
  136. }
  137. }