integration: test container healthcheck is reset
Update the TestDaemonRestartKillContainers integration test to assert that a container's healthcheck status is always reset to the Starting state after a daemon restart, even when the container is live-restored.

Signed-off-by: Cory Snider <csnider@mirantis.com>
parent 0e62dbadcd
commit 312450d079

3 changed files with 68 additions and 42 deletions
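In outline, the property this commit asserts is that inspecting a health-checked container immediately after the daemon comes back up reports the Starting health state rather than a stale value, even when the container was kept alive by live-restore. A minimal standalone sketch of that check against the Docker Go client follows; the container ID is a placeholder, and the bare "starting" string stands in for the types.Starting constant the test itself uses.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/docker/docker/client"
)

func main() {
    ctx := context.Background()
    // Placeholder ID; in the test this comes from ContainerCreate.
    const containerID = "my-healthchecked-container"

    apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        log.Fatal(err)
    }
    defer apiClient.Close()

    inspect, err := apiClient.ContainerInspect(ctx, containerID)
    if err != nil {
        log.Fatal(err)
    }
    if inspect.State.Health == nil {
        log.Fatal("container has no healthcheck configured")
    }

    // After a daemon restart the health status should have been reset to
    // "starting", even if the container itself kept running under live-restore.
    fmt.Println("health status:", inspect.State.Health.Status)
    if inspect.State.Health.Status != "starting" {
        log.Fatalf("expected health status %q, got %q", "starting", inspect.State.Health.Status)
    }
}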
@@ -29,9 +29,8 @@ func TestDaemonRestartKillContainers(t *testing.T) {
     ctx := testutil.StartSpan(baseContext, t)
 
     type testCase struct {
-        desc       string
-        config     *container.Config
-        hostConfig *container.HostConfig
+        desc          string
+        restartPolicy container.RestartPolicy
 
         xRunning            bool
         xRunningLiveRestore bool
@@ -42,37 +41,27 @@ func TestDaemonRestartKillContainers(t *testing.T) {
     for _, tc := range []testCase{
         {
             desc:                "container without restart policy",
-            config:              &container.Config{Image: "busybox", Cmd: []string{"top"}},
             xRunningLiveRestore: true,
             xStart:              true,
         },
         {
             desc:                "container with restart=always",
-            config:              &container.Config{Image: "busybox", Cmd: []string{"top"}},
-            hostConfig:          &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}},
+            restartPolicy:       container.RestartPolicy{Name: "always"},
             xRunning:            true,
             xRunningLiveRestore: true,
             xStart:              true,
         },
         {
-            desc: "container with restart=always and with healthcheck",
-            config: &container.Config{
-                Image: "busybox", Cmd: []string{"top"},
-                Healthcheck: &container.HealthConfig{
-                    Test:     []string{"CMD-SHELL", "sleep 1"},
-                    Interval: time.Second,
-                },
-            },
-            hostConfig:          &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}},
+            desc:                "container with restart=always and with healthcheck",
+            restartPolicy:       container.RestartPolicy{Name: "always"},
             xRunning:            true,
             xRunningLiveRestore: true,
             xStart:              true,
             xHealthCheck:        true,
         },
         {
-            desc:       "container created should not be restarted",
-            config:     &container.Config{Image: "busybox", Cmd: []string{"top"}},
-            hostConfig: &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}},
+            desc:          "container created should not be restarted",
+            restartPolicy: container.RestartPolicy{Name: "always"},
         },
     } {
         for _, liveRestoreEnabled := range []bool{false, true} {
@@ -104,16 +93,31 @@ func TestDaemonRestartKillContainers(t *testing.T) {
                 d.StartWithBusybox(ctx, t, args...)
                 defer d.Stop(t)
 
-                resp, err := apiClient.ContainerCreate(ctx, tc.config, tc.hostConfig, nil, nil, "")
+                config := container.Config{Image: "busybox", Cmd: []string{"top"}}
+                hostConfig := container.HostConfig{RestartPolicy: tc.restartPolicy}
+                if tc.xHealthCheck {
+                    config.Healthcheck = &container.HealthConfig{
+                        Test:          []string{"CMD-SHELL", "! test -f /tmp/unhealthy"},
+                        StartPeriod:   60 * time.Second,
+                        StartInterval: 1 * time.Second,
+                        Interval:      60 * time.Second,
+                    }
+                }
+                resp, err := apiClient.ContainerCreate(ctx, &config, &hostConfig, nil, nil, "")
                 assert.NilError(t, err)
                 defer apiClient.ContainerRemove(ctx, resp.ID, container.RemoveOptions{Force: true})
 
                 if tc.xStart {
                     err = apiClient.ContainerStart(ctx, resp.ID, container.StartOptions{})
                     assert.NilError(t, err)
+                    if tc.xHealthCheck {
+                        poll.WaitOn(t, pollForHealthStatus(ctx, apiClient, resp.ID, types.Healthy), poll.WithDelay(100*time.Millisecond), poll.WithTimeout(30*time.Second))
+                        testContainer.ExecT(ctx, t, apiClient, resp.ID, []string{"touch", "/tmp/unhealthy"}).AssertSuccess(t)
+                    }
                 }
 
                 stopDaemon(t, d)
+                startTime := time.Now()
                 d.Start(t, args...)
 
                 expected := tc.xRunning
@@ -121,24 +125,18 @@ func TestDaemonRestartKillContainers(t *testing.T) {
                     expected = tc.xRunningLiveRestore
                 }
 
-                var running bool
-                for i := 0; i < 30; i++ {
-                    inspect, err := apiClient.ContainerInspect(ctx, resp.ID)
-                    assert.NilError(t, err)
-
-                    running = inspect.State.Running
-                    if running == expected {
-                        break
-                    }
-                    time.Sleep(2 * time.Second)
-                }
-                assert.Equal(t, expected, running, "got unexpected running state, expected %v, got: %v", expected, running)
+                poll.WaitOn(t, testContainer.RunningStateFlagIs(ctx, apiClient, resp.ID, expected), poll.WithDelay(100*time.Millisecond), poll.WithTimeout(30*time.Second))
 
                 if tc.xHealthCheck {
-                    startTime := time.Now()
-                    ctxPoll, cancel := context.WithTimeout(ctx, 30*time.Second)
-                    defer cancel()
-                    poll.WaitOn(t, pollForNewHealthCheck(ctxPoll, apiClient, startTime, resp.ID), poll.WithDelay(100*time.Millisecond))
+                    // We have arranged to have the container's health probes fail until we tell it
+                    // to become healthy, which gives us the entire StartPeriod (60s) to assert that
+                    // the container's health state is Starting before we have to worry about racing
+                    // the health monitor.
+                    assert.Equal(t, testContainer.Inspect(ctx, t, apiClient, resp.ID).State.Health.Status, types.Starting)
+                    poll.WaitOn(t, pollForNewHealthCheck(ctx, apiClient, startTime, resp.ID), poll.WithDelay(100*time.Millisecond), poll.WithTimeout(30*time.Second))
+
+                    testContainer.ExecT(ctx, t, apiClient, resp.ID, []string{"rm", "/tmp/unhealthy"}).AssertSuccess(t)
+                    poll.WaitOn(t, pollForHealthStatus(ctx, apiClient, resp.ID, types.Healthy), poll.WithDelay(100*time.Millisecond), poll.WithTimeout(30*time.Second))
                 }
                 // TODO(cpuguy83): test pause states... this seems to be rather undefined currently
             })
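The hunks above also rely on two poll helpers, pollForNewHealthCheck and pollForHealthStatus, which live elsewhere in the daemon test file and are not part of this diff. As a rough, hypothetical sketch of their shape, assuming they follow the same gotest.tools poll.Check convention as the helpers later in this diff (names, signatures, and details outside the diff are guesses):

// Hypothetical sketch only; the real helpers are defined elsewhere in the
// moby integration suite and may differ in detail.
package daemon

import (
    "context"
    "time"

    "github.com/docker/docker/client"
    "gotest.tools/v3/poll"
)

// pollForHealthStatus succeeds once the container reports the wanted health status.
func pollForHealthStatus(ctx context.Context, apiClient client.APIClient, containerID string, want string) func(log poll.LogT) poll.Result {
    return func(log poll.LogT) poll.Result {
        inspect, err := apiClient.ContainerInspect(ctx, containerID)
        switch {
        case err != nil:
            return poll.Error(err)
        case inspect.State.Health != nil && inspect.State.Health.Status == want:
            return poll.Success()
        default:
            return poll.Continue("waiting for health status %q", want)
        }
    }
}

// pollForNewHealthCheck succeeds once a health probe has run after startTime,
// i.e. after the daemon was restarted.
func pollForNewHealthCheck(ctx context.Context, apiClient client.APIClient, startTime time.Time, containerID string) func(log poll.LogT) poll.Result {
    return func(log poll.LogT) poll.Result {
        inspect, err := apiClient.ContainerInspect(ctx, containerID)
        if err != nil {
            return poll.Error(err)
        }
        if inspect.State.Health == nil {
            return poll.Continue("container has no health state yet")
        }
        for _, probe := range inspect.State.Health.Log {
            if probe.Start.After(startTime) {
                return poll.Success()
            }
        }
        return poll.Continue("waiting for a health probe newer than %s", startTime)
    }
}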
@@ -3,6 +3,7 @@ package container
 import (
     "bytes"
     "context"
+    "testing"
 
     "github.com/docker/docker/api/types"
     "github.com/docker/docker/client"
@@ -16,20 +17,32 @@ type ExecResult struct {
 }
 
 // Stdout returns stdout output of a command run by Exec()
-func (res *ExecResult) Stdout() string {
+func (res ExecResult) Stdout() string {
     return res.outBuffer.String()
 }
 
 // Stderr returns stderr output of a command run by Exec()
-func (res *ExecResult) Stderr() string {
+func (res ExecResult) Stderr() string {
     return res.errBuffer.String()
 }
 
 // Combined returns combined stdout and stderr output of a command run by Exec()
-func (res *ExecResult) Combined() string {
+func (res ExecResult) Combined() string {
     return res.outBuffer.String() + res.errBuffer.String()
 }
 
+// AssertSuccess fails the test and stops execution if the command exited with a
+// nonzero status code.
+func (res ExecResult) AssertSuccess(t testing.TB) {
+    t.Helper()
+    if res.ExitCode != 0 {
+        t.Logf("expected exit code 0, got %d", res.ExitCode)
+        t.Logf("stdout: %s", res.Stdout())
+        t.Logf("stderr: %s", res.Stderr())
+        t.FailNow()
+    }
+}
+
 // Exec executes a command inside a container, returning the result
 // containing stdout, stderr, and exit code. Note:
 // - this is a synchronous operation;
@@ -72,3 +85,13 @@ func Exec(ctx context.Context, apiClient client.APIClient, id string, cmd []stri
 
     return ExecResult{ExitCode: iresp.ExitCode, outBuffer: &s.stdout, errBuffer: &s.stderr}, nil
 }
+
+// ExecT calls Exec() and aborts the test if an error occurs.
+func ExecT(ctx context.Context, t testing.TB, apiClient client.APIClient, id string, cmd []string, ops ...func(*types.ExecConfig)) ExecResult {
+    t.Helper()
+    res, err := Exec(ctx, apiClient, id, cmd, ops...)
+    if err != nil {
+        t.Fatal(err)
+    }
+    return res
+}
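Within the moby integration tests (the helper package is internal to the repository, so it cannot be imported from outside), the new ExecT/AssertSuccess pair lets a test run a command in a container and fail fast on either a client error or a nonzero exit code. A usage sketch, assuming a test that already holds an apiClient and a container ID, mirroring how the daemon test above flips the health probe:

// Hypothetical snippet inside a moby integration test; apiClient and
// containerID are assumed to be provided by the surrounding test.
package example

import (
    "context"
    "testing"

    "github.com/docker/docker/client"
    testContainer "github.com/docker/docker/integration/internal/container"
)

func makeProbeFail(ctx context.Context, t *testing.T, apiClient client.APIClient, containerID string) {
    t.Helper()
    // Create the sentinel file that the "! test -f /tmp/unhealthy" probe checks for,
    // so subsequent health checks fail. AssertSuccess aborts the test, logging
    // stdout and stderr, if the exec exits nonzero.
    testContainer.ExecT(ctx, t, apiClient, containerID, []string{"touch", "/tmp/unhealthy"}).AssertSuccess(t)
}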
@@ -10,22 +10,27 @@ import (
     "gotest.tools/v3/poll"
 )
 
-// IsStopped verifies the container is in stopped state.
-func IsStopped(ctx context.Context, apiClient client.APIClient, containerID string) func(log poll.LogT) poll.Result {
+// RunningStateFlagIs polls for the container's Running state flag to be equal to running.
+func RunningStateFlagIs(ctx context.Context, apiClient client.APIClient, containerID string, running bool) func(log poll.LogT) poll.Result {
     return func(log poll.LogT) poll.Result {
         inspect, err := apiClient.ContainerInspect(ctx, containerID)
 
         switch {
         case err != nil:
             return poll.Error(err)
-        case !inspect.State.Running:
+        case inspect.State.Running == running:
             return poll.Success()
         default:
-            return poll.Continue("waiting for container to be stopped")
+            return poll.Continue("waiting for container to be %s", map[bool]string{true: "running", false: "stopped"}[running])
         }
     }
 }
 
+// IsStopped verifies the container is in stopped state.
+func IsStopped(ctx context.Context, apiClient client.APIClient, containerID string) func(log poll.LogT) poll.Result {
+    return RunningStateFlagIs(ctx, apiClient, containerID, false)
+}
+
 // IsInState verifies the container is in one of the specified state, e.g., "running", "exited", etc.
 func IsInState(ctx context.Context, apiClient client.APIClient, containerID string, state ...string) func(log poll.LogT) poll.Result {
     return func(log poll.LogT) poll.Result {
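The generalized helper composes with gotest.tools' poll package exactly as IsStopped did; the daemon test above waits for either running state with a single call. A short usage sketch (again assuming code inside the moby repository, with apiClient and containerID supplied by the surrounding test):

// Hypothetical snippet inside a moby integration test.
package example

import (
    "context"
    "testing"
    "time"

    "github.com/docker/docker/client"
    testContainer "github.com/docker/docker/integration/internal/container"
    "gotest.tools/v3/poll"
)

func waitForRunningFlag(ctx context.Context, t *testing.T, apiClient client.APIClient, containerID string, expected bool) {
    t.Helper()
    // Poll until State.Running matches the expected value, e.g. true when
    // live-restore should have kept the container alive across the restart.
    poll.WaitOn(t, testContainer.RunningStateFlagIs(ctx, apiClient, containerID, expected),
        poll.WithDelay(100*time.Millisecond), poll.WithTimeout(30*time.Second))
}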