Merge pull request #45196 from vvoland/integration-restart-race-23
[backport 23.0] TestDaemonRestartKillContainers: Fix races
commit 219f21bf07
2 changed files with 18 additions and 19 deletions
@@ -76,11 +76,10 @@ func TestDaemonRestartKillContainers(t *testing.T) {
 			d.Stop(t)
 		},
 	} {
+		tc := tc
+		liveRestoreEnabled := liveRestoreEnabled
+		stopDaemon := stopDaemon
 		t.Run(fmt.Sprintf("live-restore=%v/%s/%s", liveRestoreEnabled, tc.desc, fnName), func(t *testing.T) {
-			c := tc
-			liveRestoreEnabled := liveRestoreEnabled
-			stopDaemon := stopDaemon
-
 			t.Parallel()
 
 			d := daemon.New(t)
@@ -95,11 +94,11 @@ func TestDaemonRestartKillContainers(t *testing.T) {
 			defer d.Stop(t)
 			ctx := context.Background()
 
-			resp, err := client.ContainerCreate(ctx, c.config, c.hostConfig, nil, nil, "")
+			resp, err := client.ContainerCreate(ctx, tc.config, tc.hostConfig, nil, nil, "")
 			assert.NilError(t, err)
 			defer client.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
 
-			if c.xStart {
+			if tc.xStart {
 				err = client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
 				assert.NilError(t, err)
 			}
@@ -107,9 +106,9 @@ func TestDaemonRestartKillContainers(t *testing.T) {
 			stopDaemon(t, d)
 			d.Start(t, args...)
 
-			expected := c.xRunning
+			expected := tc.xRunning
 			if liveRestoreEnabled {
-				expected = c.xRunningLiveRestore
+				expected = tc.xRunningLiveRestore
 			}
 
 			var running bool
@@ -125,7 +124,7 @@ func TestDaemonRestartKillContainers(t *testing.T) {
 			}
 			assert.Equal(t, expected, running, "got unexpected running state, expected %v, got: %v", expected, running)
 
-			if c.xHealthCheck {
+			if tc.xHealthCheck {
				startTime := time.Now()
 				ctxPoll, cancel := context.WithTimeout(ctx, 30*time.Second)
 				defer cancel()
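The test-side hunks above fix a loop-variable capture race: each subtest calls t.Parallel(), so its body runs after the enclosing loop has moved on, and (before Go 1.22) every iteration shared a single range variable. The patch keeps one set of per-iteration copies (tc := tc and friends) outside t.Run and drops the inner c := tc alias, so the closure only ever reads its own copies. Below is a distilled sketch of the hazard, with hypothetical names; it is not code from this patch:

package example

import "testing"

// Pre-Go 1.22, the range variable tc is reused across iterations, so a
// parallel subtest that closes over it may observe a later iteration's
// value. The shadowing copy gives each closure its own variable.
func TestLoopCapture(t *testing.T) {
	for _, tc := range []string{"a", "b", "c"} {
		tc := tc // per-iteration copy; removing this reintroduces the race
		t.Run(tc, func(t *testing.T) {
			t.Parallel() // body runs once the loop has already advanced
			t.Log(tc)    // safe: reads this iteration's copy, not the shared variable
		})
	}
}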
@@ -392,25 +392,27 @@ func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
 	}
 
 	d.args = append(d.args, providedArgs...)
-	d.cmd = exec.Command(dockerdBinary, d.args...)
-	d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1")
-	d.cmd.Env = append(d.cmd.Env, d.extraEnv...)
-	d.cmd.Stdout = out
-	d.cmd.Stderr = out
+	cmd := exec.Command(dockerdBinary, d.args...)
+	cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1")
+	cmd.Env = append(cmd.Env, d.extraEnv...)
+	cmd.Stdout = out
+	cmd.Stderr = out
 	d.logFile = out
 	if d.rootlessUser != nil {
 		// sudo requires this for propagating signals
-		setsid(d.cmd)
+		setsid(cmd)
 	}
 
-	if err := d.cmd.Start(); err != nil {
+	if err := cmd.Start(); err != nil {
 		return errors.Wrapf(err, "[%s] could not start daemon container", d.id)
 	}
 
 	wait := make(chan error, 1)
+	d.cmd = cmd
+	d.Wait = wait
 
 	go func() {
-		ret := d.cmd.Wait()
+		ret := cmd.Wait()
 		d.log.Logf("[%s] exiting daemon", d.id)
 		// If we send before logging, we might accidentally log _after_ the test is done.
 		// As of Go 1.12, this incurs a panic instead of silently being dropped.
@@ -418,8 +420,6 @@ func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
 		close(wait)
 	}()
 
-	d.Wait = wait
-
 	clientConfig, err := d.getClientConfig()
 	if err != nil {
 		return err
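The helper-side hunks rework StartWithLogFile to configure the dockerd command in a local cmd variable, publish d.cmd and d.Wait together right after a successful Start, and have the waiter goroutine close over the local. Previously the goroutine read d.cmd directly, and d.Wait was assigned only after the goroutine was already running, so a concurrent restart rewriting those fields could race with it. Below is a condensed sketch of the capture-locally-publish-once pattern, with a hypothetical proc type standing in for the Daemon helper; it is not code from this patch:

package example

import "os/exec"

// proc mimics the relevant fields of the test Daemon helper.
type proc struct {
	cmd  *exec.Cmd
	Wait chan error
}

// start configures a local *exec.Cmd, publishes it to the struct exactly
// once, and lets the waiter goroutine capture the local so it never reads
// the mutable field while a later start may be rewriting it.
func (p *proc) start(binary string, args ...string) error {
	cmd := exec.Command(binary, args...) // local: invisible to other goroutines
	if err := cmd.Start(); err != nil {
		return err
	}

	wait := make(chan error, 1)
	p.cmd = cmd   // publish once, before the waiter goroutine exists
	p.Wait = wait // callers see a valid channel as soon as start returns

	go func() {
		wait <- cmd.Wait() // waits on the captured local, never on p.cmd
		close(wait)
	}()
	return nil
}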