Merge pull request #45197 from vvoland/integration-restart-race-2010
[backport 20.10] TestDaemonRestartKillContainers: Fix races
This commit is contained in:
commit
d9433ee096
2 changed files with 16 additions and 17 deletions
|
@@ -58,11 +58,10 @@ func TestDaemonRestartKillContainers(t *testing.T) {
|
|||
d.Stop(t)
|
||||
},
|
||||
} {
|
||||
tc := tc
|
||||
liveRestoreEnabled := liveRestoreEnabled
|
||||
stopDaemon := stopDaemon
|
||||
t.Run(fmt.Sprintf("live-restore=%v/%s/%s", liveRestoreEnabled, tc.desc, fnName), func(t *testing.T) {
|
||||
c := tc
|
||||
liveRestoreEnabled := liveRestoreEnabled
|
||||
stopDaemon := stopDaemon
|
||||
|
||||
t.Parallel()
|
||||
|
||||
d := daemon.New(t)
|
||||
|
@@ -77,11 +76,11 @@ func TestDaemonRestartKillContainers(t *testing.T) {
|
|||
defer d.Stop(t)
|
||||
ctx := context.Background()
|
||||
|
||||
resp, err := client.ContainerCreate(ctx, c.config, c.hostConfig, nil, nil, "")
|
||||
resp, err := client.ContainerCreate(ctx, tc.config, tc.hostConfig, nil, nil, "")
|
||||
assert.NilError(t, err)
|
||||
defer client.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
|
||||
|
||||
if c.xStart {
|
||||
if tc.xStart {
|
||||
err = client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
|
@@ -89,9 +88,9 @@ func TestDaemonRestartKillContainers(t *testing.T) {
|
|||
stopDaemon(t, d)
|
||||
d.Start(t, args...)
|
||||
|
||||
expected := c.xRunning
|
||||
expected := tc.xRunning
|
||||
if liveRestoreEnabled {
|
||||
expected = c.xRunningLiveRestore
|
||||
expected = tc.xRunningLiveRestore
|
||||
}
|
||||
|
||||
var running bool
|
||||
|
|
|
@@ -380,24 +380,26 @@ func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
|
|||
}
|
||||
|
||||
d.args = append(d.args, providedArgs...)
|
||||
d.cmd = exec.Command(dockerdBinary, d.args...)
|
||||
d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1")
|
||||
d.cmd.Stdout = out
|
||||
d.cmd.Stderr = out
|
||||
cmd := exec.Command(dockerdBinary, d.args...)
|
||||
cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1")
|
||||
cmd.Stdout = out
|
||||
cmd.Stderr = out
|
||||
d.logFile = out
|
||||
if d.rootlessUser != nil {
|
||||
// sudo requires this for propagating signals
|
||||
setsid(d.cmd)
|
||||
setsid(cmd)
|
||||
}
|
||||
|
||||
if err := d.cmd.Start(); err != nil {
|
||||
if err := cmd.Start(); err != nil {
|
||||
return errors.Wrapf(err, "[%s] could not start daemon container", d.id)
|
||||
}
|
||||
|
||||
wait := make(chan error, 1)
|
||||
d.cmd = cmd
|
||||
d.Wait = wait
|
||||
|
||||
go func() {
|
||||
ret := d.cmd.Wait()
|
||||
ret := cmd.Wait()
|
||||
d.log.Logf("[%s] exiting daemon", d.id)
|
||||
// If we send before logging, we might accidentally log _after_ the test is done.
|
||||
// As of Go 1.12, this incurs a panic instead of silently being dropped.
|
||||
|
@@ -405,8 +407,6 @@ func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
|
|||
close(wait)
|
||||
}()
|
||||
|
||||
d.Wait = wait
|
||||
|
||||
clientConfig, err := d.getClientConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
Loading…
Add table
Reference in a new issue