Use State as embedded to Container
Signed-off-by: Alexandr Morozov <lk4d4math@gmail.com>
Parent: 5f6b420f91
Commit: e0339d4b88
17 changed files with 72 additions and 72 deletions
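The change relies on Go struct embedding: once State is embedded in Container as an anonymous field, State's fields and methods are promoted onto Container itself, so every call site in the diff below can drop the explicit State. selector (container.State.IsRunning() becomes container.IsRunning(), while the old spelling stays legal). A minimal sketch of the pattern, with types simplified and not taken from the actual Docker source:

package main

import (
	"fmt"
	"sync"
)

// State tracks runtime status; a simplified stand-in for the daemon's State.
type State struct {
	sync.Mutex
	Running bool
	Pid     int
}

func (s *State) IsRunning() bool {
	s.Lock()
	defer s.Unlock()
	return s.Running
}

// Container embeds State anonymously. Go promotes State's fields and
// methods, so c.Running, c.Pid, and c.IsRunning() all resolve through
// the embedded State without naming it.
type Container struct {
	State
	ID string
}

func main() {
	c := &Container{ID: "abc123"}
	fmt.Println(c.State.IsRunning()) // old style, still valid after embedding
	fmt.Println(c.IsRunning())       // promoted method, the form this commit standardizes on
}

Both spellings refer to the same method; the commit simply switches every caller to the shorter promoted form, which is what produces the symmetric 72 additions and 72 deletions.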
@@ -413,7 +413,7 @@ func (b *Builder) run(c *daemon.Container) error {
 	}
 
 	// Wait for it to finish
-	if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 {
+	if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
 		err := &utils.JSONError{
 			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.Config.Cmd, ret),
 			Code:    ret,
@@ -106,7 +106,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 		// If we are in stdinonce mode, wait for the process to end
 		// otherwise, simply return
 		if container.Config.StdinOnce && !container.Config.Tty {
-			container.State.WaitStop(-1 * time.Second)
+			container.WaitStop(-1 * time.Second)
 		}
 	}
 	return engine.StatusOK
@@ -283,7 +283,7 @@ func (container *Container) Start() (err error) {
 	container.Lock()
 	defer container.Unlock()
 
-	if container.State.Running {
+	if container.Running {
 		return nil
 	}
@@ -333,7 +333,7 @@ func (container *Container) Run() error {
 	if err := container.Start(); err != nil {
 		return err
 	}
-	container.State.WaitStop(-1 * time.Second)
+	container.WaitStop(-1 * time.Second)
 	return nil
 }
@@ -347,7 +347,7 @@ func (container *Container) Output() (output []byte, err error) {
 		return nil, err
 	}
 	output, err = ioutil.ReadAll(pipe)
-	container.State.WaitStop(-1 * time.Second)
+	container.WaitStop(-1 * time.Second)
 	return output, err
 }
@@ -533,11 +533,11 @@ func (container *Container) KillSig(sig int) error {
 	defer container.Unlock()
 
 	// We could unpause the container for them rather than returning this error
-	if container.State.Paused {
+	if container.Paused {
 		return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID)
 	}
 
-	if !container.State.Running {
+	if !container.Running {
 		return nil
 	}
@@ -548,7 +548,7 @@ func (container *Container) KillSig(sig int) error {
 	// if the container is currently restarting we do not need to send the signal
 	// to the process. Telling the monitor that it should exit on it's next event
 	// loop is enough
-	if container.State.Restarting {
+	if container.Restarting {
 		return nil
 	}
@@ -556,27 +556,27 @@ func (container *Container) KillSig(sig int) error {
 }
 
 func (container *Container) Pause() error {
-	if container.State.IsPaused() {
+	if container.IsPaused() {
 		return fmt.Errorf("Container %s is already paused", container.ID)
 	}
-	if !container.State.IsRunning() {
+	if !container.IsRunning() {
 		return fmt.Errorf("Container %s is not running", container.ID)
 	}
 	return container.daemon.Pause(container)
 }
 
 func (container *Container) Unpause() error {
-	if !container.State.IsPaused() {
+	if !container.IsPaused() {
 		return fmt.Errorf("Container %s is not paused", container.ID)
 	}
-	if !container.State.IsRunning() {
+	if !container.IsRunning() {
 		return fmt.Errorf("Container %s is not running", container.ID)
 	}
 	return container.daemon.Unpause(container)
 }
 
 func (container *Container) Kill() error {
-	if !container.State.IsRunning() {
+	if !container.IsRunning() {
 		return nil
 	}
@@ -586,9 +586,9 @@ func (container *Container) Kill() error {
 	}
 
 	// 2. Wait for the process to die, in last resort, try to kill the process directly
-	if _, err := container.State.WaitStop(10 * time.Second); err != nil {
+	if _, err := container.WaitStop(10 * time.Second); err != nil {
 		// Ensure that we don't kill ourselves
-		if pid := container.State.GetPid(); pid != 0 {
+		if pid := container.GetPid(); pid != 0 {
 			log.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
 			if err := syscall.Kill(pid, 9); err != nil {
 				return err
@@ -596,12 +596,12 @@ func (container *Container) Kill() error {
 		}
 	}
 
-	container.State.WaitStop(-1 * time.Second)
+	container.WaitStop(-1 * time.Second)
 	return nil
 }
 
 func (container *Container) Stop(seconds int) error {
-	if !container.State.IsRunning() {
+	if !container.IsRunning() {
 		return nil
 	}
@@ -614,11 +614,11 @@ func (container *Container) Stop(seconds int) error {
 	}
 
 	// 2. Wait for the process to exit on its own
-	if _, err := container.State.WaitStop(time.Duration(seconds) * time.Second); err != nil {
+	if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
 		log.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
 		// 3. If it doesn't, then send SIGKILL
 		if err := container.Kill(); err != nil {
-			container.State.WaitStop(-1 * time.Second)
+			container.WaitStop(-1 * time.Second)
 			return err
 		}
 	}
@@ -1006,7 +1006,7 @@ func (container *Container) setupLinkedContainers() ([]string, error) {
 	}
 
 	for linkAlias, child := range children {
-		if !child.State.IsRunning() {
+		if !child.IsRunning() {
 			return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias)
 		}
@@ -1173,7 +1173,7 @@ func (container *Container) getNetworkedContainer() (*Container, error) {
 	if nc == nil {
 		return nil, fmt.Errorf("no such container to join network: %s", parts[1])
 	}
-	if !nc.State.IsRunning() {
+	if !nc.IsRunning() {
 		return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1])
 	}
 	return nc, nil
@@ -213,11 +213,11 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 	// FIXME: if the container is supposed to be running but is not, auto restart it?
 	// if so, then we need to restart monitor and init a new lock
 	// If the container is supposed to be running, make sure of it
-	if container.State.IsRunning() {
+	if container.IsRunning() {
 		log.Debugf("killing old running container %s", container.ID)
 
-		existingPid := container.State.Pid
-		container.State.SetStopped(0)
+		existingPid := container.Pid
+		container.SetStopped(0)
 
 		// We only have to handle this for lxc because the other drivers will ensure that
 		// no processes are left when docker dies
@@ -249,7 +249,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
 
 			log.Debugf("Marking as stopped")
 
-			container.State.SetStopped(-127)
+			container.SetStopped(-127)
 			if err := container.ToDisk(); err != nil {
 				return err
 			}
@@ -363,7 +363,7 @@ func (daemon *Daemon) restore() error {
 
 	for _, container := range registeredContainers {
 		if container.hostConfig.RestartPolicy.Name == "always" ||
-			(container.hostConfig.RestartPolicy.Name == "on-failure" && container.State.ExitCode != 0) {
+			(container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) {
 			log.Debugf("Starting container %s", container.ID)
 
 			if err := container.Start(); err != nil {
@@ -891,7 +891,7 @@ func (daemon *Daemon) shutdown() error {
 	log.Debugf("starting clean shutdown of all containers...")
 	for _, container := range daemon.List() {
 		c := container
-		if c.State.IsRunning() {
+		if c.IsRunning() {
 			log.Debugf("stopping %s", c.ID)
 			group.Add(1)
@@ -900,7 +900,7 @@ func (daemon *Daemon) shutdown() error {
 				if err := c.KillSig(15); err != nil {
 					log.Debugf("kill 15 error for %s - %s", c.ID, err)
 				}
-				c.State.WaitStop(-1 * time.Second)
+				c.WaitStop(-1 * time.Second)
 				log.Debugf("container stopped %s", c.ID)
 			}()
 	}
@@ -980,7 +980,7 @@ func (daemon *Daemon) Pause(c *Container) error {
 	if err := daemon.execDriver.Pause(c.command); err != nil {
 		return err
 	}
-	c.State.SetPaused()
+	c.SetPaused()
 	return nil
 }
@@ -988,7 +988,7 @@ func (daemon *Daemon) Unpause(c *Container) error {
 	if err := daemon.execDriver.Unpause(c.command); err != nil {
 		return err
 	}
-	c.State.SetUnpaused()
+	c.SetUnpaused()
 	return nil
 }
@@ -50,7 +50,7 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
 	}
 
 	if container != nil {
-		if container.State.IsRunning() {
+		if container.IsRunning() {
 			if forceRemove {
 				if err := container.Kill(); err != nil {
 					return job.Errorf("Could not kill running container, cannot remove - %v", err)
@@ -138,7 +138,7 @@ func (daemon *Daemon) canDeleteImage(imgID string, force, untagged bool) error {
 
 	if err := parent.WalkHistory(func(p *image.Image) error {
 		if imgID == p.ID {
-			if container.State.IsRunning() {
+			if container.IsRunning() {
 				if force {
 					return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it%s, stop it and retry", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
 				}
@@ -70,7 +70,7 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 	writeCont := func(container *Container) error {
 		container.Lock()
 		defer container.Unlock()
-		if !container.State.Running && !all && n <= 0 && since == "" && before == "" {
+		if !container.Running && !all && n <= 0 && since == "" && before == "" {
 			return nil
 		}
 		if before != "" && !foundBefore {
@@ -87,10 +87,10 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 				return errLast
 			}
 		}
-		if len(filt_exited) > 0 && !container.State.Running {
+		if len(filt_exited) > 0 && !container.Running {
 			should_skip := true
 			for _, code := range filt_exited {
-				if code == container.State.GetExitCode() {
+				if code == container.GetExitCode() {
 					should_skip = false
 					break
 				}
@@ -111,7 +111,7 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
 			}
 		}
 	}
-	if follow && container.State.IsRunning() {
+	if follow && container.IsRunning() {
 		errors := make(chan error, 2)
 		if stdout {
 			stdoutPipe := container.StdoutLogPipe()
@@ -110,7 +110,7 @@ func (m *containerMonitor) Start() error {
 	defer func() {
 		if afterRun {
 			m.container.Lock()
-			m.container.State.setStopped(exitStatus)
+			m.container.setStopped(exitStatus)
 			defer m.container.Unlock()
 		}
 		m.Close()
@@ -152,7 +152,7 @@ func (m *containerMonitor) Start() error {
 	m.resetMonitor(err == nil && exitStatus == 0)
 
 	if m.shouldRestart(exitStatus) {
-		m.container.State.SetRestarting(exitStatus)
+		m.container.SetRestarting(exitStatus)
 		m.container.LogEvent("die")
 		m.resetContainer(true)
@@ -243,7 +243,7 @@ func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid
 		}
 	}
 
-	m.container.State.setRunning(pid)
+	m.container.setRunning(pid)
 
 	// signal that the process has started
 	// close channel only if not closed
@@ -22,7 +22,7 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
 		return job.Errorf("No such container: %s", name)
 	}
 
-	if container.State.IsRunning() {
+	if container.IsRunning() {
 		return job.Errorf("Container already started")
 	}
@@ -16,7 +16,7 @@ func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status {
 		t = job.GetenvInt("t")
 	}
 	if container := daemon.Get(name); container != nil {
-		if !container.State.IsRunning() {
+		if !container.IsRunning() {
 			return job.Errorf("Container already stopped")
 		}
 		if err := container.Stop(int(t)); err != nil {
@@ -22,7 +22,7 @@ func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
 	}
 
 	if container := daemon.Get(name); container != nil {
-		if !container.State.IsRunning() {
+		if !container.IsRunning() {
 			return job.Errorf("Container %s is not running", name)
 		}
 		pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID)
@@ -12,7 +12,7 @@ func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status {
 	}
 	name := job.Args[0]
 	if container := daemon.Get(name); container != nil {
-		status, _ := container.State.WaitStop(-1 * time.Second)
+		status, _ := container.WaitStop(-1 * time.Second)
 		job.Printf("%d\n", status)
 		return engine.StatusOK
 	}
@@ -57,7 +57,7 @@ func waitContainerStart(t *testing.T, timeout time.Duration) *daemon.Container {
 	setTimeout(t, "Waiting for the container to be started timed out", timeout, func() {
 		for {
 			l := globalDaemon.List()
-			if len(l) == 1 && l[0].State.IsRunning() {
+			if len(l) == 1 && l[0].IsRunning() {
 				container = l[0]
 				break
 			}
@@ -150,8 +150,8 @@ func TestRunDisconnect(t *testing.T) {
 	// cause /bin/cat to exit.
 	setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() {
 		container := globalDaemon.List()[0]
-		container.State.WaitStop(-1 * time.Second)
-		if container.State.IsRunning() {
+		container.WaitStop(-1 * time.Second)
+		if container.IsRunning() {
 			t.Fatalf("/bin/cat is still running after closing stdin")
 		}
 	})
@@ -202,8 +202,8 @@ func TestRunDisconnectTty(t *testing.T) {
 	// In tty mode, we expect the process to stay alive even after client's stdin closes.
 
 	// Give some time to monitor to do his thing
-	container.State.WaitStop(500 * time.Millisecond)
-	if !container.State.IsRunning() {
+	container.WaitStop(500 * time.Millisecond)
+	if !container.IsRunning() {
 		t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)")
 	}
 }
@@ -247,7 +247,7 @@ func TestRunDetach(t *testing.T) {
 	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
 
 	time.Sleep(500 * time.Millisecond)
-	if !container.State.IsRunning() {
+	if !container.IsRunning() {
 		t.Fatal("The detached container should be still running")
 	}
@@ -328,7 +328,7 @@ func TestAttachDetach(t *testing.T) {
 	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
 
 	time.Sleep(500 * time.Millisecond)
-	if !container.State.IsRunning() {
+	if !container.IsRunning() {
 		t.Fatal("The detached container should be still running")
 	}
@@ -393,7 +393,7 @@ func TestAttachDetachTruncatedID(t *testing.T) {
 	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
 
 	time.Sleep(500 * time.Millisecond)
-	if !container.State.IsRunning() {
+	if !container.IsRunning() {
 		t.Fatal("The detached container should be still running")
 	}
@@ -426,7 +426,7 @@ func TestAttachDisconnect(t *testing.T) {
 	setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() {
 		for {
 			l := globalDaemon.List()
-			if len(l) == 1 && l[0].State.IsRunning() {
+			if len(l) == 1 && l[0].IsRunning() {
 				break
 			}
 			time.Sleep(10 * time.Millisecond)
@@ -461,15 +461,15 @@ func TestAttachDisconnect(t *testing.T) {
 
 	// We closed stdin, expect /bin/cat to still be running
 	// Wait a little bit to make sure container.monitor() did his thing
-	_, err := container.State.WaitStop(500 * time.Millisecond)
-	if err == nil || !container.State.IsRunning() {
+	_, err := container.WaitStop(500 * time.Millisecond)
+	if err == nil || !container.IsRunning() {
 		t.Fatalf("/bin/cat is not running after closing stdin")
 	}
 
 	// Try to avoid the timeout in destroy. Best effort, don't check error
 	cStdin, _ := container.StdinPipe()
 	cStdin.Close()
-	container.State.WaitStop(-1 * time.Second)
+	container.WaitStop(-1 * time.Second)
 }
 
 // Expected behaviour: container gets deleted automatically after exit
@@ -42,7 +42,7 @@ func TestRestartStdin(t *testing.T) {
 	if err := stdin.Close(); err != nil {
 		t.Fatal(err)
 	}
-	container.State.WaitStop(-1 * time.Second)
+	container.WaitStop(-1 * time.Second)
 	output, err := ioutil.ReadAll(stdout)
 	if err != nil {
 		t.Fatal(err)
@@ -72,7 +72,7 @@ func TestRestartStdin(t *testing.T) {
 	if err := stdin.Close(); err != nil {
 		t.Fatal(err)
 	}
-	container.State.WaitStop(-1 * time.Second)
+	container.WaitStop(-1 * time.Second)
 	output, err = ioutil.ReadAll(stdout)
 	if err != nil {
 		t.Fatal(err)
@@ -120,7 +120,7 @@ func TestStdin(t *testing.T) {
 	if err := stdin.Close(); err != nil {
 		t.Fatal(err)
 	}
-	container.State.WaitStop(-1 * time.Second)
+	container.WaitStop(-1 * time.Second)
 	output, err := ioutil.ReadAll(stdout)
 	if err != nil {
 		t.Fatal(err)
@@ -165,7 +165,7 @@ func TestTty(t *testing.T) {
 	if err := stdin.Close(); err != nil {
 		t.Fatal(err)
 	}
-	container.State.WaitStop(-1 * time.Second)
+	container.WaitStop(-1 * time.Second)
 	output, err := ioutil.ReadAll(stdout)
 	if err != nil {
 		t.Fatal(err)
@@ -227,7 +227,7 @@ func BenchmarkRunParallel(b *testing.B) {
 			complete <- err
 			return
 		}
-		if _, err := container.State.WaitStop(15 * time.Second); err != nil {
+		if _, err := container.WaitStop(15 * time.Second); err != nil {
 			complete <- err
 			return
 		}
@@ -492,13 +492,13 @@ func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daem
 	}
 
 	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
-		for !container.State.IsRunning() {
+		for !container.IsRunning() {
 			time.Sleep(10 * time.Millisecond)
 		}
 	})
 
 	// Even if the state is running, lets give some time to lxc to spawn the process
-	container.State.WaitStop(500 * time.Millisecond)
+	container.WaitStop(500 * time.Millisecond)
 
 	strPort = container.NetworkSettings.Ports[p][0].HostPort
 	return daemon, container, strPort
@@ -606,17 +606,17 @@ func TestRestore(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if !container2.State.IsRunning() {
+	if !container2.IsRunning() {
 		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
 	}
 
 	// Simulate a crash/manual quit of dockerd: process dies, states stays 'Running'
 	cStdin, _ := container2.StdinPipe()
 	cStdin.Close()
-	if _, err := container2.State.WaitStop(2 * time.Second); err != nil {
+	if _, err := container2.WaitStop(2 * time.Second); err != nil {
 		t.Fatal(err)
 	}
-	container2.State.SetRunning(42)
+	container2.SetRunning(42)
 	container2.ToDisk()
 
 	if len(daemon1.List()) != 2 {
@@ -626,7 +626,7 @@ func TestRestore(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if !container2.State.IsRunning() {
+	if !container2.IsRunning() {
 		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
 	}
@@ -639,7 +639,7 @@ func TestRestore(t *testing.T) {
 	}
 	runningCount := 0
 	for _, c := range daemon2.List() {
-		if c.State.IsRunning() {
+		if c.IsRunning() {
 			t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
 			runningCount++
 		}
@@ -654,7 +654,7 @@ func TestRestore(t *testing.T) {
 	if err := container3.Run(); err != nil {
 		t.Fatal(err)
 	}
-	container2.State.SetStopped(0)
+	container2.SetStopped(0)
 }
 
 func TestDefaultContainerName(t *testing.T) {
@@ -96,12 +96,12 @@ func containerAttach(eng *engine.Engine, id string, t log.Fataler) (io.WriteClos
 }
 
 func containerWait(eng *engine.Engine, id string, t log.Fataler) int {
-	ex, _ := getContainer(eng, id, t).State.WaitStop(-1 * time.Second)
+	ex, _ := getContainer(eng, id, t).WaitStop(-1 * time.Second)
 	return ex
 }
 
 func containerWaitTimeout(eng *engine.Engine, id string, t log.Fataler) error {
-	_, err := getContainer(eng, id, t).State.WaitStop(500 * time.Millisecond)
+	_, err := getContainer(eng, id, t).WaitStop(500 * time.Millisecond)
 	return err
 }
@@ -112,7 +112,7 @@ func containerKill(eng *engine.Engine, id string, t log.Fataler) {
 }
 
 func containerRunning(eng *engine.Engine, id string, t log.Fataler) bool {
-	return getContainer(eng, id, t).State.IsRunning()
+	return getContainer(eng, id, t).IsRunning()
 }
 
 func containerAssertExists(eng *engine.Engine, id string, t log.Fataler) {
@@ -303,7 +303,7 @@ func runContainer(eng *engine.Engine, r *daemon.Daemon, args []string, t *testin
 		return "", err
 	}
 
-	container.State.WaitStop(-1 * time.Second)
+	container.WaitStop(-1 * time.Second)
 	data, err := ioutil.ReadAll(stdout)
 	if err != nil {
 		return "", err
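Throughout the diff, WaitStop blocks until the container stops: the call sites pass a negative duration to wait indefinitely and a positive one to get an error back on timeout (TestAttachDisconnect above depends on exactly that). A hypothetical channel-based sketch of that wait semantic, not the actual Docker implementation:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitStop blocks until stopped is closed. A negative timeout waits
// forever; otherwise it gives up after the duration and reports an
// error, mirroring how the call sites in this commit use WaitStop.
func waitStop(stopped <-chan struct{}, timeout time.Duration) error {
	if timeout < 0 {
		<-stopped
		return nil
	}
	select {
	case <-stopped:
		return nil
	case <-time.After(timeout):
		return errors.New("timed out waiting for container to stop")
	}
}

func main() {
	stopped := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(stopped) // simulate the monitor marking the container stopped
	}()
	fmt.Println(waitStop(stopped, 50*time.Millisecond)) // times out, returns an error
	fmt.Println(waitStop(stopped, -1*time.Second))      // waits until stop, returns nil
}

Closing a channel wakes every waiter at once, which is why a single stop event can release any number of concurrent WaitStop-style callers.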