|
@@ -58,8 +58,8 @@ func newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *
|
|
|
container: container,
|
|
|
restartPolicy: policy,
|
|
|
timeIncrement: defaultTimeIncrement,
|
|
|
- stopChan: make(chan struct{}, 1),
|
|
|
- startSignal: make(chan struct{}, 1),
|
|
|
+ stopChan: make(chan struct{}),
|
|
|
+ startSignal: make(chan struct{}),
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -103,8 +103,17 @@ func (m *containerMonitor) Start() error {
|
|
|
exitStatus int
|
|
|
)
|
|
|
|
|
|
+ // this variable indicates that we hold container.Lock
|
|
|
+ underLock := true
|
|
|
+
|
|
|
// ensure that when the monitor finally exits we release the networking and unmount the rootfs
|
|
|
- defer m.Close()
|
|
|
+ defer func() {
|
|
|
+ if !underLock {
|
|
|
+ m.container.Lock()
|
|
|
+ defer m.container.Unlock()
|
|
|
+ }
|
|
|
+ m.Close()
|
|
|
+ }()
|
|
|
|
|
|
// reset the restart count
|
|
|
m.container.RestartCount = -1
|
|
@@ -136,6 +145,9 @@ func (m *containerMonitor) Start() error {
|
|
|
log.Errorf("Error running container: %s", err)
|
|
|
}
|
|
|
|
|
|
+ // container.Lock has already been released at this point
|
|
|
+ underLock = false
|
|
|
+
|
|
|
m.resetMonitor(err == nil && exitStatus == 0)
|
|
|
|
|
|
if m.shouldRestart(exitStatus) {
|
|
@@ -244,10 +256,12 @@ func (m *containerMonitor) callback(command *execdriver.Command) {
|
|
|
|
|
|
m.container.State.SetRunning(command.Pid())
|
|
|
|
|
|
- if m.startSignal != nil {
|
|
|
- // signal that the process has started
|
|
|
+ // signal that the process has started
|
|
|
+ // close channel only if not closed
|
|
|
+ select {
|
|
|
+ case <-m.startSignal:
|
|
|
+ default:
|
|
|
close(m.startSignal)
|
|
|
- m.startSignal = nil
|
|
|
}
|
|
|
|
|
|
if err := m.container.ToDisk(); err != nil {
|