package daemon // import "github.com/docker/docker/daemon"

import (
	"context"
	"strconv"
	"time"

	"github.com/containerd/containerd/log"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/container"
	"github.com/docker/docker/daemon/config"
	"github.com/docker/docker/errdefs"
	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
	"github.com/docker/docker/restartmanager"
	"github.com/pkg/errors"
)
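
// setStateCounter records the container's current state (paused, running, or
// stopped) in the daemon's container-state counter.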
func (daemon *Daemon) setStateCounter(c *container.Container) {
	switch c.StateString() {
	case "paused":
		stateCtr.set(c.ID, "paused")
	case "running":
		stateCtr.set(c.ID, "running")
	default:
		stateCtr.set(c.ID, "stopped")
	}
}
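
// handleContainerExit reconciles the daemon's view of a container that has
// exited: it collects the exit status from containerd (or from the exit event,
// if one was received), releases the container's resources, and either
// restarts the container according to its restart policy or marks it stopped.
// e may be nil when no exit event was received for the container.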
func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontainerdtypes.EventInfo) error {
	var exitStatus container.ExitStatus
	c.Lock()

	cfg := daemon.config()

	// Health checks will be automatically restarted if/when the
	// container is started again.
	daemon.stopHealthchecks(c)
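
	// If containerd still knows about the task, delete it now and use the exit
	// status it reports as a fallback; values from the exit event (if any)
	// take precedence below.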
	tsk, ok := c.Task()
	if ok {
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		es, err := tsk.Delete(ctx)
		cancel()
		if err != nil {
			log.G(ctx).WithFields(log.Fields{
				"error":     err,
				"container": c.ID,
			}).Warn("failed to delete container from containerd")
		} else {
			exitStatus = container.ExitStatus{
				ExitCode: int(es.ExitCode()),
				ExitedAt: es.ExitTime(),
			}
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	c.StreamConfig.Wait(ctx)
	cancel()

	c.Reset(false)

	if e != nil {
		exitStatus.ExitCode = int(e.ExitCode)
		exitStatus.ExitedAt = e.ExitedAt
		if e.Error != nil {
			c.SetError(e.Error)
		}
	}
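
	// Ask the restart manager whether this container should be restarted,
	// given the exit code, how long it ran, and whether the daemon is
	// shutting down or the container was manually stopped.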
	daemonShutdown := daemon.IsShuttingDown()
	execDuration := time.Since(c.StartedAt)
	restart, wait, err := c.RestartManager().ShouldRestart(uint32(exitStatus.ExitCode), daemonShutdown || c.HasBeenManuallyStopped, execDuration)
	if err != nil {
		log.G(ctx).WithFields(log.Fields{
			"error":                  err,
			"container":              c.ID,
			"restartCount":           c.RestartCount,
			"exitStatus":             exitStatus,
			"daemonShuttingDown":     daemonShutdown,
			"hasBeenManuallyStopped": c.HasBeenManuallyStopped,
			"execDuration":           execDuration,
		}).Warn("ShouldRestart failed, container will not be restarted")
		restart = false
	}
	attributes := map[string]string{
		"exitCode":     strconv.Itoa(exitStatus.ExitCode),
		"execDuration": strconv.Itoa(int(execDuration.Seconds())),
	}
	daemon.Cleanup(c)

	if restart {
		c.RestartCount++
		log.G(ctx).WithFields(log.Fields{
			"container":     c.ID,
			"restartCount":  c.RestartCount,
			"exitStatus":    exitStatus,
			"manualRestart": c.HasBeenManuallyRestarted,
		}).Debug("Restarting container")
		c.SetRestarting(&exitStatus)
	} else {
		c.SetStopped(&exitStatus)
		if !c.HasBeenManuallyRestarted {
			defer daemon.autoRemove(&cfg.Config, c)
		}
	}
	defer c.Unlock() // needs to be called before autoRemove

	daemon.setStateCounter(c)
	cpErr := c.CheckpointTo(daemon.containersReplica)

	daemon.LogContainerEventWithAttributes(c, "die", attributes)
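
	// The restart itself happens asynchronously: wait for the delay signalled
	// by the restart manager, then start the container again with the daemon
	// configuration that is current at that time.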
	if restart {
		go func() {
			err := <-wait
			if err == nil {
				// daemon.netController is initialized when the daemon is restoring
				// containers, but containerStart needs daemon.netController. To avoid
				// a panic during daemon startup, wait here until the restore is done.
				daemon.waitForStartupDone()
				cfg := daemon.config() // Apply the most up-to-date daemon config to the restarted container.
				if err = daemon.containerStart(context.Background(), cfg, c, "", "", false); err != nil {
					log.G(ctx).Debugf("failed to restart container: %+v", err)
				}
			}
			if err != nil {
				c.Lock()
				c.SetStopped(&exitStatus)
				daemon.setStateCounter(c)
				c.CheckpointTo(daemon.containersReplica)
				c.Unlock()
				defer daemon.autoRemove(&cfg.Config, c)
				if err != restartmanager.ErrRestartCanceled {
					log.G(ctx).Errorf("restartmanager wait error: %+v", err)
				}
			}
		}()
	}

	return cpErr
}

// ProcessEvent is called by libcontainerd whenever an event occurs
func (daemon *Daemon) ProcessEvent(id string, e libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) error {
	c, err := daemon.GetContainer(id)
	if err != nil {
		return errors.Wrapf(err, "could not find container %s", id)
	}

	switch e {
	case libcontainerdtypes.EventOOM:
		// StateOOM is Linux specific and should never be hit on Windows
		if isWindows {
			return errors.New("received StateOOM from libcontainerd on Windows. This should never happen")
		}

		c.Lock()
		defer c.Unlock()
		c.OOMKilled = true
		daemon.updateHealthMonitor(c)
		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
			return err
		}

		daemon.LogContainerEvent(c, "oom")
	case libcontainerdtypes.EventExit:
		if ei.ProcessID == ei.ContainerID {
			return daemon.handleContainerExit(c, &ei)
		}
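
		// This exit event is for an exec'd process rather than the container's
		// task; if the exec is no longer tracked, report exit code 127.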
		exitCode := 127
		if execConfig := c.ExecCommands.Get(ei.ProcessID); execConfig != nil {
			ec := int(ei.ExitCode)
			execConfig.Lock()
			defer execConfig.Unlock()

			// Remove the exec command from the container's store only and not the
			// daemon's store so that the exec command can be inspected. Remove it
			// before mutating execConfig to maintain the invariant that
			// c.ExecCommands only contains execs that have not exited.
			c.ExecCommands.Delete(execConfig.ID)

			execConfig.ExitCode = &ec
			execConfig.Running = false

			ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
			execConfig.StreamConfig.Wait(ctx)
			cancel()

			if err := execConfig.CloseStreams(); err != nil {
				log.G(ctx).Errorf("failed to cleanup exec %s streams: %s", c.ID, err)
			}

			exitCode = ec

			// If the exec failed at start in such a way that containerd
			// publishes an exit event for it, we will race processing the event
			// with daemon.ContainerExecStart() removing the exec from
			// c.ExecCommands. If we win the race, we will find that there is no
			// process to clean up. (And ContainerExecStart will clobber the
			// exit code we set.) Prevent a nil-dereference panic in that
			// situation to restore the status quo where this is merely a
			// logical race condition.
			if execConfig.Process != nil {
				go func() {
					if _, err := execConfig.Process.Delete(context.Background()); err != nil {
						log.G(ctx).WithFields(log.Fields{
							"error":     err,
							"container": ei.ContainerID,
							"process":   ei.ProcessID,
						}).Warn("failed to delete process")
					}
				}()
			}
		}

		attributes := map[string]string{
			"execID":   ei.ProcessID,
			"exitCode": strconv.Itoa(exitCode),
		}
		daemon.LogContainerEventWithAttributes(c, "exec_die", attributes)
	case libcontainerdtypes.EventStart:
		c.Lock()
		defer c.Unlock()

		// This is here to handle a start that was not initiated by docker.
		if !c.Running {
			ctr, err := daemon.containerd.LoadContainer(context.Background(), c.ID)
			if err != nil {
				if errdefs.IsNotFound(err) {
					// The container was started outside of docker, so it could also have
					// been deleted outside of docker before we got around to loading it
					// from containerd.
					log.G(context.TODO()).WithFields(log.Fields{
						"error":     err,
						"container": c.ID,
					}).Debug("could not load containerd container for start event")
					return nil
				}
				return err
			}
			tsk, err := ctr.Task(context.Background())
			if err != nil {
				if errdefs.IsNotFound(err) {
					log.G(context.TODO()).WithFields(log.Fields{
						"error":     err,
						"container": c.ID,
					}).Debug("failed to load task for externally-started container")
					return nil
				}
				return err
			}
			c.SetRunning(ctr, tsk, false)
			c.HasBeenManuallyStopped = false
			c.HasBeenStartedBefore = true
			daemon.setStateCounter(c)

			daemon.initHealthMonitor(c)

			if err := c.CheckpointTo(daemon.containersReplica); err != nil {
				return err
			}
			daemon.LogContainerEvent(c, "start")
		}
	case libcontainerdtypes.EventPaused:
		c.Lock()
		defer c.Unlock()

		if !c.Paused {
			c.Paused = true
			daemon.setStateCounter(c)
			daemon.updateHealthMonitor(c)
			if err := c.CheckpointTo(daemon.containersReplica); err != nil {
				return err
			}
			daemon.LogContainerEvent(c, "pause")
		}
	case libcontainerdtypes.EventResumed:
		c.Lock()
		defer c.Unlock()

		if c.Paused {
			c.Paused = false
			daemon.setStateCounter(c)
			daemon.updateHealthMonitor(c)

			if err := c.CheckpointTo(daemon.containersReplica); err != nil {
				return err
			}
			daemon.LogContainerEvent(c, "unpause")
		}
	}

	return nil
}
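
// autoRemove removes the container, along with its anonymous volumes, if it
// was created with AutoRemove enabled (docker run --rm).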
func (daemon *Daemon) autoRemove(cfg *config.Config, c *container.Container) {
	c.Lock()
	ar := c.HostConfig.AutoRemove
	c.Unlock()
	if !ar {
		return
	}

	err := daemon.containerRm(cfg, c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true})
	if err == nil {
		return
	}
	if c := daemon.containers.Get(c.ID); c == nil {
		return
	}

	log.G(context.TODO()).WithFields(log.Fields{"error": err, "container": c.ID}).Error("error removing container")
}