remote_daemon.go

package supervisor // import "github.com/docker/docker/libcontainerd/supervisor"

import (
	"context"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/defaults"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/services/server/config"
	"github.com/containerd/containerd/sys"
	"github.com/docker/docker/pkg/pidfile"
	"github.com/docker/docker/pkg/process"
	"github.com/docker/docker/pkg/system"
	"github.com/pelletier/go-toml"
	"github.com/pkg/errors"
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

const (
	maxConnectionRetryCount = 3
	healthCheckTimeout      = 3 * time.Second
	shutdownTimeout         = 15 * time.Second
	startupTimeout          = 15 * time.Second
	configFile              = "containerd.toml"
	binaryName              = "containerd"
	pidFile                 = "containerd.pid"
)

type remote struct {
	config.Config

	// configFile is the location where the generated containerd configuration
	// file is saved.
	configFile string

	// daemonPid is the PID of the containerd process, or -1 if it is not running.
	daemonPid int
	// pidFile is the path of the file the containerd PID is written to.
	pidFile string
	logger  *log.Entry

	// daemonWaitCh is closed when the containerd process exits.
	daemonWaitCh chan struct{}
	// daemonStartCh receives an error if the initial start of containerd fails,
	// and is closed once containerd reports healthy.
	daemonStartCh chan error
	// daemonStopCh is closed once monitoring of the daemon has stopped.
	daemonStopCh chan struct{}

	stateDir string

	// oomScore adjusts the OOM score for the containerd process.
	oomScore int
	// logLevel overrides the containerd logging-level through the --log-level
	// command-line option.
	logLevel string
}

// Daemon represents a running containerd daemon
type Daemon interface {
	WaitTimeout(time.Duration) error
	Address() string
}

// DaemonOpt allows configuring parameters of container daemons
type DaemonOpt func(c *remote) error

// Start starts a containerd daemon and monitors it
func Start(ctx context.Context, rootDir, stateDir string, opts ...DaemonOpt) (Daemon, error) {
	r := &remote{
		stateDir: stateDir,
		Config: config.Config{
			Version: 2,
			Root:    filepath.Join(rootDir, "daemon"),
			State:   filepath.Join(stateDir, "daemon"),
		},
		configFile:    filepath.Join(stateDir, configFile),
		daemonPid:     -1,
		pidFile:       filepath.Join(stateDir, pidFile),
		logger:        log.G(ctx).WithField("module", "libcontainerd"),
		daemonStartCh: make(chan error, 1),
		daemonStopCh:  make(chan struct{}),
	}

	for _, opt := range opts {
		if err := opt(r); err != nil {
			return nil, err
		}
	}
	r.setDefaults()

	if err := system.MkdirAll(stateDir, 0o700); err != nil {
		return nil, err
	}

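	// Supervise containerd in the background; Start only returns once the daemon
	// reports healthy, the first start attempt fails, or startupTimeout elapses.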
	go r.monitorDaemon(ctx)

	timeout := time.NewTimer(startupTimeout)
	defer timeout.Stop()

	select {
	case <-timeout.C:
		return nil, errors.New("timeout waiting for containerd to start")
	case err := <-r.daemonStartCh:
		if err != nil {
			return nil, err
		}
	}

	return r, nil
}

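// WaitTimeout blocks until the containerd daemon has stopped, returning an
// error if it is still running after the given timeout.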
func (r *remote) WaitTimeout(d time.Duration) error {
	timeout := time.NewTimer(d)
	defer timeout.Stop()

	select {
	case <-timeout.C:
		return errors.New("timeout waiting for containerd to stop")
	case <-r.daemonStopCh:
	}

	return nil
}

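// Address returns the GRPC address the containerd daemon listens on.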
func (r *remote) Address() string {
	return r.GRPC.Address
}

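// getContainerdConfig writes the generated containerd configuration to
// r.configFile and returns the path to the written file.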
func (r *remote) getContainerdConfig() (string, error) {
	f, err := os.OpenFile(r.configFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o600)
	if err != nil {
		return "", errors.Wrapf(err, "failed to open containerd config file (%s)", r.configFile)
	}
	defer f.Close()

	if err := toml.NewEncoder(f).Encode(r); err != nil {
		return "", errors.Wrapf(err, "failed to write containerd config file (%s)", r.configFile)
	}
	return r.configFile, nil
}

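// startContainerd starts a new containerd process, unless the pid file points
// to a containerd instance left over from a previous run, in which case that
// process is adopted instead.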
func (r *remote) startContainerd() error {
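	// If a previous containerd instance is recorded in the pid file, adopt it
	// rather than starting a new process.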
	pid, err := pidfile.Read(r.pidFile)
	if err != nil && !errors.Is(err, os.ErrNotExist) {
		return err
	}
	if pid > 0 {
		r.daemonPid = pid
		r.logger.WithField("pid", pid).Infof("%s is still running", binaryName)
		return nil
	}

	cfgFile, err := r.getContainerdConfig()
	if err != nil {
		return err
	}

	args := []string{"--config", cfgFile}
	if r.logLevel != "" {
		args = append(args, "--log-level", r.logLevel)
	}

	cmd := exec.Command(binaryName, args...)
	// redirect containerd logs to docker logs
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.SysProcAttr = containerdSysProcAttr()
	// clear the NOTIFY_SOCKET from the env when starting containerd
	cmd.Env = nil
	for _, e := range os.Environ() {
		if !strings.HasPrefix(e, "NOTIFY_SOCKET") {
			cmd.Env = append(cmd.Env, e)
		}
	}

	startedCh := make(chan error)
	go func() {
		// On Linux, when cmd.SysProcAttr.Pdeathsig is set,
		// the signal is sent to the subprocess when the creating thread
		// terminates. The runtime terminates a thread if a goroutine
		// exits while locked to it. Prevent the containerd process
		// from getting killed prematurely by ensuring that the thread
		// used to start it remains alive until it or the daemon process
		// exits. See https://go.dev/issue/27505 for more details.
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		err := cmd.Start()
		startedCh <- err
		if err != nil {
			return
		}

		r.daemonWaitCh = make(chan struct{})
		// Reap our child when needed
		if err := cmd.Wait(); err != nil {
			r.logger.WithError(err).Errorf("containerd did not exit successfully")
		}
		close(r.daemonWaitCh)
	}()
	if err := <-startedCh; err != nil {
		return err
	}

	r.daemonPid = cmd.Process.Pid

	if err := r.adjustOOMScore(); err != nil {
		r.logger.WithError(err).Warn("failed to adjust OOM score")
	}

	err = pidfile.Write(r.pidFile, r.daemonPid)
	if err != nil {
		process.Kill(r.daemonPid)
		return errors.Wrap(err, "libcontainerd: failed to save daemon pid to disk")
	}

	r.logger.WithField("pid", r.daemonPid).WithField("address", r.Address()).Infof("started new %s process", binaryName)

	return nil
}

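// adjustOOMScore applies the configured OOM score adjustment to the containerd process.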
func (r *remote) adjustOOMScore() error {
	if r.oomScore == 0 || r.daemonPid <= 1 {
		// no score configured, or daemonPid contains an invalid PID (we don't
		// expect containerd to be running as PID 1 :)).
		return nil
	}
	if err := sys.SetOOMScore(r.daemonPid, r.oomScore); err != nil {
		return errors.Wrap(err, "failed to adjust OOM score for containerd process")
	}
	return nil
}

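// monitorDaemon supervises containerd: it (re)starts the daemon when needed,
// periodically health-checks it over GRPC, and restarts it when it stops
// responding. It returns when ctx is cancelled.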
func (r *remote) monitorDaemon(ctx context.Context) {
	var (
		transientFailureCount = 0
		client                *containerd.Client
		err                   error
		delay                 time.Duration
		timer                 = time.NewTimer(0)
		started               bool
	)

	defer func() {
		if r.daemonPid != -1 {
			r.stopDaemon()
		}

		// cleanup some files
		_ = os.Remove(r.pidFile)

		r.platformCleanup()

		close(r.daemonStopCh)
		timer.Stop()
	}()

	// ensure no races on sending to timer.C even though there is a 0 duration.
	if !timer.Stop() {
		<-timer.C
	}

	for {
		timer.Reset(delay)

		select {
		case <-ctx.Done():
			r.logger.Info("stopping healthcheck following graceful shutdown")
			if client != nil {
				client.Close()
			}
			return
		case <-timer.C:
		}

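		// No containerd process is currently tracked: start (or adopt) one and
		// create a client for health checking.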
		if r.daemonPid == -1 {
			if r.daemonWaitCh != nil {
				select {
				case <-ctx.Done():
					r.logger.Info("stopping containerd startup following graceful shutdown")
					return
				case <-r.daemonWaitCh:
				}
			}

			os.RemoveAll(r.GRPC.Address)
			if err := r.startContainerd(); err != nil {
				if !started {
					r.daemonStartCh <- err
					return
				}
				r.logger.WithError(err).Error("failed restarting containerd")
				delay = 50 * time.Millisecond
				continue
			}

			client, err = containerd.New(
				r.GRPC.Address,
				containerd.WithTimeout(60*time.Second),
				containerd.WithDialOpts([]grpc.DialOption{
					grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),
					grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),
					grpc.WithTransportCredentials(insecure.NewCredentials()),
					grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
					grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
				}),
			)
			if err != nil {
				r.logger.WithError(err).Error("failed connecting to containerd")
				delay = 100 * time.Millisecond
				continue
			}
			r.logger.WithField("address", r.GRPC.Address).Debug("created containerd monitoring client")
		}

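		// Health-check containerd over GRPC. Transient failures are retried with a
		// backoff; once retries are exhausted and the process is gone, drop the
		// client so a new containerd is started below.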
		if client != nil {
			tctx, cancel := context.WithTimeout(ctx, healthCheckTimeout)
			_, err := client.IsServing(tctx)
			cancel()
			if err == nil {
				if !started {
					close(r.daemonStartCh)
					started = true
				}

				transientFailureCount = 0
				select {
				case <-r.daemonWaitCh:
				case <-ctx.Done():
				}

				// Set a small delay in case there is a recurring failure (or bug in this code)
				// to ensure we don't end up in a super tight loop.
				delay = 500 * time.Millisecond
				continue
			}

			r.logger.WithError(err).WithField("binary", binaryName).Debug("daemon is not responding")

			transientFailureCount++
			if transientFailureCount < maxConnectionRetryCount || process.Alive(r.daemonPid) {
				delay = time.Duration(transientFailureCount) * 200 * time.Millisecond
				continue
			}
			client.Close()
			client = nil
		}

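		// The daemon is no longer healthy: kill any remaining process and reset
		// state so the next iteration restarts containerd immediately.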
		if process.Alive(r.daemonPid) {
			r.logger.WithField("pid", r.daemonPid).Info("killing and restarting containerd")
			r.killDaemon()
		}

		r.daemonPid = -1
		delay = 0
		transientFailureCount = 0
	}
}