exec.go

package daemon

import (
	"fmt"
	"io"
	"strings"
	"time"

	"golang.org/x/net/context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/strslice"
	"github.com/docker/docker/container"
	"github.com/docker/docker/container/stream"
	"github.com/docker/docker/daemon/exec"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/signal"
	"github.com/docker/docker/pkg/term"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// Seconds to wait after sending TERM before trying KILL
const termProcessTimeout = 10

func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) {
	// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
	container.ExecCommands.Add(config.ID, config)
	// Storing execs in daemon for easy access via Engine API.
	d.execCommands.Add(config.ID, config)
}

// ExecExists looks up the exec instance and returns a bool indicating
// whether it exists. It will also return the error produced by `getExecConfig`.
func (d *Daemon) ExecExists(name string) (bool, error) {
	if _, err := d.getExecConfig(name); err != nil {
		return false, err
	}
	return true, nil
}

// getExecConfig looks up the exec instance by name. If the container associated
// with the exec instance is stopped or paused, it will return an error.
func (d *Daemon) getExecConfig(name string) (*exec.Config, error) {
	ec := d.execCommands.Get(name)

	// If the exec is found but its container is not in the daemon's list of
	// containers then it must have been deleted, in which case instead of
	// saying the container isn't running, we should return a 404 so that
	// the user sees the same error now that they will after the
	// 5 minute clean-up loop is run which erases old/dead execs.
	if ec != nil {
		if container := d.containers.Get(ec.ContainerID); container != nil {
			if !container.IsRunning() {
				return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String())
			}
			if container.IsPaused() {
				return nil, errExecPaused(container.ID)
			}
			if container.IsRestarting() {
				return nil, errContainerIsRestarting(container.ID)
			}
			return ec, nil
		}
	}
	return nil, errExecNotFound(name)
}

func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) {
	container.ExecCommands.Delete(execConfig.ID, execConfig.Pid)
	d.execCommands.Delete(execConfig.ID, execConfig.Pid)
}

func (d *Daemon) getActiveContainer(name string) (*container.Container, error) {
	container, err := d.GetContainer(name)
	if err != nil {
		return nil, err
	}

	if !container.IsRunning() {
		return nil, errNotRunning(container.ID)
	}
	if container.IsPaused() {
		return nil, errExecPaused(name)
	}
	if container.IsRestarting() {
		return nil, errContainerIsRestarting(container.ID)
	}
	return container, nil
}

// ContainerExecCreate sets up an exec in a running container.
func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) {
	cntr, err := d.getActiveContainer(name)
	if err != nil {
		return "", err
	}

	cmd := strslice.StrSlice(config.Cmd)
	entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd)

	keys := []byte{}
	if config.DetachKeys != "" {
		keys, err = term.ToBytes(config.DetachKeys)
		if err != nil {
			err = fmt.Errorf("Invalid escape keys (%s) provided", config.DetachKeys)
			return "", err
		}
	}

	execConfig := exec.NewConfig()
	execConfig.OpenStdin = config.AttachStdin
	execConfig.OpenStdout = config.AttachStdout
	execConfig.OpenStderr = config.AttachStderr
	execConfig.ContainerID = cntr.ID
	execConfig.DetachKeys = keys
	execConfig.Entrypoint = entrypoint
	execConfig.Args = args
	execConfig.Tty = config.Tty
	execConfig.Privileged = config.Privileged
	execConfig.User = config.User
	execConfig.WorkingDir = config.WorkingDir
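
	// Build the exec's environment from the container's base environment plus
	// any variables supplied in the request, and fall back to the container's
	// configured user and working directory when the request leaves them empty.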
	linkedEnv, err := d.setupLinkedContainers(cntr)
	if err != nil {
		return "", err
	}
	execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(config.Tty, linkedEnv), config.Env)
	if len(execConfig.User) == 0 {
		execConfig.User = cntr.Config.User
	}
	if len(execConfig.WorkingDir) == 0 {
		execConfig.WorkingDir = cntr.Config.WorkingDir
	}

	d.registerExecCommand(cntr, execConfig)

	attributes := map[string]string{
		"execID": execConfig.ID,
	}
	d.LogContainerEventWithAttributes(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " "), attributes)

	return execConfig.ID, nil
}

// ContainerExecStart starts a previously set up exec instance. The
// standard streams are attached to the provided stdin, stdout, and stderr.
// If ctx is cancelled, the process is terminated.
func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.Reader, stdout io.Writer, stderr io.Writer) (err error) {
	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
	)

	ec, err := d.getExecConfig(name)
	if err != nil {
		return errExecNotFound(name)
	}

	ec.Lock()
	if ec.ExitCode != nil {
		ec.Unlock()
		err := fmt.Errorf("Error: Exec command %s has already run", ec.ID)
		return stateConflictError{err}
	}

	if ec.Running {
		ec.Unlock()
		return stateConflictError{fmt.Errorf("Error: Exec command %s is already running", ec.ID)}
	}
	ec.Running = true
	ec.Unlock()

	c := d.containers.Get(ec.ContainerID)
	logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID)
	attributes := map[string]string{
		"execID": ec.ID,
	}
	d.LogContainerEventWithAttributes(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " "), attributes)
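
	// If starting the exec fails, mark it as finished with exit code 126
	// (conventionally "command cannot execute"), close its streams, and
	// remove it from the container's exec store so it is not left half-started.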
	defer func() {
		if err != nil {
			ec.Lock()
			ec.Running = false
			exitCode := 126
			ec.ExitCode = &exitCode
			if err := ec.CloseStreams(); err != nil {
				logrus.Errorf("failed to cleanup exec %s streams: %s", ec.ID, err)
			}
			ec.Unlock()
			c.ExecCommands.Delete(ec.ID, ec.Pid)
		}
	}()
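
	// Copy the caller's stdin through a pipe in a background goroutine so the
	// exec reads from a stream that reaches EOF once the caller's input ends.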
	if ec.OpenStdin && stdin != nil {
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			defer logrus.Debug("Closing buffered stdin pipe")
			pools.Copy(w, stdin)
		}()
		cStdin = r
	}
	if ec.OpenStdout {
		cStdout = stdout
	}
	if ec.OpenStderr {
		cStderr = stderr
	}

	if ec.OpenStdin {
		ec.StreamConfig.NewInputPipes()
	} else {
		ec.StreamConfig.NewNopInputPipe()
	}
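
	// Build the OCI process spec for the exec from the stored configuration,
	// defaulting the working directory to "/" when none was set.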
	p := &specs.Process{
		Args:     append([]string{ec.Entrypoint}, ec.Args...),
		Env:      ec.Env,
		Terminal: ec.Tty,
		Cwd:      ec.WorkingDir,
	}
	if p.Cwd == "" {
		p.Cwd = "/"
	}

	if err := d.execSetPlatformOpt(c, ec, p); err != nil {
		return err
	}

	attachConfig := stream.AttachConfig{
		TTY:        ec.Tty,
		UseStdin:   cStdin != nil,
		UseStdout:  cStdout != nil,
		UseStderr:  cStderr != nil,
		Stdin:      cStdin,
		Stdout:     cStdout,
		Stderr:     cStderr,
		DetachKeys: ec.DetachKeys,
		CloseStdin: true,
	}
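
	// Attach the caller's streams before the process is started; CopyStreams
	// returns a channel that delivers the attach result once the streams are done.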
	ec.StreamConfig.AttachStreams(&attachConfig)
	attachErr := ec.StreamConfig.CopyStreams(ctx, &attachConfig)

	// Synchronize with libcontainerd event loop
	ec.Lock()
	c.ExecCommands.Lock()
	systemPid, err := d.containerd.Exec(ctx, c.ID, ec.ID, p, cStdin != nil, ec.InitializeStdio)
	if err != nil {
		c.ExecCommands.Unlock()
		ec.Unlock()
		return translateContainerdStartErr(ec.Entrypoint, ec.SetExitCode, err)
	}
	ec.Pid = systemPid
	c.ExecCommands.Unlock()
	ec.Unlock()
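
	// Wait for the attached streams to finish or the context to be cancelled.
	// On cancellation, ask containerd to deliver SIGTERM to the exec'd process
	// and escalate to SIGKILL if it has not exited after termProcessTimeout seconds.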
	select {
	case <-ctx.Done():
		logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID)
		d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["TERM"]))
		select {
		case <-time.After(termProcessTimeout * time.Second):
			logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout)
			d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["KILL"]))
		case <-attachErr:
			// TERM signal worked
		}
		return fmt.Errorf("context cancelled")
	case err := <-attachErr:
		if err != nil {
			if _, ok := err.(term.EscapeError); !ok {
				return errors.Wrap(systemError{err}, "exec attach failed")
			}
			attributes := map[string]string{
				"execID": ec.ID,
			}
			d.LogContainerEventWithAttributes(c, "exec_detach", attributes)
		}
	}
	return nil
}

// execCommandGC runs a ticker to clean up the daemon references
// of exec configs that are no longer part of the container.
func (d *Daemon) execCommandGC() {
	for range time.Tick(5 * time.Minute) {
		var (
			cleaned          int
			liveExecCommands = d.containerExecIds()
		)
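		// An exec whose container no longer lists it is first marked removable;
		// it is then deleted from the daemon's store on a later tick.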
		for id, config := range d.execCommands.Commands() {
			if config.CanRemove {
				cleaned++
				d.execCommands.Delete(id, config.Pid)
			} else {
				if _, exists := liveExecCommands[id]; !exists {
					config.CanRemove = true
				}
			}
		}
		if cleaned > 0 {
			logrus.Debugf("clean %d unused exec commands", cleaned)
		}
	}
}

// containerExecIds returns a list of all the current exec ids that are in use
// and running inside a container.
func (d *Daemon) containerExecIds() map[string]struct{} {
	ids := map[string]struct{}{}
	for _, c := range d.containers.List() {
		for _, id := range c.ExecCommands.List() {
			ids[id] = struct{}{}
		}
	}
	return ids
}