container.go 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793
  1. // DEPRECATION NOTICE. PLEASE DO NOT ADD ANYTHING TO THIS FILE.
  2. //
  3. // For additional comments see server/server.go
  4. //
  5. package server
  6. import (
  7. "bytes"
  8. "encoding/json"
  9. "errors"
  10. "fmt"
  11. "io"
  12. "log"
  13. "os"
  14. "os/exec"
  15. "path"
  16. "path/filepath"
  17. "strconv"
  18. "strings"
  19. "syscall"
  20. "time"
  21. "github.com/docker/docker/daemon"
  22. "github.com/docker/docker/engine"
  23. "github.com/docker/docker/graph"
  24. "github.com/docker/docker/pkg/graphdb"
  25. "github.com/docker/docker/pkg/parsers"
  26. "github.com/docker/docker/pkg/signal"
  27. "github.com/docker/docker/pkg/tailfile"
  28. "github.com/docker/docker/runconfig"
  29. "github.com/docker/docker/utils"
  30. )
  31. // ContainerKill send signal to the container
  32. // If no signal is given (sig 0), then Kill with SIGKILL and wait
  33. // for the container to exit.
  34. // If a signal is given, then just send it to the container and return.
  35. func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
  36. if n := len(job.Args); n < 1 || n > 2 {
  37. return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
  38. }
  39. var (
  40. name = job.Args[0]
  41. sig uint64
  42. err error
  43. )
  44. // If we have a signal, look at it. Otherwise, do nothing
  45. if len(job.Args) == 2 && job.Args[1] != "" {
  46. // Check if we passed the signal as a number:
  47. // The largest legal signal is 31, so let's parse on 5 bits
  48. sig, err = strconv.ParseUint(job.Args[1], 10, 5)
  49. if err != nil {
  50. // The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL")
  51. sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")])
  52. }
  53. if sig == 0 {
  54. return job.Errorf("Invalid signal: %s", job.Args[1])
  55. }
  56. }
  57. if container := srv.daemon.Get(name); container != nil {
  58. // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
  59. if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
  60. if err := container.Kill(); err != nil {
  61. return job.Errorf("Cannot kill container %s: %s", name, err)
  62. }
  63. srv.LogEvent("kill", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  64. } else {
  65. // Otherwise, just send the requested signal
  66. if err := container.KillSig(int(sig)); err != nil {
  67. return job.Errorf("Cannot kill container %s: %s", name, err)
  68. }
  69. // FIXME: Add event for signals
  70. }
  71. } else {
  72. return job.Errorf("No such container: %s", name)
  73. }
  74. return engine.StatusOK
  75. }
  76. func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
  77. if len(job.Args) != 1 {
  78. return job.Errorf("Usage: %s container_id", job.Name)
  79. }
  80. name := job.Args[0]
  81. if container := srv.daemon.Get(name); container != nil {
  82. data, err := container.Export()
  83. if err != nil {
  84. return job.Errorf("%s: %s", name, err)
  85. }
  86. defer data.Close()
  87. // Stream the entire contents of the container (basically a volatile snapshot)
  88. if _, err := io.Copy(job.Stdout, data); err != nil {
  89. return job.Errorf("%s: %s", name, err)
  90. }
  91. // FIXME: factor job-specific LogEvent to engine.Job.Run()
  92. srv.LogEvent("export", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  93. return engine.StatusOK
  94. }
  95. return job.Errorf("No such container: %s", name)
  96. }
// ContainerTop lists the processes running inside a container by running
// `ps` on the HOST and keeping only the rows whose PID the execution
// driver reports as belonging to the container. An optional second
// argument supplies the ps flags (default "-ef"). Writes a Titles list
// and a Processes table to job.Stdout.
func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
	if len(job.Args) != 1 && len(job.Args) != 2 {
		return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
	}
	var (
		name   = job.Args[0]
		psArgs = "-ef" // default ps flags when the caller supplies none
	)
	if len(job.Args) == 2 && job.Args[1] != "" {
		psArgs = job.Args[1]
	}
	if container := srv.daemon.Get(name); container != nil {
		if !container.State.IsRunning() {
			return job.Errorf("Container %s is not running", name)
		}
		// Pids of every process inside the container, per the exec driver.
		pids, err := srv.daemon.ExecutionDriver().GetPidsForContainer(container.ID)
		if err != nil {
			return job.Error(err)
		}
		// Run ps on the host; its first line is the column header.
		output, err := exec.Command("ps", psArgs).Output()
		if err != nil {
			return job.Errorf("Error running ps: %s", err)
		}
		lines := strings.Split(string(output), "\n")
		header := strings.Fields(lines[0])
		out := &engine.Env{}
		out.SetList("Titles", header)
		// Locate the PID column; it is required to match rows to the container.
		pidIndex := -1
		for i, name := range header {
			if name == "PID" {
				pidIndex = i
			}
		}
		if pidIndex == -1 {
			return job.Errorf("Couldn't find PID field in ps output")
		}
		processes := [][]string{}
		for _, line := range lines[1:] {
			if len(line) == 0 {
				continue
			}
			fields := strings.Fields(line)
			p, err := strconv.Atoi(fields[pidIndex])
			if err != nil {
				return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
			}
			// Keep only rows whose pid belongs to this container.
			for _, pid := range pids {
				if pid == p {
					// Make sure number of fields equals number of header titles
					// merging "overhanging" fields (e.g. a CMD with spaces)
					// into the final column.
					process := fields[:len(header)-1]
					process = append(process, strings.Join(fields[len(header)-1:], " "))
					processes = append(processes, process)
				}
			}
		}
		out.SetJson("Processes", processes)
		out.WriteTo(job.Stdout)
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
  159. func (srv *Server) ContainerChanges(job *engine.Job) engine.Status {
  160. if n := len(job.Args); n != 1 {
  161. return job.Errorf("Usage: %s CONTAINER", job.Name)
  162. }
  163. name := job.Args[0]
  164. if container := srv.daemon.Get(name); container != nil {
  165. outs := engine.NewTable("", 0)
  166. changes, err := container.Changes()
  167. if err != nil {
  168. return job.Error(err)
  169. }
  170. for _, change := range changes {
  171. out := &engine.Env{}
  172. if err := out.Import(change); err != nil {
  173. return job.Error(err)
  174. }
  175. outs.Add(out)
  176. }
  177. if _, err := outs.WriteListTo(job.Stdout); err != nil {
  178. return job.Error(err)
  179. }
  180. } else {
  181. return job.Errorf("No such container: %s", name)
  182. }
  183. return engine.StatusOK
  184. }
// Containers implements the container listing ("docker ps"): it writes a
// table of containers to job.Stdout, newest first. Job env options:
//   all (bool)      - include stopped containers
//   limit (int)     - maximum number of rows to display
//   since, before   - bound the listing by a container name/id
//   size (bool)     - include SizeRw / SizeRootFs per container
func (srv *Server) Containers(job *engine.Job) engine.Status {
	var (
		foundBefore bool
		displayed   int
		all         = job.GetenvBool("all")
		since       = job.Getenv("since")
		before      = job.Getenv("before")
		n           = job.GetenvInt("limit")
		size        = job.GetenvBool("size")
	)
	outs := engine.NewTable("Created", 0)
	// Collect every registered name (graph path) for each container id.
	names := map[string][]string{}
	srv.daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
		names[e.ID()] = append(names[e.ID()], p)
		return nil
	}, -1)
	// Resolve the optional before/since boundary containers up front so a
	// bad reference fails fast.
	var beforeCont, sinceCont *daemon.Container
	if before != "" {
		beforeCont = srv.daemon.Get(before)
		if beforeCont == nil {
			return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
		}
	}
	if since != "" {
		sinceCont = srv.daemon.Get(since)
		if sinceCont == nil {
			return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
		}
	}
	// Sentinel error used to stop iteration once limit/since is reached.
	errLast := errors.New("last container")
	// writeCont renders one container row, honoring the filters above; it
	// mutates foundBefore/displayed captured from the enclosing scope.
	writeCont := func(container *daemon.Container) error {
		container.Lock()
		defer container.Unlock()
		// Hide stopped containers unless -a or an explicit bound asks for them.
		if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
			return nil
		}
		// Skip rows until the "before" container has been passed.
		if before != "" && !foundBefore {
			if container.ID == beforeCont.ID {
				foundBefore = true
			}
			return nil
		}
		if n > 0 && displayed == n {
			return errLast
		}
		// Stop entirely once the "since" container is reached.
		if since != "" {
			if container.ID == sinceCont.ID {
				return errLast
			}
		}
		displayed++
		out := &engine.Env{}
		out.Set("Id", container.ID)
		out.SetList("Names", names[container.ID])
		out.Set("Image", srv.daemon.Repositories().ImageName(container.Image))
		if len(container.Args) > 0 {
			// Quote args containing spaces so the displayed command is unambiguous.
			args := []string{}
			for _, arg := range container.Args {
				if strings.Contains(arg, " ") {
					args = append(args, fmt.Sprintf("'%s'", arg))
				} else {
					args = append(args, arg)
				}
			}
			argsAsString := strings.Join(args, " ")
			out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString))
		} else {
			out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
		}
		out.SetInt64("Created", container.Created.Unix())
		out.Set("Status", container.State.String())
		str, err := container.NetworkSettings.PortMappingAPI().ToListString()
		if err != nil {
			return err
		}
		out.Set("Ports", str)
		if size {
			sizeRw, sizeRootFs := container.GetSize()
			out.SetInt64("SizeRw", sizeRw)
			out.SetInt64("SizeRootFs", sizeRootFs)
		}
		outs.Add(out)
		return nil
	}
	for _, container := range srv.daemon.List() {
		if err := writeCont(container); err != nil {
			// errLast is the normal "done early" signal, not a failure.
			if err != errLast {
				return job.Error(err)
			}
			break
		}
	}
	// Newest first.
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
  283. func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
  284. if len(job.Args) != 1 {
  285. return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
  286. }
  287. name := job.Args[0]
  288. container := srv.daemon.Get(name)
  289. if container == nil {
  290. return job.Errorf("No such container: %s", name)
  291. }
  292. var (
  293. config = container.Config
  294. newConfig runconfig.Config
  295. )
  296. if err := job.GetenvJson("config", &newConfig); err != nil {
  297. return job.Error(err)
  298. }
  299. if err := runconfig.Merge(&newConfig, config); err != nil {
  300. return job.Error(err)
  301. }
  302. img, err := srv.daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
  303. if err != nil {
  304. return job.Error(err)
  305. }
  306. job.Printf("%s\n", img.ID)
  307. return engine.StatusOK
  308. }
// ContainerCreate creates a new container from the config passed in the
// job environment, optionally under the name given as the single
// argument, and prints the new container's ID. Missing kernel
// capabilities (memory/swap limits, IPv4 forwarding) are reported as
// warnings on the error stream instead of failing the create.
func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
	var name string
	if len(job.Args) == 1 {
		name = job.Args[0]
	} else if len(job.Args) > 1 {
		return job.Errorf("Usage: %s", job.Name)
	}
	config := runconfig.ContainerConfigFromJob(job)
	// 512k floor: smaller memory limits are rejected outright.
	if config.Memory != 0 && config.Memory < 524288 {
		return job.Errorf("Minimum memory limit allowed is 512k")
	}
	// Degrade gracefully when the kernel lacks limit support: warn via the
	// error stream and drop the limit rather than failing.
	if config.Memory > 0 && !srv.daemon.SystemConfig().MemoryLimit {
		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		config.Memory = 0
	}
	if config.Memory > 0 && !srv.daemon.SystemConfig().SwapLimit {
		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		config.MemorySwap = -1
	}
	container, buildWarnings, err := srv.daemon.Create(config, name)
	if err != nil {
		if srv.daemon.Graph().IsNotExist(err) {
			// Report the image reference together with its tag for clarity.
			_, tag := parsers.ParseRepositoryTag(config.Image)
			if tag == "" {
				tag = graph.DEFAULTTAG
			}
			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
		}
		return job.Error(err)
	}
	if !container.Config.NetworkDisabled && srv.daemon.SystemConfig().IPv4ForwardingDisabled {
		job.Errorf("IPv4 forwarding is disabled.\n")
	}
	srv.LogEvent("create", container.ID, srv.daemon.Repositories().ImageName(container.Image))
	// FIXME: this is necessary because daemon.Create might return a nil container
	// with a non-nil error. This should not happen! Once it's fixed we
	// can remove this workaround.
	// NOTE(review): container is already dereferenced a few lines above, so
	// if Create can really return nil alongside a nil error this guard
	// comes too late — confirm against daemon.Create.
	if container != nil {
		job.Printf("%s\n", container.ID)
	}
	for _, warning := range buildWarnings {
		job.Errorf("%s\n", warning)
	}
	return engine.StatusOK
}
  354. func (srv *Server) ContainerRestart(job *engine.Job) engine.Status {
  355. if len(job.Args) != 1 {
  356. return job.Errorf("Usage: %s CONTAINER\n", job.Name)
  357. }
  358. var (
  359. name = job.Args[0]
  360. t = 10
  361. )
  362. if job.EnvExists("t") {
  363. t = job.GetenvInt("t")
  364. }
  365. if container := srv.daemon.Get(name); container != nil {
  366. if err := container.Restart(int(t)); err != nil {
  367. return job.Errorf("Cannot restart container %s: %s\n", name, err)
  368. }
  369. srv.LogEvent("restart", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  370. } else {
  371. return job.Errorf("No such container: %s\n", name)
  372. }
  373. return engine.StatusOK
  374. }
  375. func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
  376. if len(job.Args) != 1 {
  377. return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
  378. }
  379. name := job.Args[0]
  380. removeVolume := job.GetenvBool("removeVolume")
  381. removeLink := job.GetenvBool("removeLink")
  382. stop := job.GetenvBool("stop")
  383. kill := job.GetenvBool("kill")
  384. container := srv.daemon.Get(name)
  385. if removeLink {
  386. if container == nil {
  387. return job.Errorf("No such link: %s", name)
  388. }
  389. name, err := daemon.GetFullContainerName(name)
  390. if err != nil {
  391. job.Error(err)
  392. }
  393. parent, n := path.Split(name)
  394. if parent == "/" {
  395. return job.Errorf("Conflict, cannot remove the default name of the container")
  396. }
  397. pe := srv.daemon.ContainerGraph().Get(parent)
  398. if pe == nil {
  399. return job.Errorf("Cannot get parent %s for name %s", parent, name)
  400. }
  401. parentContainer := srv.daemon.Get(pe.ID())
  402. if parentContainer != nil {
  403. parentContainer.DisableLink(n)
  404. }
  405. if err := srv.daemon.ContainerGraph().Delete(name); err != nil {
  406. return job.Error(err)
  407. }
  408. return engine.StatusOK
  409. }
  410. if container != nil {
  411. if container.State.IsRunning() {
  412. if stop {
  413. if err := container.Stop(5); err != nil {
  414. return job.Errorf("Could not stop running container, cannot remove - %v", err)
  415. }
  416. } else if kill {
  417. if err := container.Kill(); err != nil {
  418. return job.Errorf("Could not kill running container, cannot remove - %v", err)
  419. }
  420. } else {
  421. return job.Errorf("You cannot remove a running container. Stop the container before attempting removal or use -s or -k")
  422. }
  423. }
  424. if err := srv.daemon.Destroy(container); err != nil {
  425. return job.Errorf("Cannot destroy container %s: %s", name, err)
  426. }
  427. srv.LogEvent("destroy", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  428. if removeVolume {
  429. var (
  430. volumes = make(map[string]struct{})
  431. binds = make(map[string]struct{})
  432. usedVolumes = make(map[string]*daemon.Container)
  433. )
  434. // the volume id is always the base of the path
  435. getVolumeId := func(p string) string {
  436. return filepath.Base(strings.TrimSuffix(p, "/layer"))
  437. }
  438. // populate bind map so that they can be skipped and not removed
  439. for _, bind := range container.HostConfig().Binds {
  440. source := strings.Split(bind, ":")[0]
  441. // TODO: refactor all volume stuff, all of it
  442. // it is very important that we eval the link or comparing the keys to container.Volumes will not work
  443. //
  444. // eval symlink can fail, ref #5244 if we receive an is not exist error we can ignore it
  445. p, err := filepath.EvalSymlinks(source)
  446. if err != nil && !os.IsNotExist(err) {
  447. return job.Error(err)
  448. }
  449. if p != "" {
  450. source = p
  451. }
  452. binds[source] = struct{}{}
  453. }
  454. // Store all the deleted containers volumes
  455. for _, volumeId := range container.Volumes {
  456. // Skip the volumes mounted from external
  457. // bind mounts here will will be evaluated for a symlink
  458. if _, exists := binds[volumeId]; exists {
  459. continue
  460. }
  461. volumeId = getVolumeId(volumeId)
  462. volumes[volumeId] = struct{}{}
  463. }
  464. // Retrieve all volumes from all remaining containers
  465. for _, container := range srv.daemon.List() {
  466. for _, containerVolumeId := range container.Volumes {
  467. containerVolumeId = getVolumeId(containerVolumeId)
  468. usedVolumes[containerVolumeId] = container
  469. }
  470. }
  471. for volumeId := range volumes {
  472. // If the requested volu
  473. if c, exists := usedVolumes[volumeId]; exists {
  474. log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
  475. continue
  476. }
  477. if err := srv.daemon.Volumes().Delete(volumeId); err != nil {
  478. return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
  479. }
  480. }
  481. }
  482. } else {
  483. return job.Errorf("No such container: %s", name)
  484. }
  485. return engine.StatusOK
  486. }
  487. func (srv *Server) setHostConfig(container *daemon.Container, hostConfig *runconfig.HostConfig) error {
  488. // Validate the HostConfig binds. Make sure that:
  489. // the source exists
  490. for _, bind := range hostConfig.Binds {
  491. splitBind := strings.Split(bind, ":")
  492. source := splitBind[0]
  493. // ensure the source exists on the host
  494. _, err := os.Stat(source)
  495. if err != nil && os.IsNotExist(err) {
  496. err = os.MkdirAll(source, 0755)
  497. if err != nil {
  498. return fmt.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error())
  499. }
  500. }
  501. }
  502. // Register any links from the host config before starting the container
  503. if err := srv.daemon.RegisterLinks(container, hostConfig); err != nil {
  504. return err
  505. }
  506. container.SetHostConfig(hostConfig)
  507. container.ToDisk()
  508. return nil
  509. }
  510. func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
  511. if len(job.Args) < 1 {
  512. return job.Errorf("Usage: %s container_id", job.Name)
  513. }
  514. var (
  515. name = job.Args[0]
  516. daemon = srv.daemon
  517. container = daemon.Get(name)
  518. )
  519. if container == nil {
  520. return job.Errorf("No such container: %s", name)
  521. }
  522. if container.State.IsRunning() {
  523. return job.Errorf("Container already started")
  524. }
  525. // If no environment was set, then no hostconfig was passed.
  526. if len(job.Environ()) > 0 {
  527. hostConfig := runconfig.ContainerHostConfigFromJob(job)
  528. if err := srv.setHostConfig(container, hostConfig); err != nil {
  529. return job.Error(err)
  530. }
  531. }
  532. if err := container.Start(); err != nil {
  533. return job.Errorf("Cannot start container %s: %s", name, err)
  534. }
  535. srv.LogEvent("start", container.ID, daemon.Repositories().ImageName(container.Image))
  536. return engine.StatusOK
  537. }
  538. func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
  539. if len(job.Args) != 1 {
  540. return job.Errorf("Usage: %s CONTAINER\n", job.Name)
  541. }
  542. var (
  543. name = job.Args[0]
  544. t = 10
  545. )
  546. if job.EnvExists("t") {
  547. t = job.GetenvInt("t")
  548. }
  549. if container := srv.daemon.Get(name); container != nil {
  550. if !container.State.IsRunning() {
  551. return job.Errorf("Container already stopped")
  552. }
  553. if err := container.Stop(int(t)); err != nil {
  554. return job.Errorf("Cannot stop container %s: %s\n", name, err)
  555. }
  556. srv.LogEvent("stop", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  557. } else {
  558. return job.Errorf("No such container: %s\n", name)
  559. }
  560. return engine.StatusOK
  561. }
  562. func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
  563. if len(job.Args) != 1 {
  564. return job.Errorf("Usage: %s", job.Name)
  565. }
  566. name := job.Args[0]
  567. if container := srv.daemon.Get(name); container != nil {
  568. status, _ := container.State.WaitStop(-1 * time.Second)
  569. job.Printf("%d\n", status)
  570. return engine.StatusOK
  571. }
  572. return job.Errorf("%s: no such container: %s", job.Name, name)
  573. }
  574. func (srv *Server) ContainerResize(job *engine.Job) engine.Status {
  575. if len(job.Args) != 3 {
  576. return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
  577. }
  578. name := job.Args[0]
  579. height, err := strconv.Atoi(job.Args[1])
  580. if err != nil {
  581. return job.Error(err)
  582. }
  583. width, err := strconv.Atoi(job.Args[2])
  584. if err != nil {
  585. return job.Error(err)
  586. }
  587. if container := srv.daemon.Get(name); container != nil {
  588. if err := container.Resize(height, width); err != nil {
  589. return job.Error(err)
  590. }
  591. return engine.StatusOK
  592. }
  593. return job.Errorf("No such container: %s", name)
  594. }
  595. func (srv *Server) ContainerLogs(job *engine.Job) engine.Status {
  596. if len(job.Args) != 1 {
  597. return job.Errorf("Usage: %s CONTAINER\n", job.Name)
  598. }
  599. var (
  600. name = job.Args[0]
  601. stdout = job.GetenvBool("stdout")
  602. stderr = job.GetenvBool("stderr")
  603. tail = job.Getenv("tail")
  604. follow = job.GetenvBool("follow")
  605. times = job.GetenvBool("timestamps")
  606. lines = -1
  607. format string
  608. )
  609. if !(stdout || stderr) {
  610. return job.Errorf("You must choose at least one stream")
  611. }
  612. if times {
  613. format = time.RFC3339Nano
  614. }
  615. if tail == "" {
  616. tail = "all"
  617. }
  618. container := srv.daemon.Get(name)
  619. if container == nil {
  620. return job.Errorf("No such container: %s", name)
  621. }
  622. cLog, err := container.ReadLog("json")
  623. if err != nil && os.IsNotExist(err) {
  624. // Legacy logs
  625. utils.Debugf("Old logs format")
  626. if stdout {
  627. cLog, err := container.ReadLog("stdout")
  628. if err != nil {
  629. utils.Errorf("Error reading logs (stdout): %s", err)
  630. } else if _, err := io.Copy(job.Stdout, cLog); err != nil {
  631. utils.Errorf("Error streaming logs (stdout): %s", err)
  632. }
  633. }
  634. if stderr {
  635. cLog, err := container.ReadLog("stderr")
  636. if err != nil {
  637. utils.Errorf("Error reading logs (stderr): %s", err)
  638. } else if _, err := io.Copy(job.Stderr, cLog); err != nil {
  639. utils.Errorf("Error streaming logs (stderr): %s", err)
  640. }
  641. }
  642. } else if err != nil {
  643. utils.Errorf("Error reading logs (json): %s", err)
  644. } else {
  645. if tail != "all" {
  646. var err error
  647. lines, err = strconv.Atoi(tail)
  648. if err != nil {
  649. utils.Errorf("Failed to parse tail %s, error: %v, show all logs", err)
  650. lines = -1
  651. }
  652. }
  653. if lines != 0 {
  654. if lines > 0 {
  655. f := cLog.(*os.File)
  656. ls, err := tailfile.TailFile(f, lines)
  657. if err != nil {
  658. return job.Error(err)
  659. }
  660. tmp := bytes.NewBuffer([]byte{})
  661. for _, l := range ls {
  662. fmt.Fprintf(tmp, "%s\n", l)
  663. }
  664. cLog = tmp
  665. }
  666. dec := json.NewDecoder(cLog)
  667. for {
  668. l := &utils.JSONLog{}
  669. if err := dec.Decode(l); err == io.EOF {
  670. break
  671. } else if err != nil {
  672. utils.Errorf("Error streaming logs: %s", err)
  673. break
  674. }
  675. logLine := l.Log
  676. if times {
  677. logLine = fmt.Sprintf("%s %s", l.Created.Format(format), logLine)
  678. }
  679. if l.Stream == "stdout" && stdout {
  680. fmt.Fprintf(job.Stdout, "%s", logLine)
  681. }
  682. if l.Stream == "stderr" && stderr {
  683. fmt.Fprintf(job.Stderr, "%s", logLine)
  684. }
  685. }
  686. }
  687. }
  688. if follow {
  689. errors := make(chan error, 2)
  690. if stdout {
  691. stdoutPipe := container.StdoutLogPipe()
  692. go func() {
  693. errors <- utils.WriteLog(stdoutPipe, job.Stdout, format)
  694. }()
  695. }
  696. if stderr {
  697. stderrPipe := container.StderrLogPipe()
  698. go func() {
  699. errors <- utils.WriteLog(stderrPipe, job.Stderr, format)
  700. }()
  701. }
  702. err := <-errors
  703. if err != nil {
  704. utils.Errorf("%s", err)
  705. }
  706. }
  707. return engine.StatusOK
  708. }
  709. func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
  710. if len(job.Args) != 2 {
  711. return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
  712. }
  713. var (
  714. name = job.Args[0]
  715. resource = job.Args[1]
  716. )
  717. if container := srv.daemon.Get(name); container != nil {
  718. data, err := container.Copy(resource)
  719. if err != nil {
  720. return job.Error(err)
  721. }
  722. defer data.Close()
  723. if _, err := io.Copy(job.Stdout, data); err != nil {
  724. return job.Error(err)
  725. }
  726. return engine.StatusOK
  727. }
  728. return job.Errorf("No such container: %s", name)
  729. }