server.go 69 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481
  1. // DEPRECATION NOTICE. PLEASE DO NOT ADD ANYTHING TO THIS FILE.
  2. //
  3. // server/server.go is deprecated. We are working on breaking it up into smaller, cleaner
  4. // pieces which will be easier to find and test. This will help make the code less
  5. // redundant and more readable.
  6. //
  7. // Contributors, please don't add anything to server/server.go, unless it has the explicit
  8. // goal of helping the deprecation effort.
  9. //
  10. // Maintainers, please refuse patches which add code to server/server.go.
  11. //
  12. // Instead try the following files:
  13. // * For code related to local image management, try graph/
  14. // * For code related to image downloading, uploading, remote search etc, try registry/
  15. // * For code related to the docker daemon, try daemon/
  16. // * For small utilities which could potentially be useful outside of Docker, try pkg/
  17. // * For miscellaneous "util" functions which are docker-specific, try encapsulating them
  18. // inside one of the subsystems above. If you really think they should be more widely
  19. // available, are you sure you can't remove the docker dependencies and move them to
  20. // pkg? In last resort, you can add them to utils/ (but please try not to).
  21. package server
  22. import (
  23. "bytes"
  24. "encoding/json"
  25. "fmt"
  26. "io"
  27. "io/ioutil"
  28. "log"
  29. "net"
  30. "net/http"
  31. "net/url"
  32. "os"
  33. "os/exec"
  34. gosignal "os/signal"
  35. "path"
  36. "path/filepath"
  37. "runtime"
  38. "strconv"
  39. "strings"
  40. "sync"
  41. "sync/atomic"
  42. "syscall"
  43. "time"
  44. "github.com/dotcloud/docker/archive"
  45. "github.com/dotcloud/docker/daemon"
  46. "github.com/dotcloud/docker/daemonconfig"
  47. "github.com/dotcloud/docker/dockerversion"
  48. "github.com/dotcloud/docker/engine"
  49. "github.com/dotcloud/docker/graph"
  50. "github.com/dotcloud/docker/image"
  51. "github.com/dotcloud/docker/pkg/graphdb"
  52. "github.com/dotcloud/docker/pkg/signal"
  53. "github.com/dotcloud/docker/pkg/tailfile"
  54. "github.com/dotcloud/docker/registry"
  55. "github.com/dotcloud/docker/runconfig"
  56. "github.com/dotcloud/docker/utils"
  57. "github.com/dotcloud/docker/utils/filters"
  58. )
  59. func (srv *Server) handlerWrap(h engine.Handler) engine.Handler {
  60. return func(job *engine.Job) engine.Status {
  61. if !srv.IsRunning() {
  62. return job.Errorf("Server is not running")
  63. }
  64. srv.tasks.Add(1)
  65. defer srv.tasks.Done()
  66. return h(job)
  67. }
  68. }
// jobInitApi runs the remote api server `srv` as a daemon.
// Only one api server can run at the same time - this is enforced by a pidfile.
// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
func InitServer(job *engine.Job) engine.Status {
	job.Logf("Creating server")
	srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job))
	if err != nil {
		return job.Error(err)
	}
	if srv.daemon.Config().Pidfile != "" {
		job.Logf("Creating pidfile")
		if err := utils.CreatePidFile(srv.daemon.Config().Pidfile); err != nil {
			// FIXME: do we need fatal here instead of returning a job error?
			log.Fatal(err)
		}
	}
	job.Logf("Setting up signal traps")
	c := make(chan os.Signal, 1)
	gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
	go func() {
		// interruptCount counts SIGINT/SIGTERM deliveries so repeated
		// interrupts can escalate to a forced shutdown.
		interruptCount := uint32(0)
		for sig := range c {
			// Each signal is handled in its own goroutine so a slow
			// srv.Close() does not block processing of later signals.
			go func(sig os.Signal) {
				log.Printf("Received signal '%v', starting shutdown of docker...\n", sig)
				switch sig {
				case os.Interrupt, syscall.SIGTERM:
					// If the user really wants to interrupt, let him do so.
					// NOTE(review): the Load below and the AddUint32 after it are
					// not one atomic step; concurrent signals could race past the
					// "< 3" check. Using the return value of AddUint32 would be
					// race-free — confirm before relying on the exact threshold.
					if atomic.LoadUint32(&interruptCount) < 3 {
						atomic.AddUint32(&interruptCount, 1)
						// Initiate the cleanup only once
						if atomic.LoadUint32(&interruptCount) == 1 {
							utils.RemovePidFile(srv.daemon.Config().Pidfile)
							srv.Close()
						} else {
							// Cleanup already in progress; let the first
							// handler finish and call os.Exit.
							return
						}
					} else {
						log.Printf("Force shutdown of docker, interrupting cleanup\n")
					}
				case syscall.SIGQUIT:
					// SIGQUIT falls through to os.Exit below with no cleanup
					// (no pidfile removal, no srv.Close).
				}
				// Conventional "killed by signal N" exit code.
				os.Exit(128 + int(sig.(syscall.Signal)))
			}(sig)
		}
	}()
	// Expose the server and daemon to the HTTP API layer via engine globals.
	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
	// Register every server-side job handler, wrapped so calls are rejected
	// after shutdown and tracked while in flight (see handlerWrap).
	for name, handler := range map[string]engine.Handler{
		"export":           srv.ContainerExport,
		"create":           srv.ContainerCreate,
		"stop":             srv.ContainerStop,
		"restart":          srv.ContainerRestart,
		"start":            srv.ContainerStart,
		"kill":             srv.ContainerKill,
		"pause":            srv.ContainerPause,
		"unpause":          srv.ContainerUnpause,
		"wait":             srv.ContainerWait,
		"tag":              srv.ImageTag, // FIXME merge with "image_tag"
		"resize":           srv.ContainerResize,
		"commit":           srv.ContainerCommit,
		"info":             srv.DockerInfo,
		"container_delete": srv.ContainerDestroy,
		"image_export":     srv.ImageExport,
		"images":           srv.Images,
		"history":          srv.ImageHistory,
		"viz":              srv.ImagesViz,
		"container_copy":   srv.ContainerCopy,
		"attach":           srv.ContainerAttach,
		"logs":             srv.ContainerLogs,
		"changes":          srv.ContainerChanges,
		"top":              srv.ContainerTop,
		"load":             srv.ImageLoad,
		"build":            srv.Build,
		"pull":             srv.ImagePull,
		"import":           srv.ImageImport,
		"image_delete":     srv.ImageDelete,
		"events":           srv.Events,
		"push":             srv.ImagePush,
		"containers":       srv.Containers,
	} {
		if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil {
			return job.Error(err)
		}
	}
	// Install image-related commands from the image subsystem.
	// See `graph/service.go`
	if err := srv.daemon.Repositories().Install(job.Eng); err != nil {
		return job.Error(err)
	}
	// Install daemon-related commands from the daemon subsystem.
	// See `daemon/`
	if err := srv.daemon.Install(job.Eng); err != nil {
		return job.Error(err)
	}
	// Only now do wrapped handlers start accepting jobs.
	srv.SetRunning(true)
	return engine.StatusOK
}
  166. func (srv *Server) ContainerPause(job *engine.Job) engine.Status {
  167. if len(job.Args) != 1 {
  168. return job.Errorf("Usage: %s CONTAINER", job.Name)
  169. }
  170. name := job.Args[0]
  171. container := srv.daemon.Get(name)
  172. if container == nil {
  173. return job.Errorf("No such container: %s", name)
  174. }
  175. if err := container.Pause(); err != nil {
  176. return job.Errorf("Cannot pause container %s: %s", name, err)
  177. }
  178. return engine.StatusOK
  179. }
  180. func (srv *Server) ContainerUnpause(job *engine.Job) engine.Status {
  181. if n := len(job.Args); n < 1 || n > 2 {
  182. return job.Errorf("Usage: %s CONTAINER", job.Name)
  183. }
  184. name := job.Args[0]
  185. container := srv.daemon.Get(name)
  186. if container == nil {
  187. return job.Errorf("No such container: %s", name)
  188. }
  189. if err := container.Unpause(); err != nil {
  190. return job.Errorf("Cannot unpause container %s: %s", name, err)
  191. }
  192. return engine.StatusOK
  193. }
// ContainerKill send signal to the container
// If no signal is given (sig 0), then Kill with SIGKILL and wait
// for the container to exit.
// If a signal is given, then just send it to the container and return.
func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
	if n := len(job.Args); n < 1 || n > 2 {
		return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
	}
	var (
		name = job.Args[0]
		sig  uint64
		err  error
	)
	// If we have a signal, look at it. Otherwise, do nothing
	if len(job.Args) == 2 && job.Args[1] != "" {
		// Check if we passed the signal as a number:
		// The largest legal signal is 31, so let's parse on 5 bits
		sig, err = strconv.ParseUint(job.Args[1], 10, 5)
		if err != nil {
			// The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL")
			sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")])
			if sig == 0 {
				// Neither a parseable number nor a known signal name.
				return job.Errorf("Invalid signal: %s", job.Args[1])
			}
		}
	}
	if container := srv.daemon.Get(name); container != nil {
		// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
		if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
			if err := container.Kill(); err != nil {
				return job.Errorf("Cannot kill container %s: %s", name, err)
			}
			// Record the kill in the daemon's event log.
			srv.LogEvent("kill", container.ID, srv.daemon.Repositories().ImageName(container.Image))
		} else {
			// Otherwise, just send the requested signal
			if err := container.KillSig(int(sig)); err != nil {
				return job.Errorf("Cannot kill container %s: %s", name, err)
			}
			// FIXME: Add event for signals
		}
	} else {
		return job.Errorf("No such container: %s", name)
	}
	return engine.StatusOK
}
  239. func (srv *Server) Events(job *engine.Job) engine.Status {
  240. if len(job.Args) != 0 {
  241. return job.Errorf("Usage: %s", job.Name)
  242. }
  243. var (
  244. since = job.GetenvInt64("since")
  245. until = job.GetenvInt64("until")
  246. timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
  247. )
  248. // If no until, disable timeout
  249. if until == 0 {
  250. timeout.Stop()
  251. }
  252. listener := make(chan utils.JSONMessage)
  253. srv.eventPublisher.Subscribe(listener)
  254. defer srv.eventPublisher.Unsubscribe(listener)
  255. // When sending an event JSON serialization errors are ignored, but all
  256. // other errors lead to the eviction of the listener.
  257. sendEvent := func(event *utils.JSONMessage) error {
  258. if b, err := json.Marshal(event); err == nil {
  259. if _, err = job.Stdout.Write(b); err != nil {
  260. return err
  261. }
  262. }
  263. return nil
  264. }
  265. job.Stdout.Write(nil)
  266. // Resend every event in the [since, until] time interval.
  267. if since != 0 {
  268. for _, event := range srv.GetEvents() {
  269. if event.Time >= since && (event.Time <= until || until == 0) {
  270. if err := sendEvent(&event); err != nil {
  271. return job.Error(err)
  272. }
  273. }
  274. }
  275. }
  276. for {
  277. select {
  278. case event, ok := <-listener:
  279. if !ok {
  280. return engine.StatusOK
  281. }
  282. if err := sendEvent(&event); err != nil {
  283. return job.Error(err)
  284. }
  285. case <-timeout.C:
  286. return engine.StatusOK
  287. }
  288. }
  289. }
  290. func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
  291. if len(job.Args) != 1 {
  292. return job.Errorf("Usage: %s container_id", job.Name)
  293. }
  294. name := job.Args[0]
  295. if container := srv.daemon.Get(name); container != nil {
  296. data, err := container.Export()
  297. if err != nil {
  298. return job.Errorf("%s: %s", name, err)
  299. }
  300. defer data.Close()
  301. // Stream the entire contents of the container (basically a volatile snapshot)
  302. if _, err := io.Copy(job.Stdout, data); err != nil {
  303. return job.Errorf("%s: %s", name, err)
  304. }
  305. // FIXME: factor job-specific LogEvent to engine.Job.Run()
  306. srv.LogEvent("export", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  307. return engine.StatusOK
  308. }
  309. return job.Errorf("No such container: %s", name)
  310. }
// ImageExport exports all images with the given tag. All versions
// containing the same tag are exported. The resulting output is an
// uncompressed tar ball.
// name is the set of tags to export.
// out is the writer where the images are written to.
func (srv *Server) ImageExport(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s IMAGE\n", job.Name)
	}
	name := job.Args[0]
	// Build the export tree in a throwaway directory, then tar it up.
	tempdir, err := ioutil.TempDir("", "docker-export-")
	if err != nil {
		return job.Error(err)
	}
	defer os.RemoveAll(tempdir)
	utils.Debugf("Serializing %s", name)
	rootRepo, err := srv.daemon.Repositories().Get(name)
	if err != nil {
		return job.Error(err)
	}
	if rootRepo != nil {
		// name resolved to a repository: export every tagged image in it.
		for _, id := range rootRepo {
			if err := srv.exportImage(job.Eng, id, tempdir); err != nil {
				return job.Error(err)
			}
		}
		// write repositories
		rootRepoMap := map[string]graph.Repository{}
		rootRepoMap[name] = rootRepo
		// NOTE(review): Marshal error is deliberately discarded here; the map
		// contains only strings, so a failure should be impossible — confirm.
		rootRepoJson, _ := json.Marshal(rootRepoMap)
		if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
			return job.Error(err)
		}
	} else {
		// Not a repository: treat name as a single image name/ID.
		if err := srv.exportImage(job.Eng, name, tempdir); err != nil {
			return job.Error(err)
		}
	}
	fs, err := archive.Tar(tempdir, archive.Uncompressed)
	if err != nil {
		return job.Error(err)
	}
	defer fs.Close()
	// Stream the uncompressed tarball to the client.
	if _, err := io.Copy(job.Stdout, fs); err != nil {
		return job.Error(err)
	}
	utils.Debugf("End Serializing %s", name)
	return engine.StatusOK
}
  361. func (srv *Server) exportImage(eng *engine.Engine, name, tempdir string) error {
  362. for n := name; n != ""; {
  363. // temporary directory
  364. tmpImageDir := path.Join(tempdir, n)
  365. if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil {
  366. if os.IsExist(err) {
  367. return nil
  368. }
  369. return err
  370. }
  371. var version = "1.0"
  372. var versionBuf = []byte(version)
  373. if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil {
  374. return err
  375. }
  376. // serialize json
  377. json, err := os.Create(path.Join(tmpImageDir, "json"))
  378. if err != nil {
  379. return err
  380. }
  381. job := eng.Job("image_inspect", n)
  382. job.SetenvBool("raw", true)
  383. job.Stdout.Add(json)
  384. if err := job.Run(); err != nil {
  385. return err
  386. }
  387. // serialize filesystem
  388. fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar"))
  389. if err != nil {
  390. return err
  391. }
  392. job = eng.Job("image_tarlayer", n)
  393. job.Stdout.Add(fsTar)
  394. if err := job.Run(); err != nil {
  395. return err
  396. }
  397. // find parent
  398. job = eng.Job("image_get", n)
  399. info, _ := job.Stdout.AddEnv()
  400. if err := job.Run(); err != nil {
  401. return err
  402. }
  403. n = info.Get("Parent")
  404. }
  405. return nil
  406. }
  407. func (srv *Server) Build(job *engine.Job) engine.Status {
  408. if len(job.Args) != 0 {
  409. return job.Errorf("Usage: %s\n", job.Name)
  410. }
  411. var (
  412. remoteURL = job.Getenv("remote")
  413. repoName = job.Getenv("t")
  414. suppressOutput = job.GetenvBool("q")
  415. noCache = job.GetenvBool("nocache")
  416. rm = job.GetenvBool("rm")
  417. forceRm = job.GetenvBool("forcerm")
  418. authConfig = &registry.AuthConfig{}
  419. configFile = &registry.ConfigFile{}
  420. tag string
  421. context io.ReadCloser
  422. )
  423. job.GetenvJson("authConfig", authConfig)
  424. job.GetenvJson("configFile", configFile)
  425. repoName, tag = utils.ParseRepositoryTag(repoName)
  426. if remoteURL == "" {
  427. context = ioutil.NopCloser(job.Stdin)
  428. } else if utils.IsGIT(remoteURL) {
  429. if !strings.HasPrefix(remoteURL, "git://") {
  430. remoteURL = "https://" + remoteURL
  431. }
  432. root, err := ioutil.TempDir("", "docker-build-git")
  433. if err != nil {
  434. return job.Error(err)
  435. }
  436. defer os.RemoveAll(root)
  437. if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
  438. return job.Errorf("Error trying to use git: %s (%s)", err, output)
  439. }
  440. c, err := archive.Tar(root, archive.Uncompressed)
  441. if err != nil {
  442. return job.Error(err)
  443. }
  444. context = c
  445. } else if utils.IsURL(remoteURL) {
  446. f, err := utils.Download(remoteURL)
  447. if err != nil {
  448. return job.Error(err)
  449. }
  450. defer f.Body.Close()
  451. dockerFile, err := ioutil.ReadAll(f.Body)
  452. if err != nil {
  453. return job.Error(err)
  454. }
  455. c, err := archive.Generate("Dockerfile", string(dockerFile))
  456. if err != nil {
  457. return job.Error(err)
  458. }
  459. context = c
  460. }
  461. defer context.Close()
  462. sf := utils.NewStreamFormatter(job.GetenvBool("json"))
  463. b := NewBuildFile(srv,
  464. &utils.StdoutFormater{
  465. Writer: job.Stdout,
  466. StreamFormatter: sf,
  467. },
  468. &utils.StderrFormater{
  469. Writer: job.Stdout,
  470. StreamFormatter: sf,
  471. },
  472. !suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile)
  473. id, err := b.Build(context)
  474. if err != nil {
  475. return job.Error(err)
  476. }
  477. if repoName != "" {
  478. srv.daemon.Repositories().Set(repoName, tag, id, false)
  479. }
  480. return engine.StatusOK
  481. }
  482. // Loads a set of images into the repository. This is the complementary of ImageExport.
  483. // The input stream is an uncompressed tar ball containing images and metadata.
  484. func (srv *Server) ImageLoad(job *engine.Job) engine.Status {
  485. tmpImageDir, err := ioutil.TempDir("", "docker-import-")
  486. if err != nil {
  487. return job.Error(err)
  488. }
  489. defer os.RemoveAll(tmpImageDir)
  490. var (
  491. repoTarFile = path.Join(tmpImageDir, "repo.tar")
  492. repoDir = path.Join(tmpImageDir, "repo")
  493. )
  494. tarFile, err := os.Create(repoTarFile)
  495. if err != nil {
  496. return job.Error(err)
  497. }
  498. if _, err := io.Copy(tarFile, job.Stdin); err != nil {
  499. return job.Error(err)
  500. }
  501. tarFile.Close()
  502. repoFile, err := os.Open(repoTarFile)
  503. if err != nil {
  504. return job.Error(err)
  505. }
  506. if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
  507. return job.Error(err)
  508. }
  509. if err := archive.Untar(repoFile, repoDir, nil); err != nil {
  510. return job.Error(err)
  511. }
  512. dirs, err := ioutil.ReadDir(repoDir)
  513. if err != nil {
  514. return job.Error(err)
  515. }
  516. for _, d := range dirs {
  517. if d.IsDir() {
  518. if err := srv.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil {
  519. return job.Error(err)
  520. }
  521. }
  522. }
  523. repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories"))
  524. if err == nil {
  525. repositories := map[string]graph.Repository{}
  526. if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
  527. return job.Error(err)
  528. }
  529. for imageName, tagMap := range repositories {
  530. for tag, address := range tagMap {
  531. if err := srv.daemon.Repositories().Set(imageName, tag, address, true); err != nil {
  532. return job.Error(err)
  533. }
  534. }
  535. }
  536. } else if !os.IsNotExist(err) {
  537. return job.Error(err)
  538. }
  539. return engine.StatusOK
  540. }
  541. func (srv *Server) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error {
  542. if err := eng.Job("image_get", address).Run(); err != nil {
  543. utils.Debugf("Loading %s", address)
  544. imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json"))
  545. if err != nil {
  546. utils.Debugf("Error reading json", err)
  547. return err
  548. }
  549. layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar"))
  550. if err != nil {
  551. utils.Debugf("Error reading embedded tar", err)
  552. return err
  553. }
  554. img, err := image.NewImgJSON(imageJson)
  555. if err != nil {
  556. utils.Debugf("Error unmarshalling json", err)
  557. return err
  558. }
  559. if img.Parent != "" {
  560. if !srv.daemon.Graph().Exists(img.Parent) {
  561. if err := srv.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil {
  562. return err
  563. }
  564. }
  565. }
  566. if err := srv.daemon.Graph().Register(imageJson, layer, img); err != nil {
  567. return err
  568. }
  569. }
  570. utils.Debugf("Completed processing %s", address)
  571. return nil
  572. }
  573. func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
  574. images, _ := srv.daemon.Graph().Map()
  575. if images == nil {
  576. return engine.StatusOK
  577. }
  578. job.Stdout.Write([]byte("digraph docker {\n"))
  579. var (
  580. parentImage *image.Image
  581. err error
  582. )
  583. for _, image := range images {
  584. parentImage, err = image.GetParent()
  585. if err != nil {
  586. return job.Errorf("Error while getting parent image: %v", err)
  587. }
  588. if parentImage != nil {
  589. job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
  590. } else {
  591. job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
  592. }
  593. }
  594. for id, repos := range srv.daemon.Repositories().GetRepoRefs() {
  595. job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
  596. }
  597. job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
  598. return engine.StatusOK
  599. }
// Images writes the list of images to the job's stdout as a table.
// Env vars:
//   filters - JSON-encoded filter set; "dangling=true" selects only untagged images
//   all     - include intermediate (non-head) images
//   filter  - glob pattern matched against repository names
func (srv *Server) Images(job *engine.Job) engine.Status {
	var (
		allImages map[string]*image.Image
		err       error
		// filt_tagged is true unless "dangling=true" was requested, in which
		// case only untagged images are reported.
		filt_tagged = true
	)
	imageFilters, err := filters.FromParam(job.Getenv("filters"))
	if err != nil {
		return job.Error(err)
	}
	if i, ok := imageFilters["dangling"]; ok {
		for _, value := range i {
			if strings.ToLower(value) == "true" {
				filt_tagged = false
			}
		}
	}
	// "all" walks the entire graph; otherwise only head (leaf) images.
	if job.GetenvBool("all") && filt_tagged {
		allImages, err = srv.daemon.Graph().Map()
	} else {
		allImages, err = srv.daemon.Graph().Heads()
	}
	if err != nil {
		return job.Error(err)
	}
	// lookup collects one output row per image ID, merging all its tags.
	lookup := make(map[string]*engine.Env)
	srv.daemon.Repositories().Lock()
	for name, repository := range srv.daemon.Repositories().Repositories {
		if job.Getenv("filter") != "" {
			if match, _ := path.Match(job.Getenv("filter"), name); !match {
				continue
			}
		}
		for tag, id := range repository {
			image, err := srv.daemon.Graph().Get(id)
			if err != nil {
				// A dangling tag is logged and skipped rather than failing
				// the whole listing.
				log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
				continue
			}
			if out, exists := lookup[id]; exists {
				if filt_tagged {
					// Additional tag for an image we've already emitted.
					out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag)))
				}
			} else {
				// Remove the image from allImages so only genuinely untagged
				// images remain there for the dangling pass below.
				delete(allImages, id)
				if filt_tagged {
					out := &engine.Env{}
					out.Set("ParentId", image.Parent)
					out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)})
					out.Set("Id", image.ID)
					out.SetInt64("Created", image.Created.Unix())
					out.SetInt64("Size", image.Size)
					out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
					lookup[id] = out
				}
			}
		}
	}
	srv.daemon.Repositories().Unlock()
	outs := engine.NewTable("Created", len(lookup))
	for _, value := range lookup {
		outs.Add(value)
	}
	// Display images which aren't part of a repository/tag
	if job.Getenv("filter") == "" {
		for _, image := range allImages {
			out := &engine.Env{}
			out.Set("ParentId", image.Parent)
			out.SetList("RepoTags", []string{"<none>:<none>"})
			out.Set("Id", image.ID)
			out.SetInt64("Created", image.Created.Unix())
			out.SetInt64("Size", image.Size)
			out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
			outs.Add(out)
		}
	}
	// Newest first.
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
  683. func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
  684. images, _ := srv.daemon.Graph().Map()
  685. var imgcount int
  686. if images == nil {
  687. imgcount = 0
  688. } else {
  689. imgcount = len(images)
  690. }
  691. kernelVersion := "<unknown>"
  692. if kv, err := utils.GetKernelVersion(); err == nil {
  693. kernelVersion = kv.String()
  694. }
  695. // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
  696. initPath := utils.DockerInitPath("")
  697. if initPath == "" {
  698. // if that fails, we'll just return the path from the daemon
  699. initPath = srv.daemon.SystemInitPath()
  700. }
  701. v := &engine.Env{}
  702. v.SetInt("Containers", len(srv.daemon.List()))
  703. v.SetInt("Images", imgcount)
  704. v.Set("Driver", srv.daemon.GraphDriver().String())
  705. v.SetJson("DriverStatus", srv.daemon.GraphDriver().Status())
  706. v.SetBool("MemoryLimit", srv.daemon.SystemConfig().MemoryLimit)
  707. v.SetBool("SwapLimit", srv.daemon.SystemConfig().SwapLimit)
  708. v.SetBool("IPv4Forwarding", !srv.daemon.SystemConfig().IPv4ForwardingDisabled)
  709. v.SetBool("Debug", os.Getenv("DEBUG") != "")
  710. v.SetInt("NFd", utils.GetTotalUsedFds())
  711. v.SetInt("NGoroutines", runtime.NumGoroutine())
  712. v.Set("ExecutionDriver", srv.daemon.ExecutionDriver().Name())
  713. v.SetInt("NEventsListener", srv.eventPublisher.SubscribersCount())
  714. v.Set("KernelVersion", kernelVersion)
  715. v.Set("IndexServerAddress", registry.IndexServerAddress())
  716. v.Set("InitSha1", dockerversion.INITSHA1)
  717. v.Set("InitPath", initPath)
  718. v.SetList("Sockets", srv.daemon.Sockets)
  719. if _, err := v.WriteTo(job.Stdout); err != nil {
  720. return job.Error(err)
  721. }
  722. return engine.StatusOK
  723. }
  724. func (srv *Server) ImageHistory(job *engine.Job) engine.Status {
  725. if n := len(job.Args); n != 1 {
  726. return job.Errorf("Usage: %s IMAGE", job.Name)
  727. }
  728. name := job.Args[0]
  729. foundImage, err := srv.daemon.Repositories().LookupImage(name)
  730. if err != nil {
  731. return job.Error(err)
  732. }
  733. lookupMap := make(map[string][]string)
  734. for name, repository := range srv.daemon.Repositories().Repositories {
  735. for tag, id := range repository {
  736. // If the ID already has a reverse lookup, do not update it unless for "latest"
  737. if _, exists := lookupMap[id]; !exists {
  738. lookupMap[id] = []string{}
  739. }
  740. lookupMap[id] = append(lookupMap[id], name+":"+tag)
  741. }
  742. }
  743. outs := engine.NewTable("Created", 0)
  744. err = foundImage.WalkHistory(func(img *image.Image) error {
  745. out := &engine.Env{}
  746. out.Set("Id", img.ID)
  747. out.SetInt64("Created", img.Created.Unix())
  748. out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " "))
  749. out.SetList("Tags", lookupMap[img.ID])
  750. out.SetInt64("Size", img.Size)
  751. outs.Add(out)
  752. return nil
  753. })
  754. if _, err := outs.WriteListTo(job.Stdout); err != nil {
  755. return job.Error(err)
  756. }
  757. return engine.StatusOK
  758. }
  759. func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
  760. if len(job.Args) != 1 && len(job.Args) != 2 {
  761. return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
  762. }
  763. var (
  764. name = job.Args[0]
  765. psArgs = "-ef"
  766. )
  767. if len(job.Args) == 2 && job.Args[1] != "" {
  768. psArgs = job.Args[1]
  769. }
  770. if container := srv.daemon.Get(name); container != nil {
  771. if !container.State.IsRunning() {
  772. return job.Errorf("Container %s is not running", name)
  773. }
  774. pids, err := srv.daemon.ExecutionDriver().GetPidsForContainer(container.ID)
  775. if err != nil {
  776. return job.Error(err)
  777. }
  778. output, err := exec.Command("ps", psArgs).Output()
  779. if err != nil {
  780. return job.Errorf("Error running ps: %s", err)
  781. }
  782. lines := strings.Split(string(output), "\n")
  783. header := strings.Fields(lines[0])
  784. out := &engine.Env{}
  785. out.SetList("Titles", header)
  786. pidIndex := -1
  787. for i, name := range header {
  788. if name == "PID" {
  789. pidIndex = i
  790. }
  791. }
  792. if pidIndex == -1 {
  793. return job.Errorf("Couldn't find PID field in ps output")
  794. }
  795. processes := [][]string{}
  796. for _, line := range lines[1:] {
  797. if len(line) == 0 {
  798. continue
  799. }
  800. fields := strings.Fields(line)
  801. p, err := strconv.Atoi(fields[pidIndex])
  802. if err != nil {
  803. return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
  804. }
  805. for _, pid := range pids {
  806. if pid == p {
  807. // Make sure number of fields equals number of header titles
  808. // merging "overhanging" fields
  809. process := fields[:len(header)-1]
  810. process = append(process, strings.Join(fields[len(header)-1:], " "))
  811. processes = append(processes, process)
  812. }
  813. }
  814. }
  815. out.SetJson("Processes", processes)
  816. out.WriteTo(job.Stdout)
  817. return engine.StatusOK
  818. }
  819. return job.Errorf("No such container: %s", name)
  820. }
  821. func (srv *Server) ContainerChanges(job *engine.Job) engine.Status {
  822. if n := len(job.Args); n != 1 {
  823. return job.Errorf("Usage: %s CONTAINER", job.Name)
  824. }
  825. name := job.Args[0]
  826. if container := srv.daemon.Get(name); container != nil {
  827. outs := engine.NewTable("", 0)
  828. changes, err := container.Changes()
  829. if err != nil {
  830. return job.Error(err)
  831. }
  832. for _, change := range changes {
  833. out := &engine.Env{}
  834. if err := out.Import(change); err != nil {
  835. return job.Error(err)
  836. }
  837. outs.Add(out)
  838. }
  839. if _, err := outs.WriteListTo(job.Stdout); err != nil {
  840. return job.Error(err)
  841. }
  842. } else {
  843. return job.Errorf("No such container: %s", name)
  844. }
  845. return engine.StatusOK
  846. }
// Containers handles the "containers" job (docker ps): it writes the
// container list, newest first, as an engine.Table to job.Stdout.
//
// Recognized job inputs:
//   - env "all":    include stopped containers.
//   - env "since":  show only containers created after the named one.
//   - env "before": show only containers created before the named one.
//   - env "limit":  cap the number of containers displayed.
//   - env "size":   include SizeRw/SizeRootFs for each container.
func (srv *Server) Containers(job *engine.Job) engine.Status {
	var (
		foundBefore bool
		displayed   int
		all         = job.GetenvBool("all")
		since       = job.Getenv("since")
		before      = job.Getenv("before")
		n           = job.GetenvInt("limit")
		size        = job.GetenvBool("size")
	)
	outs := engine.NewTable("Created", 0)
	// Collect every name (path in the container graph) pointing at each
	// container ID so each entry can list all of its names.
	names := map[string][]string{}
	srv.daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
		names[e.ID()] = append(names[e.ID()], p)
		return nil
	}, -1)
	var beforeCont, sinceCont *daemon.Container
	if before != "" {
		beforeCont = srv.daemon.Get(before)
		if beforeCont == nil {
			return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
		}
	}
	if since != "" {
		sinceCont = srv.daemon.Get(since)
		if sinceCont == nil {
			return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
		}
	}
	// srv.daemon.List() is iterated in order; the before/since/limit
	// checks below are order-sensitive.
	for _, container := range srv.daemon.List() {
		// By default only running containers are shown; "all", a limit,
		// or a since/before marker also exposes stopped ones.
		if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
			continue
		}
		// Skip everything up to and including the "before" container.
		if before != "" && !foundBefore {
			if container.ID == beforeCont.ID {
				foundBefore = true
			}
			continue
		}
		if n > 0 && displayed == n {
			break
		}
		// Stop once the "since" container is reached (it is excluded).
		if since != "" {
			if container.ID == sinceCont.ID {
				break
			}
		}
		displayed++
		out := &engine.Env{}
		out.Set("Id", container.ID)
		out.SetList("Names", names[container.ID])
		out.Set("Image", srv.daemon.Repositories().ImageName(container.Image))
		if len(container.Args) > 0 {
			// Quote arguments containing spaces so the rendered command
			// line stays unambiguous.
			args := []string{}
			for _, arg := range container.Args {
				if strings.Contains(arg, " ") {
					args = append(args, fmt.Sprintf("'%s'", arg))
				} else {
					args = append(args, arg)
				}
			}
			argsAsString := strings.Join(args, " ")
			out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString))
		} else {
			out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
		}
		out.SetInt64("Created", container.Created.Unix())
		out.Set("Status", container.State.String())
		str, err := container.NetworkSettings.PortMappingAPI().ToListString()
		if err != nil {
			return job.Error(err)
		}
		out.Set("Ports", str)
		if size {
			sizeRw, sizeRootFs := container.GetSize()
			out.SetInt64("SizeRw", sizeRw)
			out.SetInt64("SizeRootFs", sizeRootFs)
		}
		outs.Add(out)
	}
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
  933. func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
  934. if len(job.Args) != 1 {
  935. return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
  936. }
  937. name := job.Args[0]
  938. container := srv.daemon.Get(name)
  939. if container == nil {
  940. return job.Errorf("No such container: %s", name)
  941. }
  942. var (
  943. config = container.Config
  944. newConfig runconfig.Config
  945. )
  946. if err := job.GetenvJson("config", &newConfig); err != nil {
  947. return job.Error(err)
  948. }
  949. if err := runconfig.Merge(&newConfig, config); err != nil {
  950. return job.Error(err)
  951. }
  952. img, err := srv.daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
  953. if err != nil {
  954. return job.Error(err)
  955. }
  956. job.Printf("%s\n", img.ID)
  957. return engine.StatusOK
  958. }
  959. func (srv *Server) ImageTag(job *engine.Job) engine.Status {
  960. if len(job.Args) != 2 && len(job.Args) != 3 {
  961. return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
  962. }
  963. var tag string
  964. if len(job.Args) == 3 {
  965. tag = job.Args[2]
  966. }
  967. if err := srv.daemon.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
  968. return job.Error(err)
  969. }
  970. return engine.StatusOK
  971. }
// pullImage downloads image imgID and every ancestor layer it depends on
// from the given registry endpoint, registering each layer in the local
// graph. Layers already present locally are skipped; concurrent pulls of
// the same layer are coalesced via the pull pool. Progress messages are
// streamed to out using sf.
//
// NOTE(review): poolRemove and layer.Close are deferred inside the loop,
// so they run only when pullImage returns, not at the end of each
// iteration — pool entries and open layers accumulate for the duration
// of the call.
func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error {
	history, err := r.GetRemoteHistory(imgID, endpoint, token)
	if err != nil {
		return err
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil))
	// FIXME: Try to stream the images?
	// FIXME: Launch the getRemoteImage() in goroutines
	// Walk the history from the base layer upwards so parents are
	// registered before their children.
	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]
		// ensure no two downloads of the same layer happen at the same time
		if c, err := srv.poolAdd("pull", "layer:"+id); err != nil {
			utils.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
			<-c
		}
		defer srv.poolRemove("pull", "layer:"+id)
		if !srv.daemon.Graph().Exists(id) {
			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
			var (
				imgJSON []byte
				imgSize int
				err     error
				img     *image.Image
			)
			retries := 5
			// Fetch the image metadata (JSON), retrying with a linearly
			// growing sleep before giving up.
			for j := 1; j <= retries; j++ {
				imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
				if err != nil && j == retries {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
					return err
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				}
				img, err = image.NewImgJSON(imgJSON)
				if err != nil && j == retries {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
					return fmt.Errorf("Failed to parse json: %s", err)
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else {
					break
				}
			}
			// Fetch and register the layer itself; only network timeouts
			// are retried, any other error aborts the pull.
			for j := 1; j <= retries; j++ {
				// Get the layer
				status := "Pulling fs layer"
				if j > 1 {
					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
				}
				out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil))
				layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
				// Unwrap url.Error so the timeout check below can see the
				// underlying net.Error.
				if uerr, ok := err.(*url.Error); ok {
					err = uerr.Err
				}
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
					return err
				}
				defer layer.Close()
				err = srv.daemon.Graph().Register(imgJSON,
					utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"),
					img)
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
					return err
				} else {
					break
				}
			}
		}
		out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil))
	}
	return nil
}
// pullRepository pulls a whole repository (or a single tag of it, when
// askedTag is non-empty) from the registry: it resolves the tag list,
// downloads each tagged image — concurrently when parallel is true — and
// finally records the tags in the local tag store.
func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool) error {
	out.Write(sf.FormatStatus("", "Pulling repository %s", localName))
	repoData, err := r.GetRepositoryData(remoteName)
	if err != nil {
		return err
	}
	utils.Debugf("Retrieving the tag list")
	tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens)
	if err != nil {
		utils.Errorf("%v", err)
		return err
	}
	// Make sure every tagged image has an entry in ImgList, even if the
	// registry's repository data did not mention it.
	for tag, id := range tagsList {
		repoData.ImgList[id] = &registry.ImgData{
			ID:       id,
			Tag:      tag,
			Checksum: "",
		}
	}
	utils.Debugf("Registering tags")
	// If no tag has been specified, pull them all
	if askedTag == "" {
		for tag, id := range tagsList {
			repoData.ImgList[id].Tag = tag
		}
	} else {
		// Otherwise, check that the tag exists and use only that one
		id, exists := tagsList[askedTag]
		if !exists {
			return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName)
		}
		repoData.ImgList[id].Tag = askedTag
	}
	// In parallel mode every downloadImage invocation sends exactly one
	// value (nil or an error) on this channel; the collection loop below
	// relies on that to know when all goroutines are done.
	errors := make(chan error)
	for _, image := range repoData.ImgList {
		downloadImage := func(img *registry.ImgData) {
			if askedTag != "" && img.Tag != askedTag {
				utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
				if parallel {
					errors <- nil
				}
				return
			}
			if img.Tag == "" {
				utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
				if parallel {
					errors <- nil
				}
				return
			}
			// ensure no two downloads of the same image happen at the same time
			if c, err := srv.poolAdd("pull", "img:"+img.ID); err != nil {
				// A non-nil channel means another client is pulling this
				// image right now: wait for it instead of downloading.
				if c != nil {
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
				} else {
					utils.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
				if parallel {
					errors <- nil
				}
				return
			}
			defer srv.poolRemove("pull", "img:"+img.ID)
			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil))
			success := false
			var lastErr error
			// Try each endpoint in turn until one of them succeeds.
			for _, ep := range repoData.Endpoints {
				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil))
				if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
					// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
					// As the error is also given to the output stream the user will see the error.
					lastErr = err
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil))
					continue
				}
				success = true
				break
			}
			if !success {
				err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, localName, lastErr)
				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), err.Error(), nil))
				if parallel {
					errors <- err
					return
				}
			}
			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
			if parallel {
				errors <- nil
			}
		}
		// image is passed as an argument, so each goroutine gets its own
		// copy rather than capturing the loop variable.
		if parallel {
			go downloadImage(image)
		} else {
			downloadImage(image)
		}
	}
	if parallel {
		// Drain one result per image, remembering the last error seen.
		var lastError error
		for i := 0; i < len(repoData.ImgList); i++ {
			if err := <-errors; err != nil {
				lastError = err
			}
		}
		if lastError != nil {
			return lastError
		}
	}
	// Record the pulled tags in the local tag store.
	for tag, id := range tagsList {
		if askedTag != "" && tag != askedTag {
			continue
		}
		if err := srv.daemon.Repositories().Set(localName, tag, id, true); err != nil {
			return err
		}
	}
	return nil
}
  1174. func (srv *Server) poolAdd(kind, key string) (chan struct{}, error) {
  1175. srv.Lock()
  1176. defer srv.Unlock()
  1177. if c, exists := srv.pullingPool[key]; exists {
  1178. return c, fmt.Errorf("pull %s is already in progress", key)
  1179. }
  1180. if c, exists := srv.pushingPool[key]; exists {
  1181. return c, fmt.Errorf("push %s is already in progress", key)
  1182. }
  1183. c := make(chan struct{})
  1184. switch kind {
  1185. case "pull":
  1186. srv.pullingPool[key] = c
  1187. case "push":
  1188. srv.pushingPool[key] = c
  1189. default:
  1190. return nil, fmt.Errorf("Unknown pool type")
  1191. }
  1192. return c, nil
  1193. }
  1194. func (srv *Server) poolRemove(kind, key string) error {
  1195. srv.Lock()
  1196. defer srv.Unlock()
  1197. switch kind {
  1198. case "pull":
  1199. if c, exists := srv.pullingPool[key]; exists {
  1200. close(c)
  1201. delete(srv.pullingPool, key)
  1202. }
  1203. case "push":
  1204. if c, exists := srv.pushingPool[key]; exists {
  1205. close(c)
  1206. delete(srv.pushingPool, key)
  1207. }
  1208. default:
  1209. return fmt.Errorf("Unknown pool type")
  1210. }
  1211. return nil
  1212. }
// ImagePull handles the "pull" job: Usage: pull IMAGE [TAG].
// It resolves the repository name to a registry endpoint and pulls the
// repository (or only the requested tag). A concurrent pull of the same
// repo:tag makes this caller wait for the first one instead of starting
// a second download. Job inputs: env "authConfig" (JSON), "metaHeaders"
// (JSON), "json" (stream format) and "parallel".
func (srv *Server) ImagePull(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 && n != 2 {
		return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
	}
	var (
		localName   = job.Args[0]
		tag         string
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)
	if len(job.Args) > 1 {
		tag = job.Args[1]
	}
	// Best effort: decoding errors for these optional envs are ignored.
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)
	c, err := srv.poolAdd("pull", localName+":"+tag)
	if err != nil {
		if c != nil {
			// Another pull of the same repository is already taking place; just wait for it to finish
			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
			<-c
			return engine.StatusOK
		}
		return job.Error(err)
	}
	defer srv.poolRemove("pull", localName+":"+tag)
	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}
	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
	if err != nil {
		return job.Error(err)
	}
	r, err := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return job.Error(err)
	}
	if endpoint == registry.IndexServerAddress() {
		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
		localName = remoteName
	}
	if err = srv.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
  1262. // Retrieve the all the images to be uploaded in the correct order
  1263. func (srv *Server) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) {
  1264. var (
  1265. imageList []string
  1266. imagesSeen map[string]bool = make(map[string]bool)
  1267. tagsByImage map[string][]string = make(map[string][]string)
  1268. )
  1269. for tag, id := range localRepo {
  1270. if requestedTag != "" && requestedTag != tag {
  1271. continue
  1272. }
  1273. var imageListForThisTag []string
  1274. tagsByImage[id] = append(tagsByImage[id], tag)
  1275. for img, err := srv.daemon.Graph().Get(id); img != nil; img, err = img.GetParent() {
  1276. if err != nil {
  1277. return nil, nil, err
  1278. }
  1279. if imagesSeen[img.ID] {
  1280. // This image is already on the list, we can ignore it and all its parents
  1281. break
  1282. }
  1283. imagesSeen[img.ID] = true
  1284. imageListForThisTag = append(imageListForThisTag, img.ID)
  1285. }
  1286. // reverse the image list for this tag (so the "most"-parent image is first)
  1287. for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 {
  1288. imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i]
  1289. }
  1290. // append to main image list
  1291. imageList = append(imageList, imageListForThisTag...)
  1292. }
  1293. if len(imageList) == 0 {
  1294. return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
  1295. }
  1296. utils.Debugf("Image list: %v", imageList)
  1297. utils.Debugf("Tags by image: %v", tagsByImage)
  1298. return imageList, tagsByImage, nil
  1299. }
// pushRepository pushes every image and tag of a local repository (or a
// single tag, when tag is non-empty) to the registry: it registers the
// image index, then uploads each missing image and registers its tags on
// every endpoint returned by the registry.
func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error {
	out = utils.NewWriteFlusher(out)
	utils.Debugf("Local repo: %s", localRepo)
	imgList, tagsByImage, err := srv.getImageList(localRepo, tag)
	if err != nil {
		return err
	}
	out.Write(sf.FormatStatus("", "Sending image list"))
	var (
		repoData   *registry.RepositoryData
		imageIndex []*registry.ImgData
	)
	// Build the image index sent to the registry: one entry per (image,
	// tag) pair, plus an empty-tag entry for untagged intermediates.
	for _, imgId := range imgList {
		if tags, exists := tagsByImage[imgId]; exists {
			// If an image has tags you must add an entry in the image index
			// for each tag
			for _, tag := range tags {
				imageIndex = append(imageIndex, &registry.ImgData{
					ID:  imgId,
					Tag: tag,
				})
			}
		} else {
			// If the image does not have a tag it still needs to be sent to the
			// registry with an empty tag so that it is accociated with the repository
			imageIndex = append(imageIndex, &registry.ImgData{
				ID:  imgId,
				Tag: "",
			})
		}
	}
	utils.Debugf("Preparing to push %s with the following images and tags\n", localRepo)
	for _, data := range imageIndex {
		utils.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag)
	}
	// Register all the images in a repository with the registry
	// If an image is not in this list it will not be associated with the repository
	repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil)
	if err != nil {
		return err
	}
	nTag := 1
	if tag == "" {
		nTag = len(localRepo)
	}
	for _, ep := range repoData.Endpoints {
		out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, nTag))
		for _, imgId := range imgList {
			if r.LookupRemoteImage(imgId, ep, repoData.Tokens) {
				// Image already on this endpoint; only the tags below
				// still need to be registered.
				out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId)))
			} else {
				if _, err := srv.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil {
					// FIXME: Continue on error?
					return err
				}
			}
			for _, tag := range tagsByImage[imgId] {
				out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag))
				if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil {
					return err
				}
			}
		}
	}
	// Final index upload (validate=true) marks the push as complete on
	// the endpoints used above.
	if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil {
		return err
	}
	return nil
}
// pushImage uploads a single image (JSON metadata, layer tarball, then
// checksum) to the registry endpoint ep and returns the layer checksum.
// When the registry reports the image already exists, the upload is
// skipped and an empty checksum is returned with a nil error.
func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
	out = utils.NewWriteFlusher(out)
	// Read the raw image JSON straight from the graph's on-disk layout.
	jsonRaw, err := ioutil.ReadFile(path.Join(srv.daemon.Graph().Root, imgID, "json"))
	if err != nil {
		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil))
	imgData := &registry.ImgData{
		ID: imgID,
	}
	// Send the json
	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
		if err == registry.ErrAlreadyExists {
			out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}
	// Materialize the layer as a temporary uncompressed archive so its
	// size is known before the upload starts.
	layerData, err := srv.daemon.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
	if err != nil {
		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	defer os.RemoveAll(layerData.Name())
	// Send the layer
	utils.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
		return "", err
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil))
	return imgData.Checksum, nil
}
// FIXME: Allow to interrupt current push when new push of same image is done.

// ImagePush handles the "push" job: Usage: push IMAGE.
// If the argument resolves to a local image ID, just that image is
// pushed; otherwise the whole repository registered under that name is
// pushed. Job inputs: env "tag", "authConfig" (JSON), "metaHeaders"
// (JSON) and "json" (stream format).
func (srv *Server) ImagePush(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	var (
		localName   = job.Args[0]
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)
	tag := job.Getenv("tag")
	// Best effort: decoding errors for these optional envs are ignored.
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)
	// Only one push per name at a time.
	if _, err := srv.poolAdd("push", localName); err != nil {
		return job.Error(err)
	}
	defer srv.poolRemove("push", localName)
	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}
	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
	if err != nil {
		return job.Error(err)
	}
	// Try to interpret the name as an image ID first; err is examined
	// only after the registry client has been created successfully.
	img, err := srv.daemon.Graph().Get(localName)
	r, err2 := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
	if err2 != nil {
		return job.Error(err2)
	}
	if err != nil {
		reposLen := 1
		if tag == "" {
			reposLen = len(srv.daemon.Repositories().Repositories[localName])
		}
		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
		// If it fails, try to get the repository
		if localRepo, exists := srv.daemon.Repositories().Repositories[localName]; exists {
			if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
				return job.Error(err)
			}
			return engine.StatusOK
		}
		return job.Error(err)
	}
	// The name resolved to a single image: push just that one.
	var token []string
	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
	if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// ImageImport handles the "import" job: Usage: import SRC REPO [TAG].
// SRC is either "-" (read a tarball from job.Stdin) or a URL to download
// the tarball from. The resulting image is created in the graph and, when
// REPO is non-empty, tagged as REPO:TAG. The new image ID is written to
// job.Stdout.
func (srv *Server) ImageImport(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 2 && n != 3 {
		return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
	}
	var (
		src     = job.Args[0]
		repo    = job.Args[1]
		tag     string
		sf      = utils.NewStreamFormatter(job.GetenvBool("json"))
		archive archive.ArchiveReader
		resp    *http.Response
	)
	if len(job.Args) > 2 {
		tag = job.Args[2]
	}
	if src == "-" {
		archive = job.Stdin
	} else {
		u, err := url.Parse(src)
		if err != nil {
			return job.Error(err)
		}
		// A bare host like "example.com/img.tar" parses with an empty
		// scheme; default to plain http in that case.
		if u.Scheme == "" {
			u.Scheme = "http"
			u.Host = src
			u.Path = ""
		}
		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
		// Download with curl (pretty progress bar)
		// If curl is not available, fallback to http.Get()
		resp, err = utils.Download(u.String())
		if err != nil {
			return job.Error(err)
		}
		// The progress reader wraps resp.Body; closing it (deferred) also
		// releases the HTTP response.
		progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
		defer progressReader.Close()
		archive = progressReader
	}
	img, err := srv.daemon.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
	if err != nil {
		return job.Error(err)
	}
	// Optionally register the image at REPO/TAG
	if repo != "" {
		if err := srv.daemon.Repositories().Set(repo, tag, img.ID, true); err != nil {
			return job.Error(err)
		}
	}
	job.Stdout.Write(sf.FormatStatus("", img.ID))
	return engine.StatusOK
}
  1512. func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
  1513. var name string
  1514. if len(job.Args) == 1 {
  1515. name = job.Args[0]
  1516. } else if len(job.Args) > 1 {
  1517. return job.Errorf("Usage: %s", job.Name)
  1518. }
  1519. config := runconfig.ContainerConfigFromJob(job)
  1520. if config.Memory != 0 && config.Memory < 524288 {
  1521. return job.Errorf("Minimum memory limit allowed is 512k")
  1522. }
  1523. if config.Memory > 0 && !srv.daemon.SystemConfig().MemoryLimit {
  1524. job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
  1525. config.Memory = 0
  1526. }
  1527. if config.Memory > 0 && !srv.daemon.SystemConfig().SwapLimit {
  1528. job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
  1529. config.MemorySwap = -1
  1530. }
  1531. container, buildWarnings, err := srv.daemon.Create(config, name)
  1532. if err != nil {
  1533. if srv.daemon.Graph().IsNotExist(err) {
  1534. _, tag := utils.ParseRepositoryTag(config.Image)
  1535. if tag == "" {
  1536. tag = graph.DEFAULTTAG
  1537. }
  1538. return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
  1539. }
  1540. return job.Error(err)
  1541. }
  1542. if !container.Config.NetworkDisabled && srv.daemon.SystemConfig().IPv4ForwardingDisabled {
  1543. job.Errorf("IPv4 forwarding is disabled.\n")
  1544. }
  1545. srv.LogEvent("create", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  1546. // FIXME: this is necessary because daemon.Create might return a nil container
  1547. // with a non-nil error. This should not happen! Once it's fixed we
  1548. // can remove this workaround.
  1549. if container != nil {
  1550. job.Printf("%s\n", container.ID)
  1551. }
  1552. for _, warning := range buildWarnings {
  1553. job.Errorf("%s\n", warning)
  1554. }
  1555. return engine.StatusOK
  1556. }
  1557. func (srv *Server) ContainerRestart(job *engine.Job) engine.Status {
  1558. if len(job.Args) != 1 {
  1559. return job.Errorf("Usage: %s CONTAINER\n", job.Name)
  1560. }
  1561. var (
  1562. name = job.Args[0]
  1563. t = 10
  1564. )
  1565. if job.EnvExists("t") {
  1566. t = job.GetenvInt("t")
  1567. }
  1568. if container := srv.daemon.Get(name); container != nil {
  1569. if err := container.Restart(int(t)); err != nil {
  1570. return job.Errorf("Cannot restart container %s: %s\n", name, err)
  1571. }
  1572. srv.LogEvent("restart", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  1573. } else {
  1574. return job.Errorf("No such container: %s\n", name)
  1575. }
  1576. return engine.StatusOK
  1577. }
  1578. func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
  1579. if len(job.Args) != 1 {
  1580. return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
  1581. }
  1582. name := job.Args[0]
  1583. removeVolume := job.GetenvBool("removeVolume")
  1584. removeLink := job.GetenvBool("removeLink")
  1585. forceRemove := job.GetenvBool("forceRemove")
  1586. container := srv.daemon.Get(name)
  1587. if removeLink {
  1588. if container == nil {
  1589. return job.Errorf("No such link: %s", name)
  1590. }
  1591. name, err := daemon.GetFullContainerName(name)
  1592. if err != nil {
  1593. job.Error(err)
  1594. }
  1595. parent, n := path.Split(name)
  1596. if parent == "/" {
  1597. return job.Errorf("Conflict, cannot remove the default name of the container")
  1598. }
  1599. pe := srv.daemon.ContainerGraph().Get(parent)
  1600. if pe == nil {
  1601. return job.Errorf("Cannot get parent %s for name %s", parent, name)
  1602. }
  1603. parentContainer := srv.daemon.Get(pe.ID())
  1604. if parentContainer != nil {
  1605. parentContainer.DisableLink(n)
  1606. }
  1607. if err := srv.daemon.ContainerGraph().Delete(name); err != nil {
  1608. return job.Error(err)
  1609. }
  1610. return engine.StatusOK
  1611. }
  1612. if container != nil {
  1613. if container.State.IsRunning() {
  1614. if forceRemove {
  1615. if err := container.Stop(5); err != nil {
  1616. return job.Errorf("Could not stop running container, cannot remove - %v", err)
  1617. }
  1618. } else {
  1619. return job.Errorf("Impossible to remove a running container, please stop it first or use -f")
  1620. }
  1621. }
  1622. if err := srv.daemon.Destroy(container); err != nil {
  1623. return job.Errorf("Cannot destroy container %s: %s", name, err)
  1624. }
  1625. srv.LogEvent("destroy", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  1626. if removeVolume {
  1627. var (
  1628. volumes = make(map[string]struct{})
  1629. binds = make(map[string]struct{})
  1630. usedVolumes = make(map[string]*daemon.Container)
  1631. )
  1632. // the volume id is always the base of the path
  1633. getVolumeId := func(p string) string {
  1634. return filepath.Base(strings.TrimSuffix(p, "/layer"))
  1635. }
  1636. // populate bind map so that they can be skipped and not removed
  1637. for _, bind := range container.HostConfig().Binds {
  1638. source := strings.Split(bind, ":")[0]
  1639. // TODO: refactor all volume stuff, all of it
  1640. // it is very important that we eval the link or comparing the keys to container.Volumes will not work
  1641. //
  1642. // eval symlink can fail, ref #5244 if we receive an is not exist error we can ignore it
  1643. p, err := filepath.EvalSymlinks(source)
  1644. if err != nil && !os.IsNotExist(err) {
  1645. return job.Error(err)
  1646. }
  1647. if p != "" {
  1648. source = p
  1649. }
  1650. binds[source] = struct{}{}
  1651. }
  1652. // Store all the deleted containers volumes
  1653. for _, volumeId := range container.Volumes {
  1654. // Skip the volumes mounted from external
  1655. // bind mounts here will will be evaluated for a symlink
  1656. if _, exists := binds[volumeId]; exists {
  1657. continue
  1658. }
  1659. volumeId = getVolumeId(volumeId)
  1660. volumes[volumeId] = struct{}{}
  1661. }
  1662. // Retrieve all volumes from all remaining containers
  1663. for _, container := range srv.daemon.List() {
  1664. for _, containerVolumeId := range container.Volumes {
  1665. containerVolumeId = getVolumeId(containerVolumeId)
  1666. usedVolumes[containerVolumeId] = container
  1667. }
  1668. }
  1669. for volumeId := range volumes {
  1670. // If the requested volu
  1671. if c, exists := usedVolumes[volumeId]; exists {
  1672. log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
  1673. continue
  1674. }
  1675. if err := srv.daemon.Volumes().Delete(volumeId); err != nil {
  1676. return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
  1677. }
  1678. }
  1679. }
  1680. } else {
  1681. return job.Errorf("No such container: %s", name)
  1682. }
  1683. return engine.StatusOK
  1684. }
// DeleteImage removes the image referenced by name (either "repo[:tag]" or
// an image ID). Each untagged or deleted item is appended to imgs as an
// entry with an "Untagged" or "Deleted" key.
//
// first is true for the caller's top-level invocation; the recursive
// parent-pruning calls below pass false. force permits deletion when the
// ID is tagged in multiple repositories; noprune disables parent pruning.
func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, noprune bool) error {
	var (
		repoName, tag string
		tags          = []string{}
		tagDeleted    bool
	)
	repoName, tag = utils.ParseRepositoryTag(name)
	if tag == "" {
		tag = graph.DEFAULTTAG
	}
	img, err := srv.daemon.Repositories().LookupImage(name)
	if err != nil {
		// Distinguish "repo exists but tag doesn't" from "no such repo".
		if r, _ := srv.daemon.Repositories().Get(repoName); r != nil {
			return fmt.Errorf("No such image: %s:%s", repoName, tag)
		}
		return fmt.Errorf("No such image: %s", name)
	}
	// name matched (part of) the image ID, so it was not a repo reference.
	if strings.Contains(img.ID, name) {
		repoName = ""
		tag = ""
	}
	byParents, err := srv.daemon.Graph().ByParent()
	if err != nil {
		return err
	}
	// If deleting by id, see if the id belongs to only one repository.
	if repoName == "" {
		for _, repoAndTag := range srv.daemon.Repositories().ByID()[img.ID] {
			parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
			if repoName == "" || repoName == parsedRepo {
				repoName = parsedRepo
				if parsedTag != "" {
					tags = append(tags, parsedTag)
				}
			} else if repoName != parsedRepo && !force {
				// the id belongs to multiple repos, like base:latest and user:test,
				// in that case return conflict
				return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name)
			}
		}
	} else {
		tags = append(tags, tag)
	}
	// On recursive (parent-pruning) calls, a still-tagged parent stops the prune.
	if !first && len(tags) > 0 {
		return nil
	}
	// Untag the current image.
	for _, tag := range tags {
		tagDeleted, err = srv.daemon.Repositories().Delete(repoName, tag)
		if err != nil {
			return err
		}
		if tagDeleted {
			out := &engine.Env{}
			out.Set("Untagged", repoName+":"+tag)
			imgs.Add(out)
			srv.LogEvent("untag", img.ID, "")
		}
	}
	tags = srv.daemon.Repositories().ByID()[img.ID]
	// Only delete the image data itself once no (other) tags reference it
	// and no other image lists it as its parent.
	if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
		if len(byParents[img.ID]) == 0 {
			if err := srv.canDeleteImage(img.ID, force, tagDeleted); err != nil {
				return err
			}
			if err := srv.daemon.Repositories().DeleteAll(img.ID); err != nil {
				return err
			}
			if err := srv.daemon.Graph().Delete(img.ID); err != nil {
				return err
			}
			out := &engine.Env{}
			out.Set("Deleted", img.ID)
			imgs.Add(out)
			srv.LogEvent("delete", img.ID, "")
			if img.Parent != "" && !noprune {
				// Recursively prune now-unreferenced parents. Note the
				// recursion's error is only surfaced on a top-level call.
				err := srv.DeleteImage(img.Parent, imgs, false, force, noprune)
				if first {
					return err
				}
			}
		}
	}
	return nil
}
  1770. func (srv *Server) ImageDelete(job *engine.Job) engine.Status {
  1771. if n := len(job.Args); n != 1 {
  1772. return job.Errorf("Usage: %s IMAGE", job.Name)
  1773. }
  1774. imgs := engine.NewTable("", 0)
  1775. if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
  1776. return job.Error(err)
  1777. }
  1778. if len(imgs.Data) == 0 {
  1779. return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
  1780. }
  1781. if _, err := imgs.WriteListTo(job.Stdout); err != nil {
  1782. return job.Error(err)
  1783. }
  1784. return engine.StatusOK
  1785. }
  1786. func (srv *Server) canDeleteImage(imgID string, force, untagged bool) error {
  1787. var message string
  1788. if untagged {
  1789. message = " (docker untagged the image)"
  1790. }
  1791. for _, container := range srv.daemon.List() {
  1792. parent, err := srv.daemon.Repositories().LookupImage(container.Image)
  1793. if err != nil {
  1794. return err
  1795. }
  1796. if err := parent.WalkHistory(func(p *image.Image) error {
  1797. if imgID == p.ID {
  1798. if container.State.IsRunning() {
  1799. if force {
  1800. return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it%s, stop it and retry", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
  1801. }
  1802. return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it%s, stop it and use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
  1803. } else if !force {
  1804. return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it%s, use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
  1805. }
  1806. }
  1807. return nil
  1808. }); err != nil {
  1809. return err
  1810. }
  1811. }
  1812. return nil
  1813. }
  1814. func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
  1815. // Retrieve all images
  1816. images, err := srv.daemon.Graph().Map()
  1817. if err != nil {
  1818. return nil, err
  1819. }
  1820. // Store the tree in a map of map (map[parentId][childId])
  1821. imageMap := make(map[string]map[string]struct{})
  1822. for _, img := range images {
  1823. if _, exists := imageMap[img.Parent]; !exists {
  1824. imageMap[img.Parent] = make(map[string]struct{})
  1825. }
  1826. imageMap[img.Parent][img.ID] = struct{}{}
  1827. }
  1828. // Loop on the children of the given image and check the config
  1829. var match *image.Image
  1830. for elem := range imageMap[imgID] {
  1831. img, err := srv.daemon.Graph().Get(elem)
  1832. if err != nil {
  1833. return nil, err
  1834. }
  1835. if runconfig.Compare(&img.ContainerConfig, config) {
  1836. if match == nil || match.Created.Before(img.Created) {
  1837. match = img
  1838. }
  1839. }
  1840. }
  1841. return match, nil
  1842. }
  1843. func (srv *Server) setHostConfig(container *daemon.Container, hostConfig *runconfig.HostConfig) error {
  1844. // Validate the HostConfig binds. Make sure that:
  1845. // the source exists
  1846. for _, bind := range hostConfig.Binds {
  1847. splitBind := strings.Split(bind, ":")
  1848. source := splitBind[0]
  1849. // ensure the source exists on the host
  1850. _, err := os.Stat(source)
  1851. if err != nil && os.IsNotExist(err) {
  1852. err = os.MkdirAll(source, 0755)
  1853. if err != nil {
  1854. return fmt.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error())
  1855. }
  1856. }
  1857. }
  1858. // Register any links from the host config before starting the container
  1859. if err := srv.daemon.RegisterLinks(container, hostConfig); err != nil {
  1860. return err
  1861. }
  1862. container.SetHostConfig(hostConfig)
  1863. container.ToDisk()
  1864. return nil
  1865. }
  1866. func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
  1867. if len(job.Args) < 1 {
  1868. return job.Errorf("Usage: %s container_id", job.Name)
  1869. }
  1870. var (
  1871. name = job.Args[0]
  1872. daemon = srv.daemon
  1873. container = daemon.Get(name)
  1874. )
  1875. if container == nil {
  1876. return job.Errorf("No such container: %s", name)
  1877. }
  1878. if container.State.IsRunning() {
  1879. return job.Errorf("Container already started")
  1880. }
  1881. // If no environment was set, then no hostconfig was passed.
  1882. if len(job.Environ()) > 0 {
  1883. hostConfig := runconfig.ContainerHostConfigFromJob(job)
  1884. if err := srv.setHostConfig(container, hostConfig); err != nil {
  1885. return job.Error(err)
  1886. }
  1887. }
  1888. if err := container.Start(); err != nil {
  1889. return job.Errorf("Cannot start container %s: %s", name, err)
  1890. }
  1891. srv.LogEvent("start", container.ID, daemon.Repositories().ImageName(container.Image))
  1892. return engine.StatusOK
  1893. }
  1894. func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
  1895. if len(job.Args) != 1 {
  1896. return job.Errorf("Usage: %s CONTAINER\n", job.Name)
  1897. }
  1898. var (
  1899. name = job.Args[0]
  1900. t = 10
  1901. )
  1902. if job.EnvExists("t") {
  1903. t = job.GetenvInt("t")
  1904. }
  1905. if container := srv.daemon.Get(name); container != nil {
  1906. if !container.State.IsRunning() {
  1907. return job.Errorf("Container already stopped")
  1908. }
  1909. if err := container.Stop(int(t)); err != nil {
  1910. return job.Errorf("Cannot stop container %s: %s\n", name, err)
  1911. }
  1912. srv.LogEvent("stop", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  1913. } else {
  1914. return job.Errorf("No such container: %s\n", name)
  1915. }
  1916. return engine.StatusOK
  1917. }
  1918. func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
  1919. if len(job.Args) != 1 {
  1920. return job.Errorf("Usage: %s", job.Name)
  1921. }
  1922. name := job.Args[0]
  1923. if container := srv.daemon.Get(name); container != nil {
  1924. status, _ := container.State.WaitStop(-1 * time.Second)
  1925. job.Printf("%d\n", status)
  1926. return engine.StatusOK
  1927. }
  1928. return job.Errorf("%s: no such container: %s", job.Name, name)
  1929. }
  1930. func (srv *Server) ContainerResize(job *engine.Job) engine.Status {
  1931. if len(job.Args) != 3 {
  1932. return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
  1933. }
  1934. name := job.Args[0]
  1935. height, err := strconv.Atoi(job.Args[1])
  1936. if err != nil {
  1937. return job.Error(err)
  1938. }
  1939. width, err := strconv.Atoi(job.Args[2])
  1940. if err != nil {
  1941. return job.Error(err)
  1942. }
  1943. if container := srv.daemon.Get(name); container != nil {
  1944. if err := container.Resize(height, width); err != nil {
  1945. return job.Error(err)
  1946. }
  1947. return engine.StatusOK
  1948. }
  1949. return job.Errorf("No such container: %s", name)
  1950. }
  1951. func (srv *Server) ContainerLogs(job *engine.Job) engine.Status {
  1952. if len(job.Args) != 1 {
  1953. return job.Errorf("Usage: %s CONTAINER\n", job.Name)
  1954. }
  1955. var (
  1956. name = job.Args[0]
  1957. stdout = job.GetenvBool("stdout")
  1958. stderr = job.GetenvBool("stderr")
  1959. tail = job.Getenv("tail")
  1960. follow = job.GetenvBool("follow")
  1961. times = job.GetenvBool("timestamps")
  1962. lines = -1
  1963. format string
  1964. )
  1965. if !(stdout || stderr) {
  1966. return job.Errorf("You must choose at least one stream")
  1967. }
  1968. if times {
  1969. format = time.StampMilli
  1970. }
  1971. if tail == "" {
  1972. tail = "all"
  1973. }
  1974. container := srv.daemon.Get(name)
  1975. if container == nil {
  1976. return job.Errorf("No such container: %s", name)
  1977. }
  1978. cLog, err := container.ReadLog("json")
  1979. if err != nil && os.IsNotExist(err) {
  1980. // Legacy logs
  1981. utils.Debugf("Old logs format")
  1982. if stdout {
  1983. cLog, err := container.ReadLog("stdout")
  1984. if err != nil {
  1985. utils.Errorf("Error reading logs (stdout): %s", err)
  1986. } else if _, err := io.Copy(job.Stdout, cLog); err != nil {
  1987. utils.Errorf("Error streaming logs (stdout): %s", err)
  1988. }
  1989. }
  1990. if stderr {
  1991. cLog, err := container.ReadLog("stderr")
  1992. if err != nil {
  1993. utils.Errorf("Error reading logs (stderr): %s", err)
  1994. } else if _, err := io.Copy(job.Stderr, cLog); err != nil {
  1995. utils.Errorf("Error streaming logs (stderr): %s", err)
  1996. }
  1997. }
  1998. } else if err != nil {
  1999. utils.Errorf("Error reading logs (json): %s", err)
  2000. } else {
  2001. if tail != "all" {
  2002. var err error
  2003. lines, err = strconv.Atoi(tail)
  2004. if err != nil {
  2005. utils.Errorf("Failed to parse tail %s, error: %v, show all logs", err)
  2006. lines = -1
  2007. }
  2008. }
  2009. if lines != 0 {
  2010. if lines > 0 {
  2011. f := cLog.(*os.File)
  2012. ls, err := tailfile.TailFile(f, lines)
  2013. if err != nil {
  2014. return job.Error(err)
  2015. }
  2016. tmp := bytes.NewBuffer([]byte{})
  2017. for _, l := range ls {
  2018. fmt.Fprintf(tmp, "%s\n", l)
  2019. }
  2020. cLog = tmp
  2021. }
  2022. dec := json.NewDecoder(cLog)
  2023. for {
  2024. l := &utils.JSONLog{}
  2025. if err := dec.Decode(l); err == io.EOF {
  2026. break
  2027. } else if err != nil {
  2028. utils.Errorf("Error streaming logs: %s", err)
  2029. break
  2030. }
  2031. logLine := l.Log
  2032. if times {
  2033. logLine = fmt.Sprintf("[%s] %s", l.Created.Format(format), logLine)
  2034. }
  2035. if l.Stream == "stdout" && stdout {
  2036. fmt.Fprintf(job.Stdout, "%s", logLine)
  2037. }
  2038. if l.Stream == "stderr" && stderr {
  2039. fmt.Fprintf(job.Stderr, "%s", logLine)
  2040. }
  2041. }
  2042. }
  2043. }
  2044. if follow {
  2045. errors := make(chan error, 2)
  2046. if stdout {
  2047. stdoutPipe := container.StdoutLogPipe()
  2048. go func() {
  2049. errors <- utils.WriteLog(stdoutPipe, job.Stdout, format)
  2050. }()
  2051. }
  2052. if stderr {
  2053. stderrPipe := container.StderrLogPipe()
  2054. go func() {
  2055. errors <- utils.WriteLog(stderrPipe, job.Stderr, format)
  2056. }()
  2057. }
  2058. err := <-errors
  2059. if err != nil {
  2060. utils.Errorf("%s", err)
  2061. }
  2062. }
  2063. return engine.StatusOK
  2064. }
// ContainerAttach attaches the job's stdio streams to a container.
//
// Job environment:
//   logs   - replay the container's stored logs first
//   stream - attach live to the container's stdio afterwards
//   stdin, stdout, stderr - which individual streams to wire up
func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name   = job.Args[0]
		logs   = job.GetenvBool("logs")
		stream = job.GetenvBool("stream")
		stdin  = job.GetenvBool("stdin")
		stdout = job.GetenvBool("stdout")
		stderr = job.GetenvBool("stderr")
	)
	container := srv.daemon.Get(name)
	if container == nil {
		return job.Errorf("No such container: %s", name)
	}
	//logs
	if logs {
		cLog, err := container.ReadLog("json")
		if err != nil && os.IsNotExist(err) {
			// Legacy logs: separate per-stream files instead of one JSON log.
			utils.Debugf("Old logs format")
			if stdout {
				cLog, err := container.ReadLog("stdout")
				if err != nil {
					utils.Errorf("Error reading logs (stdout): %s", err)
				} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
					utils.Errorf("Error streaming logs (stdout): %s", err)
				}
			}
			if stderr {
				cLog, err := container.ReadLog("stderr")
				if err != nil {
					utils.Errorf("Error reading logs (stderr): %s", err)
				} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
					utils.Errorf("Error streaming logs (stderr): %s", err)
				}
			}
		} else if err != nil {
			utils.Errorf("Error reading logs (json): %s", err)
		} else {
			// JSON log: decode records and demultiplex onto the requested streams.
			dec := json.NewDecoder(cLog)
			for {
				l := &utils.JSONLog{}
				if err := dec.Decode(l); err == io.EOF {
					break
				} else if err != nil {
					utils.Errorf("Error streaming logs: %s", err)
					break
				}
				if l.Stream == "stdout" && stdout {
					fmt.Fprintf(job.Stdout, "%s", l.Log)
				}
				if l.Stream == "stderr" && stderr {
					fmt.Fprintf(job.Stderr, "%s", l.Log)
				}
			}
		}
	}
	//stream
	if stream {
		var (
			cStdin           io.ReadCloser
			cStdout, cStderr io.Writer
			cStdinCloser     io.Closer
		)
		if stdin {
			// Pump the job's stdin through a pipe so the copy goroutine can
			// close the write end when the client side is exhausted.
			r, w := io.Pipe()
			go func() {
				defer w.Close()
				defer utils.Debugf("Closing buffered stdin pipe")
				io.Copy(w, job.Stdin)
			}()
			cStdin = r
			cStdinCloser = job.Stdin
		}
		if stdout {
			cStdout = job.Stdout
		}
		if stderr {
			cStderr = job.Stderr
		}
		// Block until the attach channel signals completion.
		<-srv.daemon.Attach(container, cStdin, cStdinCloser, cStdout, cStderr)
		// If we are in stdinonce mode, wait for the process to end
		// otherwise, simply return
		if container.Config.StdinOnce && !container.Config.Tty {
			container.State.WaitStop(-1 * time.Second)
		}
	}
	return engine.StatusOK
}
  2156. func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
  2157. if len(job.Args) != 2 {
  2158. return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
  2159. }
  2160. var (
  2161. name = job.Args[0]
  2162. resource = job.Args[1]
  2163. )
  2164. if container := srv.daemon.Get(name); container != nil {
  2165. data, err := container.Copy(resource)
  2166. if err != nil {
  2167. return job.Error(err)
  2168. }
  2169. defer data.Close()
  2170. if _, err := io.Copy(job.Stdout, data); err != nil {
  2171. return job.Error(err)
  2172. }
  2173. return engine.StatusOK
  2174. }
  2175. return job.Errorf("No such container: %s", name)
  2176. }
  2177. func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) {
  2178. daemon, err := daemon.NewDaemon(config, eng)
  2179. if err != nil {
  2180. return nil, err
  2181. }
  2182. srv := &Server{
  2183. Eng: eng,
  2184. daemon: daemon,
  2185. pullingPool: make(map[string]chan struct{}),
  2186. pushingPool: make(map[string]chan struct{}),
  2187. events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events
  2188. eventPublisher: utils.NewJSONMessagePublisher(),
  2189. }
  2190. daemon.SetServer(srv)
  2191. return srv, nil
  2192. }
  2193. func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage {
  2194. now := time.Now().UTC().Unix()
  2195. jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
  2196. srv.AddEvent(jm)
  2197. srv.eventPublisher.Publish(jm)
  2198. return &jm
  2199. }
  2200. func (srv *Server) AddEvent(jm utils.JSONMessage) {
  2201. srv.Lock()
  2202. defer srv.Unlock()
  2203. srv.events = append(srv.events, jm)
  2204. }
  2205. func (srv *Server) GetEvents() []utils.JSONMessage {
  2206. srv.RLock()
  2207. defer srv.RUnlock()
  2208. return srv.events
  2209. }
  2210. func (srv *Server) SetRunning(status bool) {
  2211. srv.Lock()
  2212. defer srv.Unlock()
  2213. srv.running = status
  2214. }
  2215. func (srv *Server) IsRunning() bool {
  2216. srv.RLock()
  2217. defer srv.RUnlock()
  2218. return srv.running
  2219. }
  2220. func (srv *Server) Close() error {
  2221. if srv == nil {
  2222. return nil
  2223. }
  2224. srv.SetRunning(false)
  2225. done := make(chan struct{})
  2226. go func() {
  2227. srv.tasks.Wait()
  2228. close(done)
  2229. }()
  2230. select {
  2231. // Waiting server jobs for 15 seconds, shutdown immediately after that time
  2232. case <-time.After(time.Second * 15):
  2233. case <-done:
  2234. }
  2235. if srv.daemon == nil {
  2236. return nil
  2237. }
  2238. return srv.daemon.Close()
  2239. }
// Server glues the job engine to the daemon: it dispatches engine jobs,
// tracks in-flight pulls/pushes, and records/publishes daemon events.
// The embedded RWMutex guards the mutable fields (events, running).
type Server struct {
	sync.RWMutex
	daemon         *daemon.Daemon              // backing container/image daemon
	pullingPool    map[string]chan struct{}    // in-flight pulls; key presumably the image name — confirm at call sites
	pushingPool    map[string]chan struct{}    // in-flight pushes; key presumably the image name — confirm at call sites
	events         []utils.JSONMessage         // in-memory event history (see AddEvent/GetEvents)
	eventPublisher *utils.JSONMessagePublisher // fan-out to live event subscribers (see LogEvent)
	Eng            *engine.Engine
	running        bool           // whether the server accepts work (see SetRunning/IsRunning)
	tasks          sync.WaitGroup // outstanding work awaited (up to 15s) by Close
}