server.go 70 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527
  1. // DEPRECATION NOTICE. PLEASE DO NOT ADD ANYTHING TO THIS FILE.
  2. //
  3. // server/server.go is deprecated. We are working on breaking it up into smaller, cleaner
  4. // pieces which will be easier to find and test. This will help make the code less
  5. // redundant and more readable.
  6. //
  7. // Contributors, please don't add anything to server/server.go, unless it has the explicit
  8. // goal of helping the deprecation effort.
  9. //
  10. // Maintainers, please refuse patches which add code to server/server.go.
  11. //
  12. // Instead try the following files:
  13. // * For code related to local image management, try graph/
  14. // * For code related to image downloading, uploading, remote search etc, try registry/
  15. // * For code related to the docker daemon, try daemon/
  16. // * For small utilities which could potentially be useful outside of Docker, try pkg/
  17. // * For miscellaneous "util" functions which are docker-specific, try encapsulating them
  18. // inside one of the subsystems above. If you really think they should be more widely
  19. // available, are you sure you can't remove the docker dependencies and move them to
  20. // pkg? In last resort, you can add them to utils/ (but please try not to).
  21. package server
  22. import (
  23. "encoding/json"
  24. "fmt"
  25. "io"
  26. "io/ioutil"
  27. "log"
  28. "net"
  29. "net/http"
  30. "net/url"
  31. "os"
  32. "os/exec"
  33. gosignal "os/signal"
  34. "path"
  35. "path/filepath"
  36. "runtime"
  37. "strconv"
  38. "strings"
  39. "sync"
  40. "sync/atomic"
  41. "syscall"
  42. "time"
  43. "github.com/dotcloud/docker/archive"
  44. "github.com/dotcloud/docker/daemon"
  45. "github.com/dotcloud/docker/daemonconfig"
  46. "github.com/dotcloud/docker/dockerversion"
  47. "github.com/dotcloud/docker/engine"
  48. "github.com/dotcloud/docker/graph"
  49. "github.com/dotcloud/docker/image"
  50. "github.com/dotcloud/docker/pkg/graphdb"
  51. "github.com/dotcloud/docker/pkg/signal"
  52. "github.com/dotcloud/docker/registry"
  53. "github.com/dotcloud/docker/runconfig"
  54. "github.com/dotcloud/docker/utils"
  55. "github.com/dotcloud/docker/utils/filters"
  56. )
  57. func (srv *Server) handlerWrap(h engine.Handler) engine.Handler {
  58. return func(job *engine.Job) engine.Status {
  59. if !srv.IsRunning() {
  60. return job.Errorf("Server is not running")
  61. }
  62. srv.tasks.Add(1)
  63. defer srv.tasks.Done()
  64. return h(job)
  65. }
  66. }
// InitServer runs the remote api server `srv` as a daemon.
// Only one api server can run at the same time - this is enforced by a pidfile.
// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
func InitServer(job *engine.Job) engine.Status {
	job.Logf("Creating server")
	srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job))
	if err != nil {
		return job.Error(err)
	}
	if srv.daemon.Config().Pidfile != "" {
		job.Logf("Creating pidfile")
		if err := utils.CreatePidFile(srv.daemon.Config().Pidfile); err != nil {
			// FIXME: do we need fatal here instead of returning a job error?
			log.Fatal(err)
		}
	}
	// Trap SIGINT/SIGTERM/SIGQUIT so the pidfile is removed and the server
	// shuts down cleanly before the process exits.
	job.Logf("Setting up signal traps")
	c := make(chan os.Signal, 1)
	gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
	go func() {
		// Counts repeated SIGINT/SIGTERM deliveries: the first triggers the
		// cleanup; the third and beyond force an immediate exit, interrupting
		// any cleanup still in progress.
		interruptCount := uint32(0)
		for sig := range c {
			// Each signal is handled in its own goroutine so a slow Close()
			// does not block handling of subsequent (forcing) signals.
			go func(sig os.Signal) {
				log.Printf("Received signal '%v', starting shutdown of docker...\n", sig)
				switch sig {
				case os.Interrupt, syscall.SIGTERM:
					// If the user really wants to interrupt, let him do so.
					if atomic.LoadUint32(&interruptCount) < 3 {
						atomic.AddUint32(&interruptCount, 1)
						// Initiate the cleanup only once
						if atomic.LoadUint32(&interruptCount) == 1 {
							utils.RemovePidFile(srv.daemon.Config().Pidfile)
							srv.Close()
						} else {
							// Cleanup already started by an earlier signal;
							// do not exit yet, just ignore this one.
							return
						}
					} else {
						log.Printf("Force shutdown of docker, interrupting cleanup\n")
					}
				case syscall.SIGQUIT:
					// SIGQUIT skips cleanup entirely and falls through to exit.
				}
				// Conventional exit code: 128 + signal number.
				os.Exit(128 + int(sig.(syscall.Signal)))
			}(sig)
		}
	}()
	// Expose the server and daemon to the HTTP API layer.
	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
	// Register every job handler, wrapped so that jobs are refused once the
	// server stops and in-flight jobs are tracked (see handlerWrap).
	// FIXME: 'insert' is deprecated and should be removed in a future version.
	for name, handler := range map[string]engine.Handler{
		"export":           srv.ContainerExport,
		"create":           srv.ContainerCreate,
		"stop":             srv.ContainerStop,
		"restart":          srv.ContainerRestart,
		"start":            srv.ContainerStart,
		"kill":             srv.ContainerKill,
		"pause":            srv.ContainerPause,
		"unpause":          srv.ContainerUnpause,
		"wait":             srv.ContainerWait,
		"tag":              srv.ImageTag, // FIXME merge with "image_tag"
		"resize":           srv.ContainerResize,
		"commit":           srv.ContainerCommit,
		"info":             srv.DockerInfo,
		"container_delete": srv.ContainerDestroy,
		"image_export":     srv.ImageExport,
		"images":           srv.Images,
		"history":          srv.ImageHistory,
		"viz":              srv.ImagesViz,
		"container_copy":   srv.ContainerCopy,
		"insert":           srv.ImageInsert,
		"attach":           srv.ContainerAttach,
		"logs":             srv.ContainerLogs,
		"changes":          srv.ContainerChanges,
		"top":              srv.ContainerTop,
		"load":             srv.ImageLoad,
		"build":            srv.Build,
		"pull":             srv.ImagePull,
		"import":           srv.ImageImport,
		"image_delete":     srv.ImageDelete,
		"events":           srv.Events,
		"push":             srv.ImagePush,
		"containers":       srv.Containers,
	} {
		if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil {
			return job.Error(err)
		}
	}
	// Install image-related commands from the image subsystem.
	// See `graph/service.go`
	if err := srv.daemon.Repositories().Install(job.Eng); err != nil {
		return job.Error(err)
	}
	// Install daemon-related commands from the daemon subsystem.
	// See `daemon/`
	if err := srv.daemon.Install(job.Eng); err != nil {
		return job.Error(err)
	}
	srv.SetRunning(true)
	return engine.StatusOK
}
  166. func (srv *Server) ContainerPause(job *engine.Job) engine.Status {
  167. if n := len(job.Args); n < 1 || n > 2 {
  168. return job.Errorf("Usage: %s CONTAINER", job.Name)
  169. }
  170. var (
  171. name = job.Args[0]
  172. )
  173. if container := srv.daemon.Get(name); container != nil {
  174. if err := container.Pause(); err != nil {
  175. return job.Errorf("Cannot pause container %s: %s", name, err)
  176. }
  177. } else {
  178. return job.Errorf("No such container: %s", name)
  179. }
  180. return engine.StatusOK
  181. }
  182. func (srv *Server) ContainerUnpause(job *engine.Job) engine.Status {
  183. if n := len(job.Args); n < 1 || n > 2 {
  184. return job.Errorf("Usage: %s CONTAINER", job.Name)
  185. }
  186. var (
  187. name = job.Args[0]
  188. )
  189. if container := srv.daemon.Get(name); container != nil {
  190. if err := container.Unpause(); err != nil {
  191. return job.Errorf("Cannot unpause container %s: %s", name, err)
  192. }
  193. } else {
  194. return job.Errorf("No such container: %s", name)
  195. }
  196. return engine.StatusOK
  197. }
  198. // ContainerKill send signal to the container
  199. // If no signal is given (sig 0), then Kill with SIGKILL and wait
  200. // for the container to exit.
  201. // If a signal is given, then just send it to the container and return.
  202. func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
  203. if n := len(job.Args); n < 1 || n > 2 {
  204. return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
  205. }
  206. var (
  207. name = job.Args[0]
  208. sig uint64
  209. err error
  210. )
  211. // If we have a signal, look at it. Otherwise, do nothing
  212. if len(job.Args) == 2 && job.Args[1] != "" {
  213. // Check if we passed the signal as a number:
  214. // The largest legal signal is 31, so let's parse on 5 bits
  215. sig, err = strconv.ParseUint(job.Args[1], 10, 5)
  216. if err != nil {
  217. // The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL")
  218. sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")])
  219. if sig == 0 {
  220. return job.Errorf("Invalid signal: %s", job.Args[1])
  221. }
  222. }
  223. }
  224. if container := srv.daemon.Get(name); container != nil {
  225. // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
  226. if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
  227. if err := container.Kill(); err != nil {
  228. return job.Errorf("Cannot kill container %s: %s", name, err)
  229. }
  230. srv.LogEvent("kill", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  231. } else {
  232. // Otherwise, just send the requested signal
  233. if err := container.KillSig(int(sig)); err != nil {
  234. return job.Errorf("Cannot kill container %s: %s", name, err)
  235. }
  236. // FIXME: Add event for signals
  237. }
  238. } else {
  239. return job.Errorf("No such container: %s", name)
  240. }
  241. return engine.StatusOK
  242. }
  243. func (srv *Server) EvictListener(from int64) {
  244. srv.Lock()
  245. if old, ok := srv.listeners[from]; ok {
  246. delete(srv.listeners, from)
  247. close(old)
  248. }
  249. srv.Unlock()
  250. }
// Events streams daemon events to the caller as JSON messages.
// Env: "since" replays stored events newer than the given unix timestamp;
// "until" stops the stream at the given unix timestamp (0 = stream forever).
func (srv *Server) Events(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s", job.Name)
	}
	var (
		// `from` doubles as this subscription's unique key in srv.listeners.
		from    = time.Now().UTC().UnixNano()
		since   = job.GetenvInt64("since")
		until   = job.GetenvInt64("until")
		timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
	)
	// sendEvent marshals one event onto the job's stdout. A marshalling
	// failure is reported with the sentinel message "JSON error" so the
	// callers below can skip the bad event instead of aborting the stream.
	sendEvent := func(event *utils.JSONMessage) error {
		b, err := json.Marshal(event)
		if err != nil {
			return fmt.Errorf("JSON error")
		}
		_, err = job.Stdout.Write(b)
		return err
	}
	listener := make(chan utils.JSONMessage)
	srv.Lock()
	// Evict any previous listener registered under the same key.
	if old, ok := srv.listeners[from]; ok {
		delete(srv.listeners, from)
		close(old)
	}
	srv.listeners[from] = listener
	srv.Unlock()
	job.Stdout.Write(nil) // flush
	if since != 0 {
		// If since, send previous events that happened after the timestamp and until timestamp
		for _, event := range srv.GetEvents() {
			if event.Time >= since && (event.Time <= until || until == 0) {
				err := sendEvent(&event)
				if err != nil && err.Error() == "JSON error" {
					// Skip events that cannot be serialized.
					continue
				}
				if err != nil {
					// On error, evict the listener
					srv.EvictListener(from)
					return job.Error(err)
				}
			}
		}
	}
	// If no until, disable timeout
	if until == 0 {
		timeout.Stop()
	}
	// Stream live events until the channel is closed (eviction) or the
	// "until" deadline fires.
	for {
		select {
		case event, ok := <-listener:
			if !ok { // Channel is closed: listener was evicted
				return engine.StatusOK
			}
			err := sendEvent(&event)
			if err != nil && err.Error() == "JSON error" {
				continue
			}
			if err != nil {
				// On error, evict the listener
				srv.EvictListener(from)
				return job.Error(err)
			}
		case <-timeout.C:
			return engine.StatusOK
		}
	}
	// NOTE(review): unreachable — the loop above only exits via return.
	return engine.StatusOK
}
  319. func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
  320. if len(job.Args) != 1 {
  321. return job.Errorf("Usage: %s container_id", job.Name)
  322. }
  323. name := job.Args[0]
  324. if container := srv.daemon.Get(name); container != nil {
  325. data, err := container.Export()
  326. if err != nil {
  327. return job.Errorf("%s: %s", name, err)
  328. }
  329. defer data.Close()
  330. // Stream the entire contents of the container (basically a volatile snapshot)
  331. if _, err := io.Copy(job.Stdout, data); err != nil {
  332. return job.Errorf("%s: %s", name, err)
  333. }
  334. // FIXME: factor job-specific LogEvent to engine.Job.Run()
  335. srv.LogEvent("export", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  336. return engine.StatusOK
  337. }
  338. return job.Errorf("No such container: %s", name)
  339. }
  340. // ImageExport exports all images with the given tag. All versions
  341. // containing the same tag are exported. The resulting output is an
  342. // uncompressed tar ball.
  343. // name is the set of tags to export.
  344. // out is the writer where the images are written to.
  345. func (srv *Server) ImageExport(job *engine.Job) engine.Status {
  346. if len(job.Args) != 1 {
  347. return job.Errorf("Usage: %s IMAGE\n", job.Name)
  348. }
  349. name := job.Args[0]
  350. // get image json
  351. tempdir, err := ioutil.TempDir("", "docker-export-")
  352. if err != nil {
  353. return job.Error(err)
  354. }
  355. defer os.RemoveAll(tempdir)
  356. utils.Debugf("Serializing %s", name)
  357. rootRepo, err := srv.daemon.Repositories().Get(name)
  358. if err != nil {
  359. return job.Error(err)
  360. }
  361. if rootRepo != nil {
  362. for _, id := range rootRepo {
  363. if err := srv.exportImage(job.Eng, id, tempdir); err != nil {
  364. return job.Error(err)
  365. }
  366. }
  367. // write repositories
  368. rootRepoMap := map[string]graph.Repository{}
  369. rootRepoMap[name] = rootRepo
  370. rootRepoJson, _ := json.Marshal(rootRepoMap)
  371. if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
  372. return job.Error(err)
  373. }
  374. } else {
  375. if err := srv.exportImage(job.Eng, name, tempdir); err != nil {
  376. return job.Error(err)
  377. }
  378. }
  379. fs, err := archive.Tar(tempdir, archive.Uncompressed)
  380. if err != nil {
  381. return job.Error(err)
  382. }
  383. defer fs.Close()
  384. if _, err := io.Copy(job.Stdout, fs); err != nil {
  385. return job.Error(err)
  386. }
  387. utils.Debugf("End Serializing %s", name)
  388. return engine.StatusOK
  389. }
  390. func (srv *Server) exportImage(eng *engine.Engine, name, tempdir string) error {
  391. for n := name; n != ""; {
  392. // temporary directory
  393. tmpImageDir := path.Join(tempdir, n)
  394. if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil {
  395. if os.IsExist(err) {
  396. return nil
  397. }
  398. return err
  399. }
  400. var version = "1.0"
  401. var versionBuf = []byte(version)
  402. if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil {
  403. return err
  404. }
  405. // serialize json
  406. json, err := os.Create(path.Join(tmpImageDir, "json"))
  407. if err != nil {
  408. return err
  409. }
  410. job := eng.Job("image_inspect", n)
  411. job.Stdout.Add(json)
  412. if err := job.Run(); err != nil {
  413. return err
  414. }
  415. // serialize filesystem
  416. fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar"))
  417. if err != nil {
  418. return err
  419. }
  420. job = eng.Job("image_tarlayer", n)
  421. job.Stdout.Add(fsTar)
  422. if err := job.Run(); err != nil {
  423. return err
  424. }
  425. // find parent
  426. job = eng.Job("image_get", n)
  427. info, _ := job.Stdout.AddEnv()
  428. if err := job.Run(); err != nil {
  429. return err
  430. }
  431. n = info.Get("Parent")
  432. }
  433. return nil
  434. }
  435. func (srv *Server) Build(job *engine.Job) engine.Status {
  436. if len(job.Args) != 0 {
  437. return job.Errorf("Usage: %s\n", job.Name)
  438. }
  439. var (
  440. remoteURL = job.Getenv("remote")
  441. repoName = job.Getenv("t")
  442. suppressOutput = job.GetenvBool("q")
  443. noCache = job.GetenvBool("nocache")
  444. rm = job.GetenvBool("rm")
  445. forceRm = job.GetenvBool("forcerm")
  446. authConfig = &registry.AuthConfig{}
  447. configFile = &registry.ConfigFile{}
  448. tag string
  449. context io.ReadCloser
  450. )
  451. job.GetenvJson("authConfig", authConfig)
  452. job.GetenvJson("configFile", configFile)
  453. repoName, tag = utils.ParseRepositoryTag(repoName)
  454. if remoteURL == "" {
  455. context = ioutil.NopCloser(job.Stdin)
  456. } else if utils.IsGIT(remoteURL) {
  457. if !strings.HasPrefix(remoteURL, "git://") {
  458. remoteURL = "https://" + remoteURL
  459. }
  460. root, err := ioutil.TempDir("", "docker-build-git")
  461. if err != nil {
  462. return job.Error(err)
  463. }
  464. defer os.RemoveAll(root)
  465. if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
  466. return job.Errorf("Error trying to use git: %s (%s)", err, output)
  467. }
  468. c, err := archive.Tar(root, archive.Uncompressed)
  469. if err != nil {
  470. return job.Error(err)
  471. }
  472. context = c
  473. } else if utils.IsURL(remoteURL) {
  474. f, err := utils.Download(remoteURL)
  475. if err != nil {
  476. return job.Error(err)
  477. }
  478. defer f.Body.Close()
  479. dockerFile, err := ioutil.ReadAll(f.Body)
  480. if err != nil {
  481. return job.Error(err)
  482. }
  483. c, err := archive.Generate("Dockerfile", string(dockerFile))
  484. if err != nil {
  485. return job.Error(err)
  486. }
  487. context = c
  488. }
  489. defer context.Close()
  490. sf := utils.NewStreamFormatter(job.GetenvBool("json"))
  491. b := NewBuildFile(srv,
  492. &utils.StdoutFormater{
  493. Writer: job.Stdout,
  494. StreamFormatter: sf,
  495. },
  496. &utils.StderrFormater{
  497. Writer: job.Stdout,
  498. StreamFormatter: sf,
  499. },
  500. !suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile)
  501. id, err := b.Build(context)
  502. if err != nil {
  503. return job.Error(err)
  504. }
  505. if repoName != "" {
  506. srv.daemon.Repositories().Set(repoName, tag, id, false)
  507. }
  508. return engine.StatusOK
  509. }
  510. // Loads a set of images into the repository. This is the complementary of ImageExport.
  511. // The input stream is an uncompressed tar ball containing images and metadata.
  512. func (srv *Server) ImageLoad(job *engine.Job) engine.Status {
  513. tmpImageDir, err := ioutil.TempDir("", "docker-import-")
  514. if err != nil {
  515. return job.Error(err)
  516. }
  517. defer os.RemoveAll(tmpImageDir)
  518. var (
  519. repoTarFile = path.Join(tmpImageDir, "repo.tar")
  520. repoDir = path.Join(tmpImageDir, "repo")
  521. )
  522. tarFile, err := os.Create(repoTarFile)
  523. if err != nil {
  524. return job.Error(err)
  525. }
  526. if _, err := io.Copy(tarFile, job.Stdin); err != nil {
  527. return job.Error(err)
  528. }
  529. tarFile.Close()
  530. repoFile, err := os.Open(repoTarFile)
  531. if err != nil {
  532. return job.Error(err)
  533. }
  534. if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
  535. return job.Error(err)
  536. }
  537. if err := archive.Untar(repoFile, repoDir, nil); err != nil {
  538. return job.Error(err)
  539. }
  540. dirs, err := ioutil.ReadDir(repoDir)
  541. if err != nil {
  542. return job.Error(err)
  543. }
  544. for _, d := range dirs {
  545. if d.IsDir() {
  546. if err := srv.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil {
  547. return job.Error(err)
  548. }
  549. }
  550. }
  551. repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories"))
  552. if err == nil {
  553. repositories := map[string]graph.Repository{}
  554. if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
  555. return job.Error(err)
  556. }
  557. for imageName, tagMap := range repositories {
  558. for tag, address := range tagMap {
  559. if err := srv.daemon.Repositories().Set(imageName, tag, address, true); err != nil {
  560. return job.Error(err)
  561. }
  562. }
  563. }
  564. } else if !os.IsNotExist(err) {
  565. return job.Error(err)
  566. }
  567. return engine.StatusOK
  568. }
  569. func (srv *Server) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error {
  570. if err := eng.Job("image_get", address).Run(); err != nil {
  571. utils.Debugf("Loading %s", address)
  572. imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json"))
  573. if err != nil {
  574. utils.Debugf("Error reading json", err)
  575. return err
  576. }
  577. layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar"))
  578. if err != nil {
  579. utils.Debugf("Error reading embedded tar", err)
  580. return err
  581. }
  582. img, err := image.NewImgJSON(imageJson)
  583. if err != nil {
  584. utils.Debugf("Error unmarshalling json", err)
  585. return err
  586. }
  587. if img.Parent != "" {
  588. if !srv.daemon.Graph().Exists(img.Parent) {
  589. if err := srv.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil {
  590. return err
  591. }
  592. }
  593. }
  594. if err := srv.daemon.Graph().Register(imageJson, layer, img); err != nil {
  595. return err
  596. }
  597. }
  598. utils.Debugf("Completed processing %s", address)
  599. return nil
  600. }
// ImageInsert downloads a file from a URL and commits it into a new image
// layered on top of the named image, at the given path.
// Deprecated: use 'build' with 'ADD' instead.
// Args: IMAGE URL PATH.
// FIXME: 'insert' is deprecated and should be removed in a future version.
func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
	fmt.Fprintf(job.Stderr, "Warning: '%s' is deprecated and will be removed in a future version. Please use 'build' and 'ADD' instead.\n", job.Name)
	if len(job.Args) != 3 {
		return job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name)
	}
	var (
		name = job.Args[0]
		url  = job.Args[1]
		// NOTE(review): this local shadows the `path` package for the rest
		// of the function body.
		path = job.Args[2]
	)
	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
	out := utils.NewWriteFlusher(job.Stdout)
	img, err := srv.daemon.Repositories().LookupImage(name)
	if err != nil {
		return job.Error(err)
	}
	file, err := utils.Download(url)
	if err != nil {
		return job.Error(err)
	}
	defer file.Body.Close()
	// Build a throwaway container from the base image; the fake command is
	// only used as commit metadata.
	config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.daemon.SystemConfig())
	if err != nil {
		return job.Error(err)
	}
	c, _, err := srv.daemon.Create(config, "")
	if err != nil {
		return job.Error(err)
	}
	// Inject the downloaded file into the container at PATH, streaming
	// progress to the client.
	if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, utils.TruncateID(img.ID), "Downloading"), path); err != nil {
		return job.Error(err)
	}
	// FIXME: Handle custom repo, tag comment, author
	// Commit the container as a new (anonymous) image.
	img, err = srv.daemon.Commit(c, "", "", img.Comment, img.Author, nil)
	if err != nil {
		// Errors past this point are reported through the stream formatter
		// rather than as a job error, so the client sees them in-band.
		out.Write(sf.FormatError(err))
		return engine.StatusErr
	}
	out.Write(sf.FormatStatus("", img.ID))
	return engine.StatusOK
}
  643. func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
  644. images, _ := srv.daemon.Graph().Map()
  645. if images == nil {
  646. return engine.StatusOK
  647. }
  648. job.Stdout.Write([]byte("digraph docker {\n"))
  649. var (
  650. parentImage *image.Image
  651. err error
  652. )
  653. for _, image := range images {
  654. parentImage, err = image.GetParent()
  655. if err != nil {
  656. return job.Errorf("Error while getting parent image: %v", err)
  657. }
  658. if parentImage != nil {
  659. job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
  660. } else {
  661. job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
  662. }
  663. }
  664. for id, repos := range srv.daemon.Repositories().GetRepoRefs() {
  665. job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
  666. }
  667. job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
  668. return engine.StatusOK
  669. }
// Images lists images as an engine.Table written to job.Stdout.
//
// Env inputs:
//   - "filters": JSON filter set; only the "dangling" filter is honored here
//     ("dangling=true" restricts output to untagged images).
//   - "all":     when true (and not dangling), include intermediate images.
//   - "filter":  shell pattern matched against repository names.
func (srv *Server) Images(job *engine.Job) engine.Status {
	var (
		allImages   map[string]*image.Image
		err         error
		filt_tagged = true
	)
	imageFilters, err := filters.FromParam(job.Getenv("filters"))
	if err != nil {
		return job.Error(err)
	}
	// dangling=true means: suppress tagged images, show only leftovers.
	if i, ok := imageFilters["dangling"]; ok {
		for _, value := range i {
			if strings.ToLower(value) == "true" {
				filt_tagged = false
			}
		}
	}
	// "all" walks the whole graph; otherwise only head (leaf) images.
	if job.GetenvBool("all") && filt_tagged {
		allImages, err = srv.daemon.Graph().Map()
	} else {
		allImages, err = srv.daemon.Graph().Heads()
	}
	if err != nil {
		return job.Error(err)
	}
	lookup := make(map[string]*engine.Env)
	// Hold the repositories lock for the whole walk so the tag store does
	// not change underneath us.
	srv.daemon.Repositories().Lock()
	for name, repository := range srv.daemon.Repositories().Repositories {
		if job.Getenv("filter") != "" {
			if match, _ := path.Match(job.Getenv("filter"), name); !match {
				continue
			}
		}
		for tag, id := range repository {
			image, err := srv.daemon.Graph().Get(id)
			if err != nil {
				// A dangling tag pointing at a missing image is logged and
				// skipped rather than failing the whole listing.
				log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
				continue
			}
			if out, exists := lookup[id]; exists {
				// Image already listed: just accumulate the extra repo:tag.
				if filt_tagged {
					out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag)))
				}
			} else {
				// get the boolean list for if only the untagged images are requested
				// Removing every tagged ID from allImages leaves only the
				// untagged set for the "<none>:<none>" pass below.
				delete(allImages, id)
				if filt_tagged {
					out := &engine.Env{}
					out.Set("ParentId", image.Parent)
					out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)})
					out.Set("Id", image.ID)
					out.SetInt64("Created", image.Created.Unix())
					out.SetInt64("Size", image.Size)
					out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
					lookup[id] = out
				}
			}
		}
	}
	srv.daemon.Repositories().Unlock()
	outs := engine.NewTable("Created", len(lookup))
	for _, value := range lookup {
		outs.Add(value)
	}
	// Display images which aren't part of a repository/tag
	if job.Getenv("filter") == "" {
		for _, image := range allImages {
			out := &engine.Env{}
			out.Set("ParentId", image.Parent)
			out.SetList("RepoTags", []string{"<none>:<none>"})
			out.Set("Id", image.ID)
			out.SetInt64("Created", image.Created.Unix())
			out.SetInt64("Size", image.Size)
			out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
			outs.Add(out)
		}
	}
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
  753. func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
  754. images, _ := srv.daemon.Graph().Map()
  755. var imgcount int
  756. if images == nil {
  757. imgcount = 0
  758. } else {
  759. imgcount = len(images)
  760. }
  761. kernelVersion := "<unknown>"
  762. if kv, err := utils.GetKernelVersion(); err == nil {
  763. kernelVersion = kv.String()
  764. }
  765. // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
  766. initPath := utils.DockerInitPath("")
  767. if initPath == "" {
  768. // if that fails, we'll just return the path from the daemon
  769. initPath = srv.daemon.SystemInitPath()
  770. }
  771. v := &engine.Env{}
  772. v.SetInt("Containers", len(srv.daemon.List()))
  773. v.SetInt("Images", imgcount)
  774. v.Set("Driver", srv.daemon.GraphDriver().String())
  775. v.SetJson("DriverStatus", srv.daemon.GraphDriver().Status())
  776. v.SetBool("MemoryLimit", srv.daemon.SystemConfig().MemoryLimit)
  777. v.SetBool("SwapLimit", srv.daemon.SystemConfig().SwapLimit)
  778. v.SetBool("IPv4Forwarding", !srv.daemon.SystemConfig().IPv4ForwardingDisabled)
  779. v.SetBool("Debug", os.Getenv("DEBUG") != "")
  780. v.SetInt("NFd", utils.GetTotalUsedFds())
  781. v.SetInt("NGoroutines", runtime.NumGoroutine())
  782. v.Set("ExecutionDriver", srv.daemon.ExecutionDriver().Name())
  783. v.SetInt("NEventsListener", len(srv.listeners))
  784. v.Set("KernelVersion", kernelVersion)
  785. v.Set("IndexServerAddress", registry.IndexServerAddress())
  786. v.Set("InitSha1", dockerversion.INITSHA1)
  787. v.Set("InitPath", initPath)
  788. if _, err := v.WriteTo(job.Stdout); err != nil {
  789. return job.Error(err)
  790. }
  791. return engine.StatusOK
  792. }
  793. func (srv *Server) ImageHistory(job *engine.Job) engine.Status {
  794. if n := len(job.Args); n != 1 {
  795. return job.Errorf("Usage: %s IMAGE", job.Name)
  796. }
  797. name := job.Args[0]
  798. foundImage, err := srv.daemon.Repositories().LookupImage(name)
  799. if err != nil {
  800. return job.Error(err)
  801. }
  802. lookupMap := make(map[string][]string)
  803. for name, repository := range srv.daemon.Repositories().Repositories {
  804. for tag, id := range repository {
  805. // If the ID already has a reverse lookup, do not update it unless for "latest"
  806. if _, exists := lookupMap[id]; !exists {
  807. lookupMap[id] = []string{}
  808. }
  809. lookupMap[id] = append(lookupMap[id], name+":"+tag)
  810. }
  811. }
  812. outs := engine.NewTable("Created", 0)
  813. err = foundImage.WalkHistory(func(img *image.Image) error {
  814. out := &engine.Env{}
  815. out.Set("Id", img.ID)
  816. out.SetInt64("Created", img.Created.Unix())
  817. out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " "))
  818. out.SetList("Tags", lookupMap[img.ID])
  819. out.SetInt64("Size", img.Size)
  820. outs.Add(out)
  821. return nil
  822. })
  823. outs.ReverseSort()
  824. if _, err := outs.WriteListTo(job.Stdout); err != nil {
  825. return job.Error(err)
  826. }
  827. return engine.StatusOK
  828. }
  829. func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
  830. if len(job.Args) != 1 && len(job.Args) != 2 {
  831. return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
  832. }
  833. var (
  834. name = job.Args[0]
  835. psArgs = "-ef"
  836. )
  837. if len(job.Args) == 2 && job.Args[1] != "" {
  838. psArgs = job.Args[1]
  839. }
  840. if container := srv.daemon.Get(name); container != nil {
  841. if !container.State.IsRunning() {
  842. return job.Errorf("Container %s is not running", name)
  843. }
  844. pids, err := srv.daemon.ExecutionDriver().GetPidsForContainer(container.ID)
  845. if err != nil {
  846. return job.Error(err)
  847. }
  848. output, err := exec.Command("ps", psArgs).Output()
  849. if err != nil {
  850. return job.Errorf("Error running ps: %s", err)
  851. }
  852. lines := strings.Split(string(output), "\n")
  853. header := strings.Fields(lines[0])
  854. out := &engine.Env{}
  855. out.SetList("Titles", header)
  856. pidIndex := -1
  857. for i, name := range header {
  858. if name == "PID" {
  859. pidIndex = i
  860. }
  861. }
  862. if pidIndex == -1 {
  863. return job.Errorf("Couldn't find PID field in ps output")
  864. }
  865. processes := [][]string{}
  866. for _, line := range lines[1:] {
  867. if len(line) == 0 {
  868. continue
  869. }
  870. fields := strings.Fields(line)
  871. p, err := strconv.Atoi(fields[pidIndex])
  872. if err != nil {
  873. return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
  874. }
  875. for _, pid := range pids {
  876. if pid == p {
  877. // Make sure number of fields equals number of header titles
  878. // merging "overhanging" fields
  879. process := fields[:len(header)-1]
  880. process = append(process, strings.Join(fields[len(header)-1:], " "))
  881. processes = append(processes, process)
  882. }
  883. }
  884. }
  885. out.SetJson("Processes", processes)
  886. out.WriteTo(job.Stdout)
  887. return engine.StatusOK
  888. }
  889. return job.Errorf("No such container: %s", name)
  890. }
  891. func (srv *Server) ContainerChanges(job *engine.Job) engine.Status {
  892. if n := len(job.Args); n != 1 {
  893. return job.Errorf("Usage: %s CONTAINER", job.Name)
  894. }
  895. name := job.Args[0]
  896. if container := srv.daemon.Get(name); container != nil {
  897. outs := engine.NewTable("", 0)
  898. changes, err := container.Changes()
  899. if err != nil {
  900. return job.Error(err)
  901. }
  902. for _, change := range changes {
  903. out := &engine.Env{}
  904. if err := out.Import(change); err != nil {
  905. return job.Error(err)
  906. }
  907. outs.Add(out)
  908. }
  909. if _, err := outs.WriteListTo(job.Stdout); err != nil {
  910. return job.Error(err)
  911. }
  912. } else {
  913. return job.Errorf("No such container: %s", name)
  914. }
  915. return engine.StatusOK
  916. }
// Containers lists containers as an engine.Table on job.Stdout.
//
// Env inputs:
//   - "all":    include non-running containers.
//   - "before": skip everything up to and including this container.
//   - "since":  stop once this container is reached.
//   - "limit":  maximum number of rows (<= 0 means unlimited).
//   - "size":   also compute SizeRw/SizeRootFs per container.
func (srv *Server) Containers(job *engine.Job) engine.Status {
	var (
		foundBefore bool
		displayed   int
		all         = job.GetenvBool("all")
		since       = job.Getenv("since")
		before      = job.Getenv("before")
		n           = job.GetenvInt("limit")
		size        = job.GetenvBool("size")
	)
	outs := engine.NewTable("Created", 0)

	// Collect every name (graph path) registered for each container ID.
	names := map[string][]string{}
	srv.daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
		names[e.ID()] = append(names[e.ID()], p)
		return nil
	}, -1)

	// Resolve the "before"/"since" anchors up front so a bad name fails fast.
	var beforeCont, sinceCont *daemon.Container
	if before != "" {
		beforeCont = srv.daemon.Get(before)
		if beforeCont == nil {
			return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
		}
	}
	if since != "" {
		sinceCont = srv.daemon.Get(since)
		if sinceCont == nil {
			return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
		}
	}

	for _, container := range srv.daemon.List() {
		// By default only running containers are shown; "all", a limit,
		// or either anchor widens the selection to stopped containers too.
		if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
			continue
		}
		// Skip everything up to and including the "before" anchor.
		if before != "" && !foundBefore {
			if container.ID == beforeCont.ID {
				foundBefore = true
			}
			continue
		}
		if n > 0 && displayed == n {
			break
		}
		// Stop as soon as the "since" anchor is reached.
		if since != "" {
			if container.ID == sinceCont.ID {
				break
			}
		}
		displayed++
		out := &engine.Env{}
		out.Set("Id", container.ID)
		out.SetList("Names", names[container.ID])
		out.Set("Image", srv.daemon.Repositories().ImageName(container.Image))
		if len(container.Args) > 0 {
			// Quote arguments containing spaces so the rendered command
			// stays unambiguous.
			args := []string{}
			for _, arg := range container.Args {
				if strings.Contains(arg, " ") {
					args = append(args, fmt.Sprintf("'%s'", arg))
				} else {
					args = append(args, arg)
				}
			}
			argsAsString := strings.Join(args, " ")
			out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString))
		} else {
			out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
		}
		out.SetInt64("Created", container.Created.Unix())
		out.Set("Status", container.State.String())
		str, err := container.NetworkSettings.PortMappingAPI().ToListString()
		if err != nil {
			return job.Error(err)
		}
		out.Set("Ports", str)
		if size {
			sizeRw, sizeRootFs := container.GetSize()
			out.SetInt64("SizeRw", sizeRw)
			out.SetInt64("SizeRootFs", sizeRootFs)
		}
		outs.Add(out)
	}
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
  1003. func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
  1004. if len(job.Args) != 1 {
  1005. return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
  1006. }
  1007. name := job.Args[0]
  1008. container := srv.daemon.Get(name)
  1009. if container == nil {
  1010. return job.Errorf("No such container: %s", name)
  1011. }
  1012. var (
  1013. config = container.Config
  1014. newConfig runconfig.Config
  1015. )
  1016. if err := job.GetenvJson("config", &newConfig); err != nil {
  1017. return job.Error(err)
  1018. }
  1019. if err := runconfig.Merge(&newConfig, config); err != nil {
  1020. return job.Error(err)
  1021. }
  1022. img, err := srv.daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig)
  1023. if err != nil {
  1024. return job.Error(err)
  1025. }
  1026. job.Printf("%s\n", img.ID)
  1027. return engine.StatusOK
  1028. }
  1029. func (srv *Server) ImageTag(job *engine.Job) engine.Status {
  1030. if len(job.Args) != 2 && len(job.Args) != 3 {
  1031. return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
  1032. }
  1033. var tag string
  1034. if len(job.Args) == 3 {
  1035. tag = job.Args[2]
  1036. }
  1037. if err := srv.daemon.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
  1038. return job.Error(err)
  1039. }
  1040. return engine.StatusOK
  1041. }
// pullImage downloads the layer chain for imgID from one registry endpoint,
// walking the remote history oldest-first so parents are registered before
// children. Metadata and layer fetches are retried up to 5 times with
// linear backoff before the pull is abandoned.
func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error {
	history, err := r.GetRemoteHistory(imgID, endpoint, token)
	if err != nil {
		return err
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil))
	// FIXME: Try to stream the images?
	// FIXME: Launch the getRemoteImage() in goroutines
	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]
		// ensure no two downloads of the same layer happen at the same time
		if c, err := srv.poolAdd("pull", "layer:"+id); err != nil {
			utils.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
			<-c
		}
		// NOTE(review): deferred inside the loop, so pool entries are only
		// released when the whole function returns, not per iteration.
		defer srv.poolRemove("pull", "layer:"+id)
		if !srv.daemon.Graph().Exists(id) {
			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
			var (
				imgJSON []byte
				imgSize int
				err     error
				img     *image.Image
			)
			retries := 5
			// Fetch and parse the image json, retrying on any error with
			// a backoff proportional to the attempt number.
			for j := 1; j <= retries; j++ {
				imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
				if err != nil && j == retries {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
					return err
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				}
				img, err = image.NewImgJSON(imgJSON)
				if err != nil && j == retries {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
					return fmt.Errorf("Failed to parse json: %s", err)
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else {
					break
				}
			}
			// Download and register the layer itself; only timeouts are
			// retried here, other errors abort immediately.
			for j := 1; j <= retries; j++ {
				// Get the layer
				status := "Pulling fs layer"
				if j > 1 {
					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
				}
				out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil))
				layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
				// Unwrap url.Error so the timeout check below can see the
				// underlying net.Error.
				if uerr, ok := err.(*url.Error); ok {
					err = uerr.Err
				}
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
					return err
				}
				// NOTE(review): also deferred in a loop — every fetched layer
				// stays open until the function returns.
				defer layer.Close()
				err = srv.daemon.Graph().Register(imgJSON,
					utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"),
					img)
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
					return err
				} else {
					break
				}
			}
		}
		out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil))
	}
	return nil
}
// pullRepository pulls some or all tags of a repository. If askedTag is
// empty every remote tag is pulled; otherwise only that tag. When parallel
// is true each image download runs in its own goroutine; the channel
// protocol requires every downloadImage invocation to send exactly one
// value on `errors`, which the drain loop below relies on.
func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool) error {
	out.Write(sf.FormatStatus("", "Pulling repository %s", localName))
	repoData, err := r.GetRepositoryData(remoteName)
	if err != nil {
		return err
	}
	utils.Debugf("Retrieving the tag list")
	tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens)
	if err != nil {
		utils.Errorf("%v", err)
		return err
	}
	// Ensure every tagged image has an entry in the image list.
	for tag, id := range tagsList {
		repoData.ImgList[id] = &registry.ImgData{
			ID:       id,
			Tag:      tag,
			Checksum: "",
		}
	}
	utils.Debugf("Registering tags")
	// If no tag has been specified, pull them all
	if askedTag == "" {
		for tag, id := range tagsList {
			repoData.ImgList[id].Tag = tag
		}
	} else {
		// Otherwise, check that the tag exists and use only that one
		id, exists := tagsList[askedTag]
		if !exists {
			return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName)
		}
		repoData.ImgList[id].Tag = askedTag
	}
	errors := make(chan error)
	for _, image := range repoData.ImgList {
		downloadImage := func(img *registry.ImgData) {
			// Skip images that don't match the requested tag.
			if askedTag != "" && img.Tag != askedTag {
				utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
				if parallel {
					errors <- nil
				}
				return
			}
			if img.Tag == "" {
				utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
				if parallel {
					errors <- nil
				}
				return
			}
			// ensure no two downloads of the same image happen at the same time
			if c, err := srv.poolAdd("pull", "img:"+img.ID); err != nil {
				// A non-nil channel means another client is pulling the same
				// image: wait for it instead of duplicating the download.
				if c != nil {
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
				} else {
					utils.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
				if parallel {
					errors <- nil
				}
				return
			}
			defer srv.poolRemove("pull", "img:"+img.ID)
			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil))
			success := false
			var lastErr error
			// Try each endpoint in turn until one succeeds.
			for _, ep := range repoData.Endpoints {
				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil))
				if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
					// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
					// As the error is also given to the output stream the user will see the error.
					lastErr = err
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil))
					continue
				}
				success = true
				break
			}
			if !success {
				// NOTE(review): in sequential mode (parallel == false) this
				// failure is only reported on the stream — the function still
				// returns nil. Confirm whether that is intended.
				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, %s", img.Tag, localName, lastErr), nil))
				if parallel {
					errors <- fmt.Errorf("Could not find repository on any of the indexed registries.")
					return
				}
			}
			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
			if parallel {
				errors <- nil
			}
		}
		if parallel {
			go downloadImage(image)
		} else {
			downloadImage(image)
		}
	}
	if parallel {
		// Drain exactly one result per image; remember the last failure.
		var lastError error
		for i := 0; i < len(repoData.ImgList); i++ {
			if err := <-errors; err != nil {
				lastError = err
			}
		}
		if lastError != nil {
			return lastError
		}
	}
	// Record the pulled tags in the local tag store.
	for tag, id := range tagsList {
		if askedTag != "" && tag != askedTag {
			continue
		}
		if err := srv.daemon.Repositories().Set(localName, tag, id, true); err != nil {
			return err
		}
	}
	return nil
}
  1243. func (srv *Server) poolAdd(kind, key string) (chan struct{}, error) {
  1244. srv.Lock()
  1245. defer srv.Unlock()
  1246. if c, exists := srv.pullingPool[key]; exists {
  1247. return c, fmt.Errorf("pull %s is already in progress", key)
  1248. }
  1249. if c, exists := srv.pushingPool[key]; exists {
  1250. return c, fmt.Errorf("push %s is already in progress", key)
  1251. }
  1252. c := make(chan struct{})
  1253. switch kind {
  1254. case "pull":
  1255. srv.pullingPool[key] = c
  1256. case "push":
  1257. srv.pushingPool[key] = c
  1258. default:
  1259. return nil, fmt.Errorf("Unknown pool type")
  1260. }
  1261. return c, nil
  1262. }
  1263. func (srv *Server) poolRemove(kind, key string) error {
  1264. srv.Lock()
  1265. defer srv.Unlock()
  1266. switch kind {
  1267. case "pull":
  1268. if c, exists := srv.pullingPool[key]; exists {
  1269. close(c)
  1270. delete(srv.pullingPool, key)
  1271. }
  1272. case "push":
  1273. if c, exists := srv.pushingPool[key]; exists {
  1274. close(c)
  1275. delete(srv.pushingPool, key)
  1276. }
  1277. default:
  1278. return fmt.Errorf("Unknown pool type")
  1279. }
  1280. return nil
  1281. }
  1282. func (srv *Server) ImagePull(job *engine.Job) engine.Status {
  1283. if n := len(job.Args); n != 1 && n != 2 {
  1284. return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
  1285. }
  1286. var (
  1287. localName = job.Args[0]
  1288. tag string
  1289. sf = utils.NewStreamFormatter(job.GetenvBool("json"))
  1290. authConfig = &registry.AuthConfig{}
  1291. metaHeaders map[string][]string
  1292. )
  1293. if len(job.Args) > 1 {
  1294. tag = job.Args[1]
  1295. }
  1296. job.GetenvJson("authConfig", authConfig)
  1297. job.GetenvJson("metaHeaders", metaHeaders)
  1298. c, err := srv.poolAdd("pull", localName+":"+tag)
  1299. if err != nil {
  1300. if c != nil {
  1301. // Another pull of the same repository is already taking place; just wait for it to finish
  1302. job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
  1303. <-c
  1304. return engine.StatusOK
  1305. }
  1306. return job.Error(err)
  1307. }
  1308. defer srv.poolRemove("pull", localName+":"+tag)
  1309. // Resolve the Repository name from fqn to endpoint + name
  1310. hostname, remoteName, err := registry.ResolveRepositoryName(localName)
  1311. if err != nil {
  1312. return job.Error(err)
  1313. }
  1314. endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
  1315. if err != nil {
  1316. return job.Error(err)
  1317. }
  1318. r, err := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint)
  1319. if err != nil {
  1320. return job.Error(err)
  1321. }
  1322. if endpoint == registry.IndexServerAddress() {
  1323. // If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
  1324. localName = remoteName
  1325. }
  1326. if err = srv.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
  1327. return job.Error(err)
  1328. }
  1329. return engine.StatusOK
  1330. }
  1331. // Retrieve the all the images to be uploaded in the correct order
  1332. func (srv *Server) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) {
  1333. var (
  1334. imageList []string
  1335. imagesSeen map[string]bool = make(map[string]bool)
  1336. tagsByImage map[string][]string = make(map[string][]string)
  1337. )
  1338. for tag, id := range localRepo {
  1339. if requestedTag != "" && requestedTag != tag {
  1340. continue
  1341. }
  1342. var imageListForThisTag []string
  1343. tagsByImage[id] = append(tagsByImage[id], tag)
  1344. for img, err := srv.daemon.Graph().Get(id); img != nil; img, err = img.GetParent() {
  1345. if err != nil {
  1346. return nil, nil, err
  1347. }
  1348. if imagesSeen[img.ID] {
  1349. // This image is already on the list, we can ignore it and all its parents
  1350. break
  1351. }
  1352. imagesSeen[img.ID] = true
  1353. imageListForThisTag = append(imageListForThisTag, img.ID)
  1354. }
  1355. // reverse the image list for this tag (so the "most"-parent image is first)
  1356. for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 {
  1357. imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i]
  1358. }
  1359. // append to main image list
  1360. imageList = append(imageList, imageListForThisTag...)
  1361. }
  1362. if len(imageList) == 0 {
  1363. return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
  1364. }
  1365. utils.Debugf("Image list: %v", imageList)
  1366. utils.Debugf("Tags by image: %v", tagsByImage)
  1367. return imageList, tagsByImage, nil
  1368. }
// pushRepository pushes a local repository (optionally restricted to one
// tag) to a registry: it registers the image index, uploads every image
// the endpoint is missing, registers each image's tags, then re-sends the
// index with validation enabled to finalize the push.
func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error {
	out = utils.NewWriteFlusher(out)
	utils.Debugf("Local repo: %s", localRepo)
	imgList, tagsByImage, err := srv.getImageList(localRepo, tag)
	if err != nil {
		return err
	}
	out.Write(sf.FormatStatus("", "Sending image list"))
	var (
		repoData   *registry.RepositoryData
		imageIndex []*registry.ImgData
	)
	for _, imgId := range imgList {
		if tags, exists := tagsByImage[imgId]; exists {
			// If an image has tags you must add an entry in the image index
			// for each tag
			for _, tag := range tags {
				imageIndex = append(imageIndex, &registry.ImgData{
					ID:  imgId,
					Tag: tag,
				})
			}
		} else {
			// If the image does not have a tag it still needs to be sent to the
			// registry with an empty tag so that it is accociated with the repository
			imageIndex = append(imageIndex, &registry.ImgData{
				ID:  imgId,
				Tag: "",
			})
		}
	}
	utils.Debugf("Preparing to push %s with the following images and tags\n", localRepo)
	for _, data := range imageIndex {
		utils.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag)
	}
	// Register all the images in a repository with the registry
	// If an image is not in this list it will not be associated with the repository
	repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil)
	if err != nil {
		return err
	}
	// The tag count is only used for the status message below.
	nTag := 1
	if tag == "" {
		nTag = len(localRepo)
	}
	for _, ep := range repoData.Endpoints {
		out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, nTag))
		for _, imgId := range imgList {
			if r.LookupRemoteImage(imgId, ep, repoData.Tokens) {
				// The endpoint already has this image; no need to re-upload.
				out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId)))
			} else {
				if _, err := srv.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil {
					// FIXME: Continue on error?
					return err
				}
			}
			// Register each tag of this image with the endpoint.
			for _, tag := range tagsByImage[imgId] {
				out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag))
				if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil {
					return err
				}
			}
		}
	}
	// Finalize: re-send the index with validation enabled.
	if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil {
		return err
	}
	return nil
}
// pushImage uploads a single image to one registry endpoint in three
// steps — json metadata, layer tarball, then checksum — and returns the
// layer checksum. A registry.ErrAlreadyExists response on the json step is
// treated as success with an empty checksum.
func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
	out = utils.NewWriteFlusher(out)
	// Read the raw image json straight out of the graph directory.
	jsonRaw, err := ioutil.ReadFile(path.Join(srv.daemon.Graph().Root, imgID, "json"))
	if err != nil {
		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil))
	imgData := &registry.ImgData{
		ID: imgID,
	}
	// Send the json
	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
		if err == registry.ErrAlreadyExists {
			out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}
	// Render the layer to a temporary archive; removed once uploaded.
	layerData, err := srv.daemon.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
	if err != nil {
		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	defer os.RemoveAll(layerData.Name())
	// Send the layer
	utils.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
		return "", err
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil))
	return imgData.Checksum, nil
}
  1476. // FIXME: Allow to interrupt current push when new push of same image is done.
  1477. func (srv *Server) ImagePush(job *engine.Job) engine.Status {
  1478. if n := len(job.Args); n != 1 {
  1479. return job.Errorf("Usage: %s IMAGE", job.Name)
  1480. }
  1481. var (
  1482. localName = job.Args[0]
  1483. sf = utils.NewStreamFormatter(job.GetenvBool("json"))
  1484. authConfig = &registry.AuthConfig{}
  1485. metaHeaders map[string][]string
  1486. )
  1487. tag := job.Getenv("tag")
  1488. job.GetenvJson("authConfig", authConfig)
  1489. job.GetenvJson("metaHeaders", metaHeaders)
  1490. if _, err := srv.poolAdd("push", localName); err != nil {
  1491. return job.Error(err)
  1492. }
  1493. defer srv.poolRemove("push", localName)
  1494. // Resolve the Repository name from fqn to endpoint + name
  1495. hostname, remoteName, err := registry.ResolveRepositoryName(localName)
  1496. if err != nil {
  1497. return job.Error(err)
  1498. }
  1499. endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
  1500. if err != nil {
  1501. return job.Error(err)
  1502. }
  1503. img, err := srv.daemon.Graph().Get(localName)
  1504. r, err2 := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint)
  1505. if err2 != nil {
  1506. return job.Error(err2)
  1507. }
  1508. if err != nil {
  1509. reposLen := 1
  1510. if tag == "" {
  1511. reposLen = len(srv.daemon.Repositories().Repositories[localName])
  1512. }
  1513. job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
  1514. // If it fails, try to get the repository
  1515. if localRepo, exists := srv.daemon.Repositories().Repositories[localName]; exists {
  1516. if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
  1517. return job.Error(err)
  1518. }
  1519. return engine.StatusOK
  1520. }
  1521. return job.Error(err)
  1522. }
  1523. var token []string
  1524. job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
  1525. if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil {
  1526. return job.Error(err)
  1527. }
  1528. return engine.StatusOK
  1529. }
  1530. func (srv *Server) ImageImport(job *engine.Job) engine.Status {
  1531. if n := len(job.Args); n != 2 && n != 3 {
  1532. return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
  1533. }
  1534. var (
  1535. src = job.Args[0]
  1536. repo = job.Args[1]
  1537. tag string
  1538. sf = utils.NewStreamFormatter(job.GetenvBool("json"))
  1539. archive archive.ArchiveReader
  1540. resp *http.Response
  1541. )
  1542. if len(job.Args) > 2 {
  1543. tag = job.Args[2]
  1544. }
  1545. if src == "-" {
  1546. archive = job.Stdin
  1547. } else {
  1548. u, err := url.Parse(src)
  1549. if err != nil {
  1550. return job.Error(err)
  1551. }
  1552. if u.Scheme == "" {
  1553. u.Scheme = "http"
  1554. u.Host = src
  1555. u.Path = ""
  1556. }
  1557. job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
  1558. // Download with curl (pretty progress bar)
  1559. // If curl is not available, fallback to http.Get()
  1560. resp, err = utils.Download(u.String())
  1561. if err != nil {
  1562. return job.Error(err)
  1563. }
  1564. progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
  1565. defer progressReader.Close()
  1566. archive = progressReader
  1567. }
  1568. img, err := srv.daemon.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
  1569. if err != nil {
  1570. return job.Error(err)
  1571. }
  1572. // Optionally register the image at REPO/TAG
  1573. if repo != "" {
  1574. if err := srv.daemon.Repositories().Set(repo, tag, img.ID, true); err != nil {
  1575. return job.Error(err)
  1576. }
  1577. }
  1578. job.Stdout.Write(sf.FormatStatus("", img.ID))
  1579. return engine.StatusOK
  1580. }
  1581. func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
  1582. var name string
  1583. if len(job.Args) == 1 {
  1584. name = job.Args[0]
  1585. } else if len(job.Args) > 1 {
  1586. return job.Errorf("Usage: %s", job.Name)
  1587. }
  1588. config := runconfig.ContainerConfigFromJob(job)
  1589. if config.Memory != 0 && config.Memory < 524288 {
  1590. return job.Errorf("Minimum memory limit allowed is 512k")
  1591. }
  1592. if config.Memory > 0 && !srv.daemon.SystemConfig().MemoryLimit {
  1593. job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
  1594. config.Memory = 0
  1595. }
  1596. if config.Memory > 0 && !srv.daemon.SystemConfig().SwapLimit {
  1597. job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
  1598. config.MemorySwap = -1
  1599. }
  1600. container, buildWarnings, err := srv.daemon.Create(config, name)
  1601. if err != nil {
  1602. if srv.daemon.Graph().IsNotExist(err) {
  1603. _, tag := utils.ParseRepositoryTag(config.Image)
  1604. if tag == "" {
  1605. tag = graph.DEFAULTTAG
  1606. }
  1607. return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
  1608. }
  1609. return job.Error(err)
  1610. }
  1611. if !container.Config.NetworkDisabled && srv.daemon.SystemConfig().IPv4ForwardingDisabled {
  1612. job.Errorf("IPv4 forwarding is disabled.\n")
  1613. }
  1614. srv.LogEvent("create", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  1615. // FIXME: this is necessary because daemon.Create might return a nil container
  1616. // with a non-nil error. This should not happen! Once it's fixed we
  1617. // can remove this workaround.
  1618. if container != nil {
  1619. job.Printf("%s\n", container.ID)
  1620. }
  1621. for _, warning := range buildWarnings {
  1622. job.Errorf("%s\n", warning)
  1623. }
  1624. return engine.StatusOK
  1625. }
  1626. func (srv *Server) ContainerRestart(job *engine.Job) engine.Status {
  1627. if len(job.Args) != 1 {
  1628. return job.Errorf("Usage: %s CONTAINER\n", job.Name)
  1629. }
  1630. var (
  1631. name = job.Args[0]
  1632. t = 10
  1633. )
  1634. if job.EnvExists("t") {
  1635. t = job.GetenvInt("t")
  1636. }
  1637. if container := srv.daemon.Get(name); container != nil {
  1638. if err := container.Restart(int(t)); err != nil {
  1639. return job.Errorf("Cannot restart container %s: %s\n", name, err)
  1640. }
  1641. srv.LogEvent("restart", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  1642. } else {
  1643. return job.Errorf("No such container: %s\n", name)
  1644. }
  1645. return engine.StatusOK
  1646. }
  1647. func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
  1648. if len(job.Args) != 1 {
  1649. return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
  1650. }
  1651. name := job.Args[0]
  1652. removeVolume := job.GetenvBool("removeVolume")
  1653. removeLink := job.GetenvBool("removeLink")
  1654. forceRemove := job.GetenvBool("forceRemove")
  1655. container := srv.daemon.Get(name)
  1656. if removeLink {
  1657. if container == nil {
  1658. return job.Errorf("No such link: %s", name)
  1659. }
  1660. name, err := daemon.GetFullContainerName(name)
  1661. if err != nil {
  1662. job.Error(err)
  1663. }
  1664. parent, n := path.Split(name)
  1665. if parent == "/" {
  1666. return job.Errorf("Conflict, cannot remove the default name of the container")
  1667. }
  1668. pe := srv.daemon.ContainerGraph().Get(parent)
  1669. if pe == nil {
  1670. return job.Errorf("Cannot get parent %s for name %s", parent, name)
  1671. }
  1672. parentContainer := srv.daemon.Get(pe.ID())
  1673. if parentContainer != nil {
  1674. parentContainer.DisableLink(n)
  1675. }
  1676. if err := srv.daemon.ContainerGraph().Delete(name); err != nil {
  1677. return job.Error(err)
  1678. }
  1679. return engine.StatusOK
  1680. }
  1681. if container != nil {
  1682. if container.State.IsRunning() {
  1683. if forceRemove {
  1684. if err := container.Stop(5); err != nil {
  1685. return job.Errorf("Could not stop running container, cannot remove - %v", err)
  1686. }
  1687. } else {
  1688. return job.Errorf("Impossible to remove a running container, please stop it first or use -f")
  1689. }
  1690. }
  1691. if err := srv.daemon.Destroy(container); err != nil {
  1692. return job.Errorf("Cannot destroy container %s: %s", name, err)
  1693. }
  1694. srv.LogEvent("destroy", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  1695. if removeVolume {
  1696. var (
  1697. volumes = make(map[string]struct{})
  1698. binds = make(map[string]struct{})
  1699. usedVolumes = make(map[string]*daemon.Container)
  1700. )
  1701. // the volume id is always the base of the path
  1702. getVolumeId := func(p string) string {
  1703. return filepath.Base(strings.TrimSuffix(p, "/layer"))
  1704. }
  1705. // populate bind map so that they can be skipped and not removed
  1706. for _, bind := range container.HostConfig().Binds {
  1707. source := strings.Split(bind, ":")[0]
  1708. // TODO: refactor all volume stuff, all of it
  1709. // it is very important that we eval the link or comparing the keys to container.Volumes will not work
  1710. //
  1711. // eval symlink can fail, ref #5244 if we receive an is not exist error we can ignore it
  1712. p, err := filepath.EvalSymlinks(source)
  1713. if err != nil && !os.IsNotExist(err) {
  1714. return job.Error(err)
  1715. }
  1716. if p != "" {
  1717. source = p
  1718. }
  1719. binds[source] = struct{}{}
  1720. }
  1721. // Store all the deleted containers volumes
  1722. for _, volumeId := range container.Volumes {
  1723. // Skip the volumes mounted from external
  1724. // bind mounts here will will be evaluated for a symlink
  1725. if _, exists := binds[volumeId]; exists {
  1726. continue
  1727. }
  1728. volumeId = getVolumeId(volumeId)
  1729. volumes[volumeId] = struct{}{}
  1730. }
  1731. // Retrieve all volumes from all remaining containers
  1732. for _, container := range srv.daemon.List() {
  1733. for _, containerVolumeId := range container.Volumes {
  1734. containerVolumeId = getVolumeId(containerVolumeId)
  1735. usedVolumes[containerVolumeId] = container
  1736. }
  1737. }
  1738. for volumeId := range volumes {
  1739. // If the requested volu
  1740. if c, exists := usedVolumes[volumeId]; exists {
  1741. log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
  1742. continue
  1743. }
  1744. if err := srv.daemon.Volumes().Delete(volumeId); err != nil {
  1745. return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
  1746. }
  1747. }
  1748. }
  1749. } else {
  1750. return job.Errorf("No such container: %s", name)
  1751. }
  1752. return engine.StatusOK
  1753. }
// DeleteImage removes the image referenced by name (either repo[:tag]
// or an image ID). Untag and delete results are appended to imgs.
//
//   first   - true on the user-initiated call; false on the recursive
//             calls made for parent images
//   force   - delete even when the ID is tagged in multiple repositories
//   noprune - do not recursively delete untagged parent images
func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, noprune bool) error {
	var (
		repoName, tag string
		tags          = []string{}
		tagDeleted    bool
	)
	repoName, tag = utils.ParseRepositoryTag(name)
	if tag == "" {
		tag = graph.DEFAULTTAG
	}
	img, err := srv.daemon.Repositories().LookupImage(name)
	if err != nil {
		// Distinguish "unknown tag in a known repo" from "unknown image".
		if r, _ := srv.daemon.Repositories().Get(repoName); r != nil {
			return fmt.Errorf("No such image: %s:%s", repoName, tag)
		}
		return fmt.Errorf("No such image: %s", name)
	}
	// If the name is (a prefix of) the image ID, the user deleted by ID
	// rather than by repo:tag, so clear the parsed repo/tag.
	if strings.Contains(img.ID, name) {
		repoName = ""
		tag = ""
	}
	byParents, err := srv.daemon.Graph().ByParent()
	if err != nil {
		return err
	}
	//If delete by id, see if the id belong only to one repository
	if repoName == "" {
		for _, repoAndTag := range srv.daemon.Repositories().ByID()[img.ID] {
			parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
			if repoName == "" || repoName == parsedRepo {
				repoName = parsedRepo
				if parsedTag != "" {
					tags = append(tags, parsedTag)
				}
			} else if repoName != parsedRepo && !force {
				// the id belongs to multiple repos, like base:latest and user:test,
				// in that case return conflict
				return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name)
			}
		}
	} else {
		tags = append(tags, tag)
	}
	// On recursive (parent) calls, a still-tagged image stops the prune.
	if !first && len(tags) > 0 {
		return nil
	}
	//Untag the current image
	for _, tag := range tags {
		tagDeleted, err = srv.daemon.Repositories().Delete(repoName, tag)
		if err != nil {
			return err
		}
		if tagDeleted {
			out := &engine.Env{}
			out.Set("Untagged", repoName+":"+tag)
			imgs.Add(out)
			srv.LogEvent("untag", img.ID, "")
		}
	}
	tags = srv.daemon.Repositories().ByID()[img.ID]
	// Only delete the image data itself when no (other) tags remain and
	// no other image in the graph lists this one as its parent.
	if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
		if len(byParents[img.ID]) == 0 {
			if err := srv.canDeleteImage(img.ID, force, tagDeleted); err != nil {
				return err
			}
			if err := srv.daemon.Repositories().DeleteAll(img.ID); err != nil {
				return err
			}
			if err := srv.daemon.Graph().Delete(img.ID); err != nil {
				return err
			}
			out := &engine.Env{}
			out.Set("Deleted", img.ID)
			imgs.Add(out)
			srv.LogEvent("delete", img.ID, "")
			// Recursively prune now-orphaned parents; the recursion's
			// error is only surfaced on the top-level call.
			if img.Parent != "" && !noprune {
				err := srv.DeleteImage(img.Parent, imgs, false, force, noprune)
				if first {
					return err
				}
			}
		}
	}
	return nil
}
  1839. func (srv *Server) ImageDelete(job *engine.Job) engine.Status {
  1840. if n := len(job.Args); n != 1 {
  1841. return job.Errorf("Usage: %s IMAGE", job.Name)
  1842. }
  1843. imgs := engine.NewTable("", 0)
  1844. if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
  1845. return job.Error(err)
  1846. }
  1847. if len(imgs.Data) == 0 {
  1848. return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
  1849. }
  1850. if _, err := imgs.WriteListTo(job.Stdout); err != nil {
  1851. return job.Error(err)
  1852. }
  1853. return engine.StatusOK
  1854. }
  1855. func (srv *Server) canDeleteImage(imgID string, force, untagged bool) error {
  1856. var message string
  1857. if untagged {
  1858. message = " (docker untagged the image)"
  1859. }
  1860. for _, container := range srv.daemon.List() {
  1861. parent, err := srv.daemon.Repositories().LookupImage(container.Image)
  1862. if err != nil {
  1863. return err
  1864. }
  1865. if err := parent.WalkHistory(func(p *image.Image) error {
  1866. if imgID == p.ID {
  1867. if container.State.IsRunning() {
  1868. if force {
  1869. return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it%s, stop it and retry", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
  1870. }
  1871. return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it%s, stop it and use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
  1872. } else if !force {
  1873. return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it%s, use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
  1874. }
  1875. }
  1876. return nil
  1877. }); err != nil {
  1878. return err
  1879. }
  1880. }
  1881. return nil
  1882. }
  1883. func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
  1884. // Retrieve all images
  1885. images, err := srv.daemon.Graph().Map()
  1886. if err != nil {
  1887. return nil, err
  1888. }
  1889. // Store the tree in a map of map (map[parentId][childId])
  1890. imageMap := make(map[string]map[string]struct{})
  1891. for _, img := range images {
  1892. if _, exists := imageMap[img.Parent]; !exists {
  1893. imageMap[img.Parent] = make(map[string]struct{})
  1894. }
  1895. imageMap[img.Parent][img.ID] = struct{}{}
  1896. }
  1897. // Loop on the children of the given image and check the config
  1898. var match *image.Image
  1899. for elem := range imageMap[imgID] {
  1900. img, err := srv.daemon.Graph().Get(elem)
  1901. if err != nil {
  1902. return nil, err
  1903. }
  1904. if runconfig.Compare(&img.ContainerConfig, config) {
  1905. if match == nil || match.Created.Before(img.Created) {
  1906. match = img
  1907. }
  1908. }
  1909. }
  1910. return match, nil
  1911. }
  1912. func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
  1913. if len(job.Args) < 1 {
  1914. return job.Errorf("Usage: %s container_id", job.Name)
  1915. }
  1916. var (
  1917. name = job.Args[0]
  1918. daemon = srv.daemon
  1919. container = daemon.Get(name)
  1920. )
  1921. if container == nil {
  1922. return job.Errorf("No such container: %s", name)
  1923. }
  1924. // If no environment was set, then no hostconfig was passed.
  1925. if len(job.Environ()) > 0 {
  1926. hostConfig := runconfig.ContainerHostConfigFromJob(job)
  1927. // Validate the HostConfig binds. Make sure that:
  1928. // 1) the source of a bind mount isn't /
  1929. // The bind mount "/:/foo" isn't allowed.
  1930. // 2) Check that the source exists
  1931. // The source to be bind mounted must exist.
  1932. for _, bind := range hostConfig.Binds {
  1933. splitBind := strings.Split(bind, ":")
  1934. source := splitBind[0]
  1935. // refuse to bind mount "/" to the container
  1936. if source == "/" {
  1937. return job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind)
  1938. }
  1939. // ensure the source exists on the host
  1940. _, err := os.Stat(source)
  1941. if err != nil && os.IsNotExist(err) {
  1942. err = os.MkdirAll(source, 0755)
  1943. if err != nil {
  1944. return job.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error())
  1945. }
  1946. }
  1947. }
  1948. // Register any links from the host config before starting the container
  1949. if err := srv.daemon.RegisterLinks(container, hostConfig); err != nil {
  1950. return job.Error(err)
  1951. }
  1952. container.SetHostConfig(hostConfig)
  1953. container.ToDisk()
  1954. }
  1955. if err := container.Start(); err != nil {
  1956. return job.Errorf("Cannot start container %s: %s", name, err)
  1957. }
  1958. srv.LogEvent("start", container.ID, daemon.Repositories().ImageName(container.Image))
  1959. return engine.StatusOK
  1960. }
  1961. func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
  1962. if len(job.Args) != 1 {
  1963. return job.Errorf("Usage: %s CONTAINER\n", job.Name)
  1964. }
  1965. var (
  1966. name = job.Args[0]
  1967. t = 10
  1968. )
  1969. if job.EnvExists("t") {
  1970. t = job.GetenvInt("t")
  1971. }
  1972. if container := srv.daemon.Get(name); container != nil {
  1973. if err := container.Stop(int(t)); err != nil {
  1974. return job.Errorf("Cannot stop container %s: %s\n", name, err)
  1975. }
  1976. srv.LogEvent("stop", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  1977. } else {
  1978. return job.Errorf("No such container: %s\n", name)
  1979. }
  1980. return engine.StatusOK
  1981. }
  1982. func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
  1983. if len(job.Args) != 1 {
  1984. return job.Errorf("Usage: %s", job.Name)
  1985. }
  1986. name := job.Args[0]
  1987. if container := srv.daemon.Get(name); container != nil {
  1988. status := container.Wait()
  1989. job.Printf("%d\n", status)
  1990. return engine.StatusOK
  1991. }
  1992. return job.Errorf("%s: no such container: %s", job.Name, name)
  1993. }
  1994. func (srv *Server) ContainerResize(job *engine.Job) engine.Status {
  1995. if len(job.Args) != 3 {
  1996. return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
  1997. }
  1998. name := job.Args[0]
  1999. height, err := strconv.Atoi(job.Args[1])
  2000. if err != nil {
  2001. return job.Error(err)
  2002. }
  2003. width, err := strconv.Atoi(job.Args[2])
  2004. if err != nil {
  2005. return job.Error(err)
  2006. }
  2007. if container := srv.daemon.Get(name); container != nil {
  2008. if err := container.Resize(height, width); err != nil {
  2009. return job.Error(err)
  2010. }
  2011. return engine.StatusOK
  2012. }
  2013. return job.Errorf("No such container: %s", name)
  2014. }
// ContainerLogs streams a container's stored logs (and optionally live
// output) to the job's stdout/stderr. Usage: CONTAINER. Env options:
//   stdout/stderr - which streams to emit (at least one required)
//   timestamps    - prefix each line with its creation timestamp
//   follow        - after replaying stored logs, tail live output
func (srv *Server) ContainerLogs(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name   = job.Args[0]
		stdout = job.GetenvBool("stdout")
		stderr = job.GetenvBool("stderr")
		follow = job.GetenvBool("follow")
		times  = job.GetenvBool("timestamps")
		format string
	)
	if !(stdout || stderr) {
		return job.Errorf("You must choose at least one stream")
	}
	if times {
		format = time.StampMilli
	}
	container := srv.daemon.Get(name)
	if container == nil {
		return job.Errorf("No such container: %s", name)
	}
	cLog, err := container.ReadLog("json")
	if err != nil && os.IsNotExist(err) {
		// Legacy logs: no "json" log file exists, so fall back to the
		// per-stream files. Read errors are only logged, not fatal.
		utils.Debugf("Old logs format")
		if stdout {
			cLog, err := container.ReadLog("stdout")
			if err != nil {
				utils.Errorf("Error reading logs (stdout): %s", err)
			} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
				utils.Errorf("Error streaming logs (stdout): %s", err)
			}
		}
		if stderr {
			cLog, err := container.ReadLog("stderr")
			if err != nil {
				utils.Errorf("Error reading logs (stderr): %s", err)
			} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
				utils.Errorf("Error streaming logs (stderr): %s", err)
			}
		}
	} else if err != nil {
		utils.Errorf("Error reading logs (json): %s", err)
	} else {
		// JSON log format: decode entry by entry and demultiplex onto
		// the requested streams, optionally prefixing timestamps.
		dec := json.NewDecoder(cLog)
		for {
			l := &utils.JSONLog{}
			if err := dec.Decode(l); err == io.EOF {
				break
			} else if err != nil {
				utils.Errorf("Error streaming logs: %s", err)
				break
			}
			logLine := l.Log
			if times {
				logLine = fmt.Sprintf("[%s] %s", l.Created.Format(format), logLine)
			}
			if l.Stream == "stdout" && stdout {
				fmt.Fprintf(job.Stdout, "%s", logLine)
			}
			if l.Stream == "stderr" && stderr {
				fmt.Fprintf(job.Stderr, "%s", logLine)
			}
		}
	}
	if follow {
		// Live tailing: one goroutine per requested stream. The function
		// returns after the FIRST stream finishes (single receive); the
		// buffered channel lets the other goroutine complete its send.
		errors := make(chan error, 2)
		if stdout {
			stdoutPipe := container.StdoutLogPipe()
			go func() {
				errors <- utils.WriteLog(stdoutPipe, job.Stdout, format)
			}()
		}
		if stderr {
			stderrPipe := container.StderrLogPipe()
			go func() {
				errors <- utils.WriteLog(stderrPipe, job.Stderr, format)
			}()
		}
		err := <-errors
		if err != nil {
			utils.Errorf("%s", err)
		}
	}
	return engine.StatusOK
}
// ContainerAttach attaches the job's stdio to a container.
// Usage: CONTAINER. Env options:
//   logs   - replay the stored logs first
//   stream - attach live to the container's stdio
//   stdin/stdout/stderr - which streams to wire up
func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name   = job.Args[0]
		logs   = job.GetenvBool("logs")
		stream = job.GetenvBool("stream")
		stdin  = job.GetenvBool("stdin")
		stdout = job.GetenvBool("stdout")
		stderr = job.GetenvBool("stderr")
	)
	container := srv.daemon.Get(name)
	if container == nil {
		return job.Errorf("No such container: %s", name)
	}
	//logs
	if logs {
		cLog, err := container.ReadLog("json")
		if err != nil && os.IsNotExist(err) {
			// Legacy logs: fall back to the per-stream log files.
			// Read errors are only logged, not fatal.
			utils.Debugf("Old logs format")
			if stdout {
				cLog, err := container.ReadLog("stdout")
				if err != nil {
					utils.Errorf("Error reading logs (stdout): %s", err)
				} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
					utils.Errorf("Error streaming logs (stdout): %s", err)
				}
			}
			if stderr {
				cLog, err := container.ReadLog("stderr")
				if err != nil {
					utils.Errorf("Error reading logs (stderr): %s", err)
				} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
					utils.Errorf("Error streaming logs (stderr): %s", err)
				}
			}
		} else if err != nil {
			utils.Errorf("Error reading logs (json): %s", err)
		} else {
			// JSON log format: decode entries and demultiplex onto the
			// requested streams.
			dec := json.NewDecoder(cLog)
			for {
				l := &utils.JSONLog{}
				if err := dec.Decode(l); err == io.EOF {
					break
				} else if err != nil {
					utils.Errorf("Error streaming logs: %s", err)
					break
				}
				if l.Stream == "stdout" && stdout {
					fmt.Fprintf(job.Stdout, "%s", l.Log)
				}
				if l.Stream == "stderr" && stderr {
					fmt.Fprintf(job.Stderr, "%s", l.Log)
				}
			}
		}
	}
	//stream
	if stream {
		var (
			cStdin           io.ReadCloser
			cStdout, cStderr io.Writer
			cStdinCloser     io.Closer
		)
		if stdin {
			// Pump the job's stdin through a pipe so the attach side
			// sees EOF when the job's stdin closes.
			r, w := io.Pipe()
			go func() {
				defer w.Close()
				defer utils.Debugf("Closing buffered stdin pipe")
				io.Copy(w, job.Stdin)
			}()
			cStdin = r
			cStdinCloser = job.Stdin
		}
		if stdout {
			cStdout = job.Stdout
		}
		if stderr {
			cStderr = job.Stderr
		}
		// Block until the attach ends.
		<-srv.daemon.Attach(container, cStdin, cStdinCloser, cStdout, cStderr)
		// If we are in stdinonce mode, wait for the process to end
		// otherwise, simply return
		if container.Config.StdinOnce && !container.Config.Tty {
			container.Wait()
		}
	}
	return engine.StatusOK
}
  2193. func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
  2194. if len(job.Args) != 2 {
  2195. return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
  2196. }
  2197. var (
  2198. name = job.Args[0]
  2199. resource = job.Args[1]
  2200. )
  2201. if container := srv.daemon.Get(name); container != nil {
  2202. data, err := container.Copy(resource)
  2203. if err != nil {
  2204. return job.Error(err)
  2205. }
  2206. defer data.Close()
  2207. if _, err := io.Copy(job.Stdout, data); err != nil {
  2208. return job.Error(err)
  2209. }
  2210. return engine.StatusOK
  2211. }
  2212. return job.Errorf("No such container: %s", name)
  2213. }
  2214. func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) {
  2215. daemon, err := daemon.NewDaemon(config, eng)
  2216. if err != nil {
  2217. return nil, err
  2218. }
  2219. srv := &Server{
  2220. Eng: eng,
  2221. daemon: daemon,
  2222. pullingPool: make(map[string]chan struct{}),
  2223. pushingPool: make(map[string]chan struct{}),
  2224. events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events
  2225. listeners: make(map[int64]chan utils.JSONMessage),
  2226. }
  2227. daemon.SetServer(srv)
  2228. return srv, nil
  2229. }
  2230. func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage {
  2231. now := time.Now().UTC().Unix()
  2232. jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
  2233. srv.AddEvent(jm)
  2234. for _, c := range srv.listeners {
  2235. select { // non blocking channel
  2236. case c <- jm:
  2237. default:
  2238. }
  2239. }
  2240. return &jm
  2241. }
  2242. func (srv *Server) AddEvent(jm utils.JSONMessage) {
  2243. srv.Lock()
  2244. defer srv.Unlock()
  2245. srv.events = append(srv.events, jm)
  2246. }
  2247. func (srv *Server) GetEvents() []utils.JSONMessage {
  2248. srv.RLock()
  2249. defer srv.RUnlock()
  2250. return srv.events
  2251. }
  2252. func (srv *Server) SetRunning(status bool) {
  2253. srv.Lock()
  2254. defer srv.Unlock()
  2255. srv.running = status
  2256. }
  2257. func (srv *Server) IsRunning() bool {
  2258. srv.RLock()
  2259. defer srv.RUnlock()
  2260. return srv.running
  2261. }
  2262. func (srv *Server) Close() error {
  2263. if srv == nil {
  2264. return nil
  2265. }
  2266. srv.SetRunning(false)
  2267. done := make(chan struct{})
  2268. go func() {
  2269. srv.tasks.Wait()
  2270. close(done)
  2271. }()
  2272. select {
  2273. // Waiting server jobs for 15 seconds, shutdown immediately after that time
  2274. case <-time.After(time.Second * 15):
  2275. case <-done:
  2276. }
  2277. if srv.daemon == nil {
  2278. return nil
  2279. }
  2280. return srv.daemon.Close()
  2281. }
// Server glues the engine job API to the daemon and holds the shared
// server-wide state (event history, push/pull pools, subscribers).
type Server struct {
	sync.RWMutex // guards the mutable fields below
	daemon       *daemon.Daemon
	pullingPool  map[string]chan struct{} // image names with a pull in flight
	pushingPool  map[string]chan struct{} // image names with a push in flight
	events       []utils.JSONMessage      // recorded event history (see AddEvent/GetEvents)
	listeners    map[int64]chan utils.JSONMessage // event subscribers, keyed by id
	Eng          *engine.Engine
	running      bool           // set via SetRunning; read via IsRunning
	tasks        sync.WaitGroup // outstanding server jobs; waited on in Close
}