server.go 70 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500
  1. // DEPRECATION NOTICE. PLEASE DO NOT ADD ANYTHING TO THIS FILE.
  2. //
  3. // server/server.go is deprecated. We are working on breaking it up into smaller, cleaner
  4. // pieces which will be easier to find and test. This will help make the code less
  5. // redundant and more readable.
  6. //
  7. // Contributors, please don't add anything to server/server.go, unless it has the explicit
  8. // goal of helping the deprecation effort.
  9. //
  10. // Maintainers, please refuse patches which add code to server/server.go.
  11. //
  12. // Instead try the following files:
  13. // * For code related to local image management, try graph/
  14. // * For code related to image downloading, uploading, remote search etc, try registry/
  15. // * For code related to the docker daemon, try daemon/
  16. // * For small utilities which could potentially be useful outside of Docker, try pkg/
  17. // * For miscellaneous "util" functions which are docker-specific, try encapsulating them
  18. // inside one of the subsystems above. If you really think they should be more widely
  19. // available, are you sure you can't remove the docker dependencies and move them to
  20. // pkg? In last resort, you can add them to utils/ (but please try not to).
  21. package server
  22. import (
  23. "encoding/json"
  24. "fmt"
  25. "io"
  26. "io/ioutil"
  27. "log"
  28. "net/http"
  29. "net/url"
  30. "os"
  31. "os/exec"
  32. gosignal "os/signal"
  33. "path"
  34. "path/filepath"
  35. "runtime"
  36. "strconv"
  37. "strings"
  38. "sync"
  39. "syscall"
  40. "time"
  41. "github.com/dotcloud/docker/archive"
  42. "github.com/dotcloud/docker/daemon"
  43. "github.com/dotcloud/docker/daemonconfig"
  44. "github.com/dotcloud/docker/dockerversion"
  45. "github.com/dotcloud/docker/engine"
  46. "github.com/dotcloud/docker/graph"
  47. "github.com/dotcloud/docker/image"
  48. "github.com/dotcloud/docker/pkg/graphdb"
  49. "github.com/dotcloud/docker/pkg/signal"
  50. "github.com/dotcloud/docker/registry"
  51. "github.com/dotcloud/docker/runconfig"
  52. "github.com/dotcloud/docker/utils"
  53. )
  54. // jobInitApi runs the remote api server `srv` as a daemon,
  55. // Only one api server can run at the same time - this is enforced by a pidfile.
  56. // The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
  57. func InitServer(job *engine.Job) engine.Status {
  58. job.Logf("Creating server")
  59. srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job))
  60. if err != nil {
  61. return job.Error(err)
  62. }
  63. if srv.daemon.Config().Pidfile != "" {
  64. job.Logf("Creating pidfile")
  65. if err := utils.CreatePidFile(srv.daemon.Config().Pidfile); err != nil {
  66. // FIXME: do we need fatal here instead of returning a job error?
  67. log.Fatal(err)
  68. }
  69. }
  70. job.Logf("Setting up signal traps")
  71. c := make(chan os.Signal, 1)
  72. gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
  73. go func() {
  74. interruptCount := 0
  75. for sig := range c {
  76. go func() {
  77. log.Printf("Received signal '%v', starting shutdown of docker...\n", sig)
  78. switch sig {
  79. case os.Interrupt, syscall.SIGTERM:
  80. // If the user really wants to interrupt, let him do so.
  81. if interruptCount < 3 {
  82. interruptCount++
  83. // Initiate the cleanup only once
  84. if interruptCount == 1 {
  85. utils.RemovePidFile(srv.daemon.Config().Pidfile)
  86. srv.Close()
  87. } else {
  88. return
  89. }
  90. } else {
  91. log.Printf("Force shutdown of docker, interrupting cleanup\n")
  92. }
  93. case syscall.SIGQUIT:
  94. }
  95. os.Exit(128 + int(sig.(syscall.Signal)))
  96. }()
  97. }
  98. }()
  99. job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
  100. job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
  101. // FIXME: 'insert' is deprecated and should be removed in a future version.
  102. for name, handler := range map[string]engine.Handler{
  103. "export": srv.ContainerExport,
  104. "create": srv.ContainerCreate,
  105. "stop": srv.ContainerStop,
  106. "restart": srv.ContainerRestart,
  107. "start": srv.ContainerStart,
  108. "kill": srv.ContainerKill,
  109. "wait": srv.ContainerWait,
  110. "tag": srv.ImageTag, // FIXME merge with "image_tag"
  111. "resize": srv.ContainerResize,
  112. "commit": srv.ContainerCommit,
  113. "info": srv.DockerInfo,
  114. "container_delete": srv.ContainerDestroy,
  115. "image_export": srv.ImageExport,
  116. "images": srv.Images,
  117. "history": srv.ImageHistory,
  118. "viz": srv.ImagesViz,
  119. "container_copy": srv.ContainerCopy,
  120. "insert": srv.ImageInsert,
  121. "attach": srv.ContainerAttach,
  122. "logs": srv.ContainerLogs,
  123. "changes": srv.ContainerChanges,
  124. "top": srv.ContainerTop,
  125. "load": srv.ImageLoad,
  126. "build": srv.Build,
  127. "pull": srv.ImagePull,
  128. "import": srv.ImageImport,
  129. "image_delete": srv.ImageDelete,
  130. "inspect": srv.JobInspect,
  131. "events": srv.Events,
  132. "push": srv.ImagePush,
  133. "containers": srv.Containers,
  134. } {
  135. if err := job.Eng.Register(name, handler); err != nil {
  136. return job.Error(err)
  137. }
  138. }
  139. // Install image-related commands from the image subsystem.
  140. // See `graph/service.go`
  141. if err := srv.daemon.Repositories().Install(job.Eng); err != nil {
  142. return job.Error(err)
  143. }
  144. return engine.StatusOK
  145. }
  146. // ContainerKill send signal to the container
  147. // If no signal is given (sig 0), then Kill with SIGKILL and wait
  148. // for the container to exit.
  149. // If a signal is given, then just send it to the container and return.
  150. func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
  151. if n := len(job.Args); n < 1 || n > 2 {
  152. return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
  153. }
  154. var (
  155. name = job.Args[0]
  156. sig uint64
  157. err error
  158. )
  159. // If we have a signal, look at it. Otherwise, do nothing
  160. if len(job.Args) == 2 && job.Args[1] != "" {
  161. // Check if we passed the signal as a number:
  162. // The largest legal signal is 31, so let's parse on 5 bits
  163. sig, err = strconv.ParseUint(job.Args[1], 10, 5)
  164. if err != nil {
  165. // The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL")
  166. sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")])
  167. if sig == 0 {
  168. return job.Errorf("Invalid signal: %s", job.Args[1])
  169. }
  170. }
  171. }
  172. if container := srv.daemon.Get(name); container != nil {
  173. // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
  174. if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
  175. if err := container.Kill(); err != nil {
  176. return job.Errorf("Cannot kill container %s: %s", name, err)
  177. }
  178. srv.LogEvent("kill", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  179. } else {
  180. // Otherwise, just send the requested signal
  181. if err := container.KillSig(int(sig)); err != nil {
  182. return job.Errorf("Cannot kill container %s: %s", name, err)
  183. }
  184. // FIXME: Add event for signals
  185. }
  186. } else {
  187. return job.Errorf("No such container: %s", name)
  188. }
  189. return engine.StatusOK
  190. }
  191. func (srv *Server) EvictListener(from int64) {
  192. srv.Lock()
  193. if old, ok := srv.listeners[from]; ok {
  194. delete(srv.listeners, from)
  195. close(old)
  196. }
  197. srv.Unlock()
  198. }
  199. func (srv *Server) Events(job *engine.Job) engine.Status {
  200. if len(job.Args) != 0 {
  201. return job.Errorf("Usage: %s", job.Name)
  202. }
  203. var (
  204. from = time.Now().UTC().UnixNano()
  205. since = job.GetenvInt64("since")
  206. until = job.GetenvInt64("until")
  207. timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
  208. )
  209. sendEvent := func(event *utils.JSONMessage) error {
  210. b, err := json.Marshal(event)
  211. if err != nil {
  212. return fmt.Errorf("JSON error")
  213. }
  214. _, err = job.Stdout.Write(b)
  215. return err
  216. }
  217. listener := make(chan utils.JSONMessage)
  218. srv.Lock()
  219. if old, ok := srv.listeners[from]; ok {
  220. delete(srv.listeners, from)
  221. close(old)
  222. }
  223. srv.listeners[from] = listener
  224. srv.Unlock()
  225. job.Stdout.Write(nil) // flush
  226. if since != 0 {
  227. // If since, send previous events that happened after the timestamp and until timestamp
  228. for _, event := range srv.GetEvents() {
  229. if event.Time >= since && (event.Time <= until || until == 0) {
  230. err := sendEvent(&event)
  231. if err != nil && err.Error() == "JSON error" {
  232. continue
  233. }
  234. if err != nil {
  235. // On error, evict the listener
  236. srv.EvictListener(from)
  237. return job.Error(err)
  238. }
  239. }
  240. }
  241. }
  242. // If no until, disable timeout
  243. if until == 0 {
  244. timeout.Stop()
  245. }
  246. for {
  247. select {
  248. case event, ok := <-listener:
  249. if !ok { // Channel is closed: listener was evicted
  250. return engine.StatusOK
  251. }
  252. err := sendEvent(&event)
  253. if err != nil && err.Error() == "JSON error" {
  254. continue
  255. }
  256. if err != nil {
  257. // On error, evict the listener
  258. srv.EvictListener(from)
  259. return job.Error(err)
  260. }
  261. case <-timeout.C:
  262. return engine.StatusOK
  263. }
  264. }
  265. return engine.StatusOK
  266. }
  267. func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
  268. if len(job.Args) != 1 {
  269. return job.Errorf("Usage: %s container_id", job.Name)
  270. }
  271. name := job.Args[0]
  272. if container := srv.daemon.Get(name); container != nil {
  273. data, err := container.Export()
  274. if err != nil {
  275. return job.Errorf("%s: %s", name, err)
  276. }
  277. defer data.Close()
  278. // Stream the entire contents of the container (basically a volatile snapshot)
  279. if _, err := io.Copy(job.Stdout, data); err != nil {
  280. return job.Errorf("%s: %s", name, err)
  281. }
  282. // FIXME: factor job-specific LogEvent to engine.Job.Run()
  283. srv.LogEvent("export", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  284. return engine.StatusOK
  285. }
  286. return job.Errorf("No such container: %s", name)
  287. }
  288. // ImageExport exports all images with the given tag. All versions
  289. // containing the same tag are exported. The resulting output is an
  290. // uncompressed tar ball.
  291. // name is the set of tags to export.
  292. // out is the writer where the images are written to.
  293. func (srv *Server) ImageExport(job *engine.Job) engine.Status {
  294. if len(job.Args) != 1 {
  295. return job.Errorf("Usage: %s IMAGE\n", job.Name)
  296. }
  297. name := job.Args[0]
  298. // get image json
  299. tempdir, err := ioutil.TempDir("", "docker-export-")
  300. if err != nil {
  301. return job.Error(err)
  302. }
  303. defer os.RemoveAll(tempdir)
  304. utils.Debugf("Serializing %s", name)
  305. rootRepo, err := srv.daemon.Repositories().Get(name)
  306. if err != nil {
  307. return job.Error(err)
  308. }
  309. if rootRepo != nil {
  310. for _, id := range rootRepo {
  311. image, err := srv.ImageInspect(id)
  312. if err != nil {
  313. return job.Error(err)
  314. }
  315. if err := srv.exportImage(image, tempdir); err != nil {
  316. return job.Error(err)
  317. }
  318. }
  319. // write repositories
  320. rootRepoMap := map[string]graph.Repository{}
  321. rootRepoMap[name] = rootRepo
  322. rootRepoJson, _ := json.Marshal(rootRepoMap)
  323. if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
  324. return job.Error(err)
  325. }
  326. } else {
  327. image, err := srv.ImageInspect(name)
  328. if err != nil {
  329. return job.Error(err)
  330. }
  331. if err := srv.exportImage(image, tempdir); err != nil {
  332. return job.Error(err)
  333. }
  334. }
  335. fs, err := archive.Tar(tempdir, archive.Uncompressed)
  336. if err != nil {
  337. return job.Error(err)
  338. }
  339. defer fs.Close()
  340. if _, err := io.Copy(job.Stdout, fs); err != nil {
  341. return job.Error(err)
  342. }
  343. return engine.StatusOK
  344. }
  345. func (srv *Server) exportImage(img *image.Image, tempdir string) error {
  346. for i := img; i != nil; {
  347. // temporary directory
  348. tmpImageDir := path.Join(tempdir, i.ID)
  349. if err := os.Mkdir(tmpImageDir, os.FileMode(0755)); err != nil {
  350. if os.IsExist(err) {
  351. return nil
  352. }
  353. return err
  354. }
  355. var version = "1.0"
  356. var versionBuf = []byte(version)
  357. if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.FileMode(0644)); err != nil {
  358. return err
  359. }
  360. // serialize json
  361. b, err := json.Marshal(i)
  362. if err != nil {
  363. return err
  364. }
  365. if err := ioutil.WriteFile(path.Join(tmpImageDir, "json"), b, os.FileMode(0644)); err != nil {
  366. return err
  367. }
  368. // serialize filesystem
  369. fs, err := i.TarLayer()
  370. if err != nil {
  371. return err
  372. }
  373. defer fs.Close()
  374. fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar"))
  375. if err != nil {
  376. return err
  377. }
  378. if written, err := io.Copy(fsTar, fs); err != nil {
  379. return err
  380. } else {
  381. utils.Debugf("rendered layer for %s of [%d] size", i.ID, written)
  382. }
  383. if err = fsTar.Close(); err != nil {
  384. return err
  385. }
  386. // find parent
  387. if i.Parent != "" {
  388. i, err = srv.ImageInspect(i.Parent)
  389. if err != nil {
  390. return err
  391. }
  392. } else {
  393. i = nil
  394. }
  395. }
  396. return nil
  397. }
  398. func (srv *Server) Build(job *engine.Job) engine.Status {
  399. if len(job.Args) != 0 {
  400. return job.Errorf("Usage: %s\n", job.Name)
  401. }
  402. var (
  403. remoteURL = job.Getenv("remote")
  404. repoName = job.Getenv("t")
  405. suppressOutput = job.GetenvBool("q")
  406. noCache = job.GetenvBool("nocache")
  407. rm = job.GetenvBool("rm")
  408. authConfig = &registry.AuthConfig{}
  409. configFile = &registry.ConfigFile{}
  410. tag string
  411. context io.ReadCloser
  412. )
  413. job.GetenvJson("authConfig", authConfig)
  414. job.GetenvJson("configFile", configFile)
  415. repoName, tag = utils.ParseRepositoryTag(repoName)
  416. if remoteURL == "" {
  417. context = ioutil.NopCloser(job.Stdin)
  418. } else if utils.IsGIT(remoteURL) {
  419. if !strings.HasPrefix(remoteURL, "git://") {
  420. remoteURL = "https://" + remoteURL
  421. }
  422. root, err := ioutil.TempDir("", "docker-build-git")
  423. if err != nil {
  424. return job.Error(err)
  425. }
  426. defer os.RemoveAll(root)
  427. if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
  428. return job.Errorf("Error trying to use git: %s (%s)", err, output)
  429. }
  430. c, err := archive.Tar(root, archive.Uncompressed)
  431. if err != nil {
  432. return job.Error(err)
  433. }
  434. context = c
  435. } else if utils.IsURL(remoteURL) {
  436. f, err := utils.Download(remoteURL)
  437. if err != nil {
  438. return job.Error(err)
  439. }
  440. defer f.Body.Close()
  441. dockerFile, err := ioutil.ReadAll(f.Body)
  442. if err != nil {
  443. return job.Error(err)
  444. }
  445. c, err := archive.Generate("Dockerfile", string(dockerFile))
  446. if err != nil {
  447. return job.Error(err)
  448. }
  449. context = c
  450. }
  451. defer context.Close()
  452. sf := utils.NewStreamFormatter(job.GetenvBool("json"))
  453. b := NewBuildFile(srv,
  454. &utils.StdoutFormater{
  455. Writer: job.Stdout,
  456. StreamFormatter: sf,
  457. },
  458. &utils.StderrFormater{
  459. Writer: job.Stdout,
  460. StreamFormatter: sf,
  461. },
  462. !suppressOutput, !noCache, rm, job.Stdout, sf, authConfig, configFile)
  463. id, err := b.Build(context)
  464. if err != nil {
  465. return job.Error(err)
  466. }
  467. if repoName != "" {
  468. srv.daemon.Repositories().Set(repoName, tag, id, false)
  469. }
  470. return engine.StatusOK
  471. }
  472. // Loads a set of images into the repository. This is the complementary of ImageExport.
  473. // The input stream is an uncompressed tar ball containing images and metadata.
  474. func (srv *Server) ImageLoad(job *engine.Job) engine.Status {
  475. tmpImageDir, err := ioutil.TempDir("", "docker-import-")
  476. if err != nil {
  477. return job.Error(err)
  478. }
  479. defer os.RemoveAll(tmpImageDir)
  480. var (
  481. repoTarFile = path.Join(tmpImageDir, "repo.tar")
  482. repoDir = path.Join(tmpImageDir, "repo")
  483. )
  484. tarFile, err := os.Create(repoTarFile)
  485. if err != nil {
  486. return job.Error(err)
  487. }
  488. if _, err := io.Copy(tarFile, job.Stdin); err != nil {
  489. return job.Error(err)
  490. }
  491. tarFile.Close()
  492. repoFile, err := os.Open(repoTarFile)
  493. if err != nil {
  494. return job.Error(err)
  495. }
  496. if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
  497. return job.Error(err)
  498. }
  499. if err := archive.Untar(repoFile, repoDir, nil); err != nil {
  500. return job.Error(err)
  501. }
  502. dirs, err := ioutil.ReadDir(repoDir)
  503. if err != nil {
  504. return job.Error(err)
  505. }
  506. for _, d := range dirs {
  507. if d.IsDir() {
  508. if err := srv.recursiveLoad(d.Name(), tmpImageDir); err != nil {
  509. return job.Error(err)
  510. }
  511. }
  512. }
  513. repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories"))
  514. if err == nil {
  515. repositories := map[string]graph.Repository{}
  516. if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
  517. return job.Error(err)
  518. }
  519. for imageName, tagMap := range repositories {
  520. for tag, address := range tagMap {
  521. if err := srv.daemon.Repositories().Set(imageName, tag, address, true); err != nil {
  522. return job.Error(err)
  523. }
  524. }
  525. }
  526. } else if !os.IsNotExist(err) {
  527. return job.Error(err)
  528. }
  529. return engine.StatusOK
  530. }
  531. func (srv *Server) recursiveLoad(address, tmpImageDir string) error {
  532. if _, err := srv.ImageInspect(address); err != nil {
  533. utils.Debugf("Loading %s", address)
  534. imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json"))
  535. if err != nil {
  536. utils.Debugf("Error reading json", err)
  537. return err
  538. }
  539. layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar"))
  540. if err != nil {
  541. utils.Debugf("Error reading embedded tar", err)
  542. return err
  543. }
  544. img, err := image.NewImgJSON(imageJson)
  545. if err != nil {
  546. utils.Debugf("Error unmarshalling json", err)
  547. return err
  548. }
  549. if img.Parent != "" {
  550. if !srv.daemon.Graph().Exists(img.Parent) {
  551. if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil {
  552. return err
  553. }
  554. }
  555. }
  556. if err := srv.daemon.Graph().Register(imageJson, layer, img); err != nil {
  557. return err
  558. }
  559. }
  560. utils.Debugf("Completed processing %s", address)
  561. return nil
  562. }
  563. // FIXME: 'insert' is deprecated and should be removed in a future version.
  564. func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
  565. fmt.Fprintf(job.Stderr, "Warning: '%s' is deprecated and will be removed in a future version. Please use 'build' and 'ADD' instead.\n", job.Name)
  566. if len(job.Args) != 3 {
  567. return job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name)
  568. }
  569. var (
  570. name = job.Args[0]
  571. url = job.Args[1]
  572. path = job.Args[2]
  573. )
  574. sf := utils.NewStreamFormatter(job.GetenvBool("json"))
  575. out := utils.NewWriteFlusher(job.Stdout)
  576. img, err := srv.daemon.Repositories().LookupImage(name)
  577. if err != nil {
  578. return job.Error(err)
  579. }
  580. file, err := utils.Download(url)
  581. if err != nil {
  582. return job.Error(err)
  583. }
  584. defer file.Body.Close()
  585. config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.daemon.SystemConfig())
  586. if err != nil {
  587. return job.Error(err)
  588. }
  589. c, _, err := srv.daemon.Create(config, "")
  590. if err != nil {
  591. return job.Error(err)
  592. }
  593. if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, utils.TruncateID(img.ID), "Downloading"), path); err != nil {
  594. return job.Error(err)
  595. }
  596. // FIXME: Handle custom repo, tag comment, author
  597. img, err = srv.daemon.Commit(c, "", "", img.Comment, img.Author, nil)
  598. if err != nil {
  599. out.Write(sf.FormatError(err))
  600. return engine.StatusErr
  601. }
  602. out.Write(sf.FormatStatus("", img.ID))
  603. return engine.StatusOK
  604. }
  605. func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
  606. images, _ := srv.daemon.Graph().Map()
  607. if images == nil {
  608. return engine.StatusOK
  609. }
  610. job.Stdout.Write([]byte("digraph docker {\n"))
  611. var (
  612. parentImage *image.Image
  613. err error
  614. )
  615. for _, image := range images {
  616. parentImage, err = image.GetParent()
  617. if err != nil {
  618. return job.Errorf("Error while getting parent image: %v", err)
  619. }
  620. if parentImage != nil {
  621. job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
  622. } else {
  623. job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
  624. }
  625. }
  626. reporefs := make(map[string][]string)
  627. for name, repository := range srv.daemon.Repositories().Repositories {
  628. for tag, id := range repository {
  629. reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag))
  630. }
  631. }
  632. for id, repos := range reporefs {
  633. job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
  634. }
  635. job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
  636. return engine.StatusOK
  637. }
// Images handles the "images" job: it writes the list of images as an
// engine table (newest first) to job.Stdout.
//
// Env:
//   all    (bool)   - include every image in the graph, not just heads
//   filter (string) - glob pattern matched against repository names
func (srv *Server) Images(job *engine.Job) engine.Status {
	var (
		allImages map[string]*image.Image
		err       error
	)
	// "all" selects the whole graph; otherwise only head images.
	if job.GetenvBool("all") {
		allImages, err = srv.daemon.Graph().Map()
	} else {
		allImages, err = srv.daemon.Graph().Heads()
	}
	if err != nil {
		return job.Error(err)
	}
	// One engine.Env per image ID; repeated tags on the same image are
	// merged into its RepoTags list.
	lookup := make(map[string]*engine.Env)
	for name, repository := range srv.daemon.Repositories().Repositories {
		if job.Getenv("filter") != "" {
			if match, _ := path.Match(job.Getenv("filter"), name); !match {
				continue
			}
		}
		for tag, id := range repository {
			image, err := srv.daemon.Graph().Get(id)
			if err != nil {
				// A dangling tag is not fatal: warn and keep going.
				log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
				continue
			}
			if out, exists := lookup[id]; exists {
				// Already listed under another repo/tag: just append the tag.
				out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag)))
			} else {
				out := &engine.Env{}
				// Remove tagged images from allImages so only untagged
				// ones remain for the "<none>" pass below.
				delete(allImages, id)
				out.Set("ParentId", image.Parent)
				out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)})
				out.Set("Id", image.ID)
				out.SetInt64("Created", image.Created.Unix())
				out.SetInt64("Size", image.Size)
				out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
				lookup[id] = out
			}
		}
	}
	outs := engine.NewTable("Created", len(lookup))
	for _, value := range lookup {
		outs.Add(value)
	}
	// Display images which aren't part of a repository/tag
	if job.Getenv("filter") == "" {
		for _, image := range allImages {
			out := &engine.Env{}
			out.Set("ParentId", image.Parent)
			out.SetList("RepoTags", []string{"<none>:<none>"})
			out.Set("Id", image.ID)
			out.SetInt64("Created", image.Created.Unix())
			out.SetInt64("Size", image.Size)
			out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
			outs.Add(out)
		}
	}
	// Newest first.
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
  702. func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
  703. images, _ := srv.daemon.Graph().Map()
  704. var imgcount int
  705. if images == nil {
  706. imgcount = 0
  707. } else {
  708. imgcount = len(images)
  709. }
  710. kernelVersion := "<unknown>"
  711. if kv, err := utils.GetKernelVersion(); err == nil {
  712. kernelVersion = kv.String()
  713. }
  714. // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
  715. initPath := utils.DockerInitPath("")
  716. if initPath == "" {
  717. // if that fails, we'll just return the path from the daemon
  718. initPath = srv.daemon.SystemInitPath()
  719. }
  720. v := &engine.Env{}
  721. v.SetInt("Containers", len(srv.daemon.List()))
  722. v.SetInt("Images", imgcount)
  723. v.Set("Driver", srv.daemon.GraphDriver().String())
  724. v.SetJson("DriverStatus", srv.daemon.GraphDriver().Status())
  725. v.SetBool("MemoryLimit", srv.daemon.SystemConfig().MemoryLimit)
  726. v.SetBool("SwapLimit", srv.daemon.SystemConfig().SwapLimit)
  727. v.SetBool("IPv4Forwarding", !srv.daemon.SystemConfig().IPv4ForwardingDisabled)
  728. v.SetBool("Debug", os.Getenv("DEBUG") != "")
  729. v.SetInt("NFd", utils.GetTotalUsedFds())
  730. v.SetInt("NGoroutines", runtime.NumGoroutine())
  731. v.Set("ExecutionDriver", srv.daemon.ExecutionDriver().Name())
  732. v.SetInt("NEventsListener", len(srv.listeners))
  733. v.Set("KernelVersion", kernelVersion)
  734. v.Set("IndexServerAddress", registry.IndexServerAddress())
  735. v.Set("InitSha1", dockerversion.INITSHA1)
  736. v.Set("InitPath", initPath)
  737. if _, err := v.WriteTo(job.Stdout); err != nil {
  738. return job.Error(err)
  739. }
  740. return engine.StatusOK
  741. }
  742. func (srv *Server) ImageHistory(job *engine.Job) engine.Status {
  743. if n := len(job.Args); n != 1 {
  744. return job.Errorf("Usage: %s IMAGE", job.Name)
  745. }
  746. name := job.Args[0]
  747. foundImage, err := srv.daemon.Repositories().LookupImage(name)
  748. if err != nil {
  749. return job.Error(err)
  750. }
  751. lookupMap := make(map[string][]string)
  752. for name, repository := range srv.daemon.Repositories().Repositories {
  753. for tag, id := range repository {
  754. // If the ID already has a reverse lookup, do not update it unless for "latest"
  755. if _, exists := lookupMap[id]; !exists {
  756. lookupMap[id] = []string{}
  757. }
  758. lookupMap[id] = append(lookupMap[id], name+":"+tag)
  759. }
  760. }
  761. outs := engine.NewTable("Created", 0)
  762. err = foundImage.WalkHistory(func(img *image.Image) error {
  763. out := &engine.Env{}
  764. out.Set("Id", img.ID)
  765. out.SetInt64("Created", img.Created.Unix())
  766. out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " "))
  767. out.SetList("Tags", lookupMap[img.ID])
  768. out.SetInt64("Size", img.Size)
  769. outs.Add(out)
  770. return nil
  771. })
  772. outs.ReverseSort()
  773. if _, err := outs.WriteListTo(job.Stdout); err != nil {
  774. return job.Error(err)
  775. }
  776. return engine.StatusOK
  777. }
// ContainerTop handles the "top" job: it runs "ps" on the host and
// reports only the lines whose PID belongs to the given container.
//
// Usage: top CONTAINER [PS_ARGS]   (PS_ARGS defaults to "-ef")
func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
	if len(job.Args) != 1 && len(job.Args) != 2 {
		return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
	}
	var (
		name   = job.Args[0]
		psArgs = "-ef"
	)
	if len(job.Args) == 2 && job.Args[1] != "" {
		psArgs = job.Args[1]
	}
	if container := srv.daemon.Get(name); container != nil {
		if !container.State.IsRunning() {
			return job.Errorf("Container %s is not running", name)
		}
		// Host-namespace PIDs of every process inside the container.
		pids, err := srv.daemon.ExecutionDriver().GetPidsForContainer(container.ID)
		if err != nil {
			return job.Error(err)
		}
		// Run ps over ALL host processes; we filter by PID below.
		output, err := exec.Command("ps", psArgs).Output()
		if err != nil {
			return job.Errorf("Error running ps: %s", err)
		}
		lines := strings.Split(string(output), "\n")
		header := strings.Fields(lines[0])
		out := &engine.Env{}
		out.SetList("Titles", header)
		// Locate the PID column in the ps header (last match wins; the
		// loop deliberately does not break early).
		pidIndex := -1
		for i, name := range header {
			if name == "PID" {
				pidIndex = i
			}
		}
		if pidIndex == -1 {
			return job.Errorf("Couldn't find PID field in ps output")
		}
		processes := [][]string{}
		for _, line := range lines[1:] {
			if len(line) == 0 {
				continue
			}
			fields := strings.Fields(line)
			// NOTE(review): assumes each non-empty ps line has at least
			// pidIndex+1 fields; a shorter line would panic here — confirm
			// against the ps formats users can pass via PS_ARGS.
			p, err := strconv.Atoi(fields[pidIndex])
			if err != nil {
				return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
			}
			for _, pid := range pids {
				if pid == p {
					// Make sure number of fields equals number of header titles
					// merging "overhanging" fields
					process := fields[:len(header)-1]
					process = append(process, strings.Join(fields[len(header)-1:], " "))
					processes = append(processes, process)
				}
			}
		}
		out.SetJson("Processes", processes)
		out.WriteTo(job.Stdout)
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
  840. func (srv *Server) ContainerChanges(job *engine.Job) engine.Status {
  841. if n := len(job.Args); n != 1 {
  842. return job.Errorf("Usage: %s CONTAINER", job.Name)
  843. }
  844. name := job.Args[0]
  845. if container := srv.daemon.Get(name); container != nil {
  846. outs := engine.NewTable("", 0)
  847. changes, err := container.Changes()
  848. if err != nil {
  849. return job.Error(err)
  850. }
  851. for _, change := range changes {
  852. out := &engine.Env{}
  853. if err := out.Import(change); err != nil {
  854. return job.Error(err)
  855. }
  856. outs.Add(out)
  857. }
  858. if _, err := outs.WriteListTo(job.Stdout); err != nil {
  859. return job.Error(err)
  860. }
  861. } else {
  862. return job.Errorf("No such container: %s", name)
  863. }
  864. return engine.StatusOK
  865. }
// Containers handles the "containers" job (docker ps): it writes one
// engine.Env per matching container, newest first, to job.Stdout.
//
// Env:
//   all    (bool)   - include stopped containers
//   since  (string) - only containers created after this one
//   before (string) - only containers created before this one
//   limit  (int)    - maximum number of containers to return (when > 0)
//   size   (bool)   - also compute rw/rootfs sizes
func (srv *Server) Containers(job *engine.Job) engine.Status {
	var (
		foundBefore bool
		displayed   int
		all         = job.GetenvBool("all")
		since       = job.Getenv("since")
		before      = job.Getenv("before")
		n           = job.GetenvInt("limit")
		size        = job.GetenvBool("size")
	)
	outs := engine.NewTable("Created", 0)
	// Map container ID -> all of its names in the name graph (a container
	// can be reachable through several paths, e.g. via links).
	names := map[string][]string{}
	srv.daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
		names[e.ID()] = append(names[e.ID()], p)
		return nil
	}, -1)
	// Resolve boundary containers up front so a bad name fails fast.
	var beforeCont, sinceCont *daemon.Container
	if before != "" {
		beforeCont = srv.daemon.Get(before)
		if beforeCont == nil {
			return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
		}
	}
	if since != "" {
		sinceCont = srv.daemon.Get(since)
		if sinceCont == nil {
			return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
		}
	}
	for _, container := range srv.daemon.List() {
		// By default only running containers are shown; any of the
		// filters below implies considering stopped ones too.
		if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
			continue
		}
		// Skip everything up to and including the "before" container.
		if before != "" && !foundBefore {
			if container.ID == beforeCont.ID {
				foundBefore = true
			}
			continue
		}
		if n > 0 && displayed == n {
			break
		}
		// Stop once the "since" container is reached.
		if since != "" {
			if container.ID == sinceCont.ID {
				break
			}
		}
		displayed++
		out := &engine.Env{}
		out.Set("Id", container.ID)
		out.SetList("Names", names[container.ID])
		out.Set("Image", srv.daemon.Repositories().ImageName(container.Image))
		if len(container.Args) > 0 {
			// Quote args containing spaces so the rendered command line
			// stays unambiguous.
			args := []string{}
			for _, arg := range container.Args {
				if strings.Contains(arg, " ") {
					args = append(args, fmt.Sprintf("'%s'", arg))
				} else {
					args = append(args, arg)
				}
			}
			argsAsString := strings.Join(args, " ")
			out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, argsAsString))
		} else {
			out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
		}
		out.SetInt64("Created", container.Created.Unix())
		out.Set("Status", container.State.String())
		str, err := container.NetworkSettings.PortMappingAPI().ToListString()
		if err != nil {
			return job.Error(err)
		}
		out.Set("Ports", str)
		if size {
			sizeRw, sizeRootFs := container.GetSize()
			out.SetInt64("SizeRw", sizeRw)
			out.SetInt64("SizeRootFs", sizeRootFs)
		}
		outs.Add(out)
	}
	// Newest first.
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
  952. func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
  953. if len(job.Args) != 1 {
  954. return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
  955. }
  956. name := job.Args[0]
  957. container := srv.daemon.Get(name)
  958. if container == nil {
  959. return job.Errorf("No such container: %s", name)
  960. }
  961. var config = container.Config
  962. var newConfig runconfig.Config
  963. if err := job.GetenvJson("config", &newConfig); err != nil {
  964. return job.Error(err)
  965. }
  966. if err := runconfig.Merge(&newConfig, config); err != nil {
  967. return job.Error(err)
  968. }
  969. img, err := srv.daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig)
  970. if err != nil {
  971. return job.Error(err)
  972. }
  973. job.Printf("%s\n", img.ID)
  974. return engine.StatusOK
  975. }
  976. func (srv *Server) ImageTag(job *engine.Job) engine.Status {
  977. if len(job.Args) != 2 && len(job.Args) != 3 {
  978. return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
  979. }
  980. var tag string
  981. if len(job.Args) == 3 {
  982. tag = job.Args[2]
  983. }
  984. if err := srv.daemon.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
  985. return job.Error(err)
  986. }
  987. return engine.StatusOK
  988. }
// pullImage downloads imgID and all of its ancestor layers from endpoint,
// registering each one in the local graph. Layers already present locally
// are skipped. Progress is streamed to out via sf.
//
// NOTE(review): the defers for poolRemove and layer.Close fire only when
// pullImage returns, not per loop iteration, so pool entries and open
// layer streams accumulate for the whole history — worth extracting the
// loop body into a helper.
func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error {
	// Full ancestry of the image, most-derived first.
	history, err := r.GetRemoteHistory(imgID, endpoint, token)
	if err != nil {
		return err
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil))
	// FIXME: Try to stream the images?
	// FIXME: Launch the getRemoteImage() in goroutines
	// Walk base-first so parents are registered before their children.
	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]
		// ensure no two downloads of the same layer happen at the same time
		if c, err := srv.poolAdd("pull", "layer:"+id); err != nil {
			utils.Errorf("Image (id: %s) pull is already running, skipping: %v", id, err)
			<-c
		}
		defer srv.poolRemove("pull", "layer:"+id)
		if !srv.daemon.Graph().Exists(id) {
			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
			var (
				imgJSON []byte
				imgSize int
				err     error
				img     *image.Image
			)
			// Fetch and parse the image JSON, retrying with a linear
			// backoff (j * 500ms); the last attempt's failure is fatal.
			retries := 5
			for j := 1; j <= retries; j++ {
				imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
				if err != nil && j == retries {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
					return err
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				}
				img, err = image.NewImgJSON(imgJSON)
				if err != nil && j == retries {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
					return fmt.Errorf("Failed to parse json: %s", err)
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else {
					break
				}
			}
			// Get the layer
			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling fs layer", nil))
			layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
			if err != nil {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
				return err
			}
			defer layer.Close()
			// Register streams the layer tar into the graph while
			// reporting download progress to the client.
			if err := srv.daemon.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
				return err
			}
		}
		out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil))
	}
	return nil
}
// pullRepository pulls every tag of remoteName (or just askedTag, if set)
// from the registry and records the results under localName in the local
// tag store. With parallel=true each image is downloaded in its own
// goroutine; every downloadImage invocation then sends exactly one value
// (nil or an error) on the errors channel so the final wait loop balances.
func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool) error {
	out.Write(sf.FormatStatus("", "Pulling repository %s", localName))
	repoData, err := r.GetRepositoryData(remoteName)
	if err != nil {
		return err
	}
	utils.Debugf("Retrieving the tag list")
	tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens)
	if err != nil {
		utils.Errorf("%v", err)
		return err
	}
	// Make sure every tagged image has an entry in the image list.
	for tag, id := range tagsList {
		repoData.ImgList[id] = &registry.ImgData{
			ID:       id,
			Tag:      tag,
			Checksum: "",
		}
	}
	utils.Debugf("Registering tags")
	// If no tag has been specified, pull them all
	if askedTag == "" {
		for tag, id := range tagsList {
			repoData.ImgList[id].Tag = tag
		}
	} else {
		// Otherwise, check that the tag exists and use only that one
		id, exists := tagsList[askedTag]
		if !exists {
			return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName)
		}
		repoData.ImgList[id].Tag = askedTag
	}
	errors := make(chan error)
	for _, image := range repoData.ImgList {
		downloadImage := func(img *registry.ImgData) {
			// Skip images that don't match the requested tag.
			if askedTag != "" && img.Tag != askedTag {
				utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
				if parallel {
					errors <- nil
				}
				return
			}
			if img.Tag == "" {
				utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
				if parallel {
					errors <- nil
				}
				return
			}
			// ensure no two downloads of the same image happen at the same time
			if c, err := srv.poolAdd("pull", "img:"+img.ID); err != nil {
				if c != nil {
					// Someone else is pulling it: wait for them to finish.
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
				} else {
					utils.Errorf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
				if parallel {
					errors <- nil
				}
				return
			}
			defer srv.poolRemove("pull", "img:"+img.ID)
			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil))
			// Try each registry endpoint in turn until one succeeds.
			success := false
			var lastErr error
			for _, ep := range repoData.Endpoints {
				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil))
				if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
					// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
					// As the error is also given to the output stream the user will see the error.
					lastErr = err
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil))
					continue
				}
				success = true
				break
			}
			if !success {
				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, %s", img.Tag, localName, lastErr), nil))
				if parallel {
					errors <- fmt.Errorf("Could not find repository on any of the indexed registries.")
					return
				}
			}
			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
			if parallel {
				errors <- nil
			}
		}
		if parallel {
			go downloadImage(image)
		} else {
			downloadImage(image)
		}
	}
	// Drain one result per image and surface the last failure, if any.
	if parallel {
		var lastError error
		for i := 0; i < len(repoData.ImgList); i++ {
			if err := <-errors; err != nil {
				lastError = err
			}
		}
		if lastError != nil {
			return lastError
		}
	}
	// Register the pulled tags in the local tag store.
	for tag, id := range tagsList {
		if askedTag != "" && tag != askedTag {
			continue
		}
		if err := srv.daemon.Repositories().Set(localName, tag, id, true); err != nil {
			return err
		}
	}
	if err := srv.daemon.Repositories().Save(); err != nil {
		return err
	}
	return nil
}
  1173. func (srv *Server) poolAdd(kind, key string) (chan struct{}, error) {
  1174. srv.Lock()
  1175. defer srv.Unlock()
  1176. if c, exists := srv.pullingPool[key]; exists {
  1177. return c, fmt.Errorf("pull %s is already in progress", key)
  1178. }
  1179. if c, exists := srv.pushingPool[key]; exists {
  1180. return c, fmt.Errorf("push %s is already in progress", key)
  1181. }
  1182. c := make(chan struct{})
  1183. switch kind {
  1184. case "pull":
  1185. srv.pullingPool[key] = c
  1186. case "push":
  1187. srv.pushingPool[key] = c
  1188. default:
  1189. return nil, fmt.Errorf("Unknown pool type")
  1190. }
  1191. return c, nil
  1192. }
  1193. func (srv *Server) poolRemove(kind, key string) error {
  1194. srv.Lock()
  1195. defer srv.Unlock()
  1196. switch kind {
  1197. case "pull":
  1198. if c, exists := srv.pullingPool[key]; exists {
  1199. close(c)
  1200. delete(srv.pullingPool, key)
  1201. }
  1202. case "push":
  1203. if c, exists := srv.pushingPool[key]; exists {
  1204. close(c)
  1205. delete(srv.pushingPool, key)
  1206. }
  1207. default:
  1208. return fmt.Errorf("Unknown pool type")
  1209. }
  1210. return nil
  1211. }
  1212. func (srv *Server) ImagePull(job *engine.Job) engine.Status {
  1213. if n := len(job.Args); n != 1 && n != 2 {
  1214. return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
  1215. }
  1216. var (
  1217. localName = job.Args[0]
  1218. tag string
  1219. sf = utils.NewStreamFormatter(job.GetenvBool("json"))
  1220. authConfig = &registry.AuthConfig{}
  1221. metaHeaders map[string][]string
  1222. )
  1223. if len(job.Args) > 1 {
  1224. tag = job.Args[1]
  1225. }
  1226. job.GetenvJson("authConfig", authConfig)
  1227. job.GetenvJson("metaHeaders", metaHeaders)
  1228. c, err := srv.poolAdd("pull", localName+":"+tag)
  1229. if err != nil {
  1230. if c != nil {
  1231. // Another pull of the same repository is already taking place; just wait for it to finish
  1232. job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
  1233. <-c
  1234. return engine.StatusOK
  1235. }
  1236. return job.Error(err)
  1237. }
  1238. defer srv.poolRemove("pull", localName+":"+tag)
  1239. // Resolve the Repository name from fqn to endpoint + name
  1240. hostname, remoteName, err := registry.ResolveRepositoryName(localName)
  1241. if err != nil {
  1242. return job.Error(err)
  1243. }
  1244. endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
  1245. if err != nil {
  1246. return job.Error(err)
  1247. }
  1248. r, err := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint)
  1249. if err != nil {
  1250. return job.Error(err)
  1251. }
  1252. if endpoint == registry.IndexServerAddress() {
  1253. // If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
  1254. localName = remoteName
  1255. }
  1256. if err = srv.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
  1257. return job.Error(err)
  1258. }
  1259. return engine.StatusOK
  1260. }
  1261. // Retrieve the all the images to be uploaded in the correct order
  1262. func (srv *Server) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) {
  1263. var (
  1264. imageList []string
  1265. imagesSeen map[string]bool = make(map[string]bool)
  1266. tagsByImage map[string][]string = make(map[string][]string)
  1267. )
  1268. for tag, id := range localRepo {
  1269. if requestedTag != "" && requestedTag != tag {
  1270. continue
  1271. }
  1272. var imageListForThisTag []string
  1273. tagsByImage[id] = append(tagsByImage[id], tag)
  1274. for img, err := srv.daemon.Graph().Get(id); img != nil; img, err = img.GetParent() {
  1275. if err != nil {
  1276. return nil, nil, err
  1277. }
  1278. if imagesSeen[img.ID] {
  1279. // This image is already on the list, we can ignore it and all its parents
  1280. break
  1281. }
  1282. imagesSeen[img.ID] = true
  1283. imageListForThisTag = append(imageListForThisTag, img.ID)
  1284. }
  1285. // reverse the image list for this tag (so the "most"-parent image is first)
  1286. for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 {
  1287. imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i]
  1288. }
  1289. // append to main image list
  1290. imageList = append(imageList, imageListForThisTag...)
  1291. }
  1292. if len(imageList) == 0 {
  1293. return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
  1294. }
  1295. utils.Debugf("Image list: %v", imageList)
  1296. utils.Debugf("Tags by image: %v", tagsByImage)
  1297. return imageList, tagsByImage, nil
  1298. }
// pushRepository pushes all images of localRepo (or only the one tagged
// tag, if non-empty) to remoteName on the registry, then finalizes the
// remote tag index.
func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error {
	out = utils.NewWriteFlusher(out)
	utils.Debugf("Local repo: %s", localRepo)
	// Image IDs in upload order (parents first) plus the tags per image.
	imgList, tagsByImage, err := srv.getImageList(localRepo, tag)
	if err != nil {
		return err
	}
	out.Write(sf.FormatStatus("", "Sending image list"))
	var (
		repoData   *registry.RepositoryData
		imageIndex []*registry.ImgData
	)
	for _, imgId := range imgList {
		if tags, exists := tagsByImage[imgId]; exists {
			// If an image has tags you must add an entry in the image index
			// for each tag
			for _, tag := range tags {
				imageIndex = append(imageIndex, &registry.ImgData{
					ID:  imgId,
					Tag: tag,
				})
			}
		} else {
			// If the image does not have a tag it still needs to be sent to the
			// registry with an empty tag so that it is accociated with the repository
			imageIndex = append(imageIndex, &registry.ImgData{
				ID:  imgId,
				Tag: "",
			})
		}
	}
	utils.Debugf("Preparing to push %s with the following images and tags\n", localRepo)
	for _, data := range imageIndex {
		utils.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag)
	}
	// Register all the images in a repository with the registry
	// If an image is not in this list it will not be associated with the repository
	repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil)
	if err != nil {
		return err
	}
	// Number of tags reported in the status line: all of them, or 1 when
	// pushing a single tag.
	nTag := 1
	if tag == "" {
		nTag = len(localRepo)
	}
	for _, ep := range repoData.Endpoints {
		out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, nTag))
		for _, imgId := range imgList {
			if r.LookupRemoteImage(imgId, ep, repoData.Tokens) {
				// The endpoint already has this image: skip the upload.
				out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId)))
			} else {
				if _, err := srv.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil {
					// FIXME: Continue on error?
					return err
				}
			}
			// Associate every tag of this image with the pushed revision.
			for _, tag := range tagsByImage[imgId] {
				out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag))
				if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil {
					return err
				}
			}
		}
	}
	// Finalize: re-send the index with validation and the endpoints used.
	if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil {
		return err
	}
	return nil
}
// pushImage uploads a single image to endpoint ep in three steps — json
// metadata, layer tarball, then checksum — and returns the layer checksum.
// An image the registry already has is not an error (returns "", nil).
func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
	out = utils.NewWriteFlusher(out)
	// Raw image JSON straight from the graph's on-disk layout.
	jsonRaw, err := ioutil.ReadFile(path.Join(srv.daemon.Graph().Root, imgID, "json"))
	if err != nil {
		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil))
	imgData := &registry.ImgData{
		ID: imgID,
	}
	// Send the json
	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
		if err == registry.ErrAlreadyExists {
			// Not a failure: the registry already has this image.
			out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}
	// Export the layer as a temporary uncompressed archive on disk.
	layerData, err := srv.daemon.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
	if err != nil {
		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	defer os.RemoveAll(layerData.Name())
	// Send the layer
	utils.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
		return "", err
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil))
	return imgData.Checksum, nil
}
  1406. // FIXME: Allow to interrupt current push when new push of same image is done.
  1407. func (srv *Server) ImagePush(job *engine.Job) engine.Status {
  1408. if n := len(job.Args); n != 1 {
  1409. return job.Errorf("Usage: %s IMAGE", job.Name)
  1410. }
  1411. var (
  1412. localName = job.Args[0]
  1413. sf = utils.NewStreamFormatter(job.GetenvBool("json"))
  1414. authConfig = &registry.AuthConfig{}
  1415. metaHeaders map[string][]string
  1416. )
  1417. tag := job.Getenv("tag")
  1418. job.GetenvJson("authConfig", authConfig)
  1419. job.GetenvJson("metaHeaders", metaHeaders)
  1420. if _, err := srv.poolAdd("push", localName); err != nil {
  1421. return job.Error(err)
  1422. }
  1423. defer srv.poolRemove("push", localName)
  1424. // Resolve the Repository name from fqn to endpoint + name
  1425. hostname, remoteName, err := registry.ResolveRepositoryName(localName)
  1426. if err != nil {
  1427. return job.Error(err)
  1428. }
  1429. endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
  1430. if err != nil {
  1431. return job.Error(err)
  1432. }
  1433. img, err := srv.daemon.Graph().Get(localName)
  1434. r, err2 := registry.NewRegistry(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint)
  1435. if err2 != nil {
  1436. return job.Error(err2)
  1437. }
  1438. if err != nil {
  1439. reposLen := 1
  1440. if tag == "" {
  1441. reposLen = len(srv.daemon.Repositories().Repositories[localName])
  1442. }
  1443. job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
  1444. // If it fails, try to get the repository
  1445. if localRepo, exists := srv.daemon.Repositories().Repositories[localName]; exists {
  1446. if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
  1447. return job.Error(err)
  1448. }
  1449. return engine.StatusOK
  1450. }
  1451. return job.Error(err)
  1452. }
  1453. var token []string
  1454. job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
  1455. if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil {
  1456. return job.Error(err)
  1457. }
  1458. return engine.StatusOK
  1459. }
// ImageImport handles the "import" job: it creates a new image from a
// raw filesystem tarball, read either from job.Stdin (SRC == "-") or
// downloaded from a URL, and optionally tags it as REPO[:TAG].
//
// Usage: import SRC REPO [TAG]
// Env: json (bool)
func (srv *Server) ImageImport(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 2 && n != 3 {
		return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
	}
	var (
		src     = job.Args[0]
		repo    = job.Args[1]
		tag     string
		sf      = utils.NewStreamFormatter(job.GetenvBool("json"))
		archive archive.ArchiveReader
		resp    *http.Response
	)
	if len(job.Args) > 2 {
		tag = job.Args[2]
	}
	if src == "-" {
		// "-" means the tarball is streamed on stdin.
		archive = job.Stdin
	} else {
		u, err := url.Parse(src)
		if err != nil {
			return job.Error(err)
		}
		// A bare host like "example.com/foo.tar" parses with an empty
		// scheme; default to plain http in that case.
		if u.Scheme == "" {
			u.Scheme = "http"
			u.Host = src
			u.Path = ""
		}
		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
		// Download with curl (pretty progress bar)
		// If curl is not available, fallback to http.Get()
		resp, err = utils.Download(u.String())
		if err != nil {
			return job.Error(err)
		}
		// Wrap the body so download progress is streamed to the client;
		// closing the reader also releases the underlying response body.
		progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
		defer progressReader.Close()
		archive = progressReader
	}
	img, err := srv.daemon.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
	if err != nil {
		return job.Error(err)
	}
	// Optionally register the image at REPO/TAG
	if repo != "" {
		if err := srv.daemon.Repositories().Set(repo, tag, img.ID, true); err != nil {
			return job.Error(err)
		}
	}
	job.Stdout.Write(sf.FormatStatus("", img.ID))
	return engine.StatusOK
}
  1511. func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
  1512. var name string
  1513. if len(job.Args) == 1 {
  1514. name = job.Args[0]
  1515. } else if len(job.Args) > 1 {
  1516. return job.Errorf("Usage: %s", job.Name)
  1517. }
  1518. config := runconfig.ContainerConfigFromJob(job)
  1519. if config.Memory != 0 && config.Memory < 524288 {
  1520. return job.Errorf("Minimum memory limit allowed is 512k")
  1521. }
  1522. if config.Memory > 0 && !srv.daemon.SystemConfig().MemoryLimit {
  1523. job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
  1524. config.Memory = 0
  1525. }
  1526. if config.Memory > 0 && !srv.daemon.SystemConfig().SwapLimit {
  1527. job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
  1528. config.MemorySwap = -1
  1529. }
  1530. container, buildWarnings, err := srv.daemon.Create(config, name)
  1531. if err != nil {
  1532. if srv.daemon.Graph().IsNotExist(err) {
  1533. _, tag := utils.ParseRepositoryTag(config.Image)
  1534. if tag == "" {
  1535. tag = graph.DEFAULTTAG
  1536. }
  1537. return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
  1538. }
  1539. return job.Error(err)
  1540. }
  1541. if !container.Config.NetworkDisabled && srv.daemon.SystemConfig().IPv4ForwardingDisabled {
  1542. job.Errorf("IPv4 forwarding is disabled.\n")
  1543. }
  1544. srv.LogEvent("create", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  1545. // FIXME: this is necessary because daemon.Create might return a nil container
  1546. // with a non-nil error. This should not happen! Once it's fixed we
  1547. // can remove this workaround.
  1548. if container != nil {
  1549. job.Printf("%s\n", container.ID)
  1550. }
  1551. for _, warning := range buildWarnings {
  1552. job.Errorf("%s\n", warning)
  1553. }
  1554. return engine.StatusOK
  1555. }
  1556. func (srv *Server) ContainerRestart(job *engine.Job) engine.Status {
  1557. if len(job.Args) != 1 {
  1558. return job.Errorf("Usage: %s CONTAINER\n", job.Name)
  1559. }
  1560. var (
  1561. name = job.Args[0]
  1562. t = 10
  1563. )
  1564. if job.EnvExists("t") {
  1565. t = job.GetenvInt("t")
  1566. }
  1567. if container := srv.daemon.Get(name); container != nil {
  1568. if err := container.Restart(int(t)); err != nil {
  1569. return job.Errorf("Cannot restart container %s: %s\n", name, err)
  1570. }
  1571. srv.LogEvent("restart", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  1572. } else {
  1573. return job.Errorf("No such container: %s\n", name)
  1574. }
  1575. return engine.StatusOK
  1576. }
  1577. func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
  1578. if len(job.Args) != 1 {
  1579. return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
  1580. }
  1581. name := job.Args[0]
  1582. removeVolume := job.GetenvBool("removeVolume")
  1583. removeLink := job.GetenvBool("removeLink")
  1584. forceRemove := job.GetenvBool("forceRemove")
  1585. container := srv.daemon.Get(name)
  1586. if removeLink {
  1587. if container == nil {
  1588. return job.Errorf("No such link: %s", name)
  1589. }
  1590. name, err := daemon.GetFullContainerName(name)
  1591. if err != nil {
  1592. job.Error(err)
  1593. }
  1594. parent, n := path.Split(name)
  1595. if parent == "/" {
  1596. return job.Errorf("Conflict, cannot remove the default name of the container")
  1597. }
  1598. pe := srv.daemon.ContainerGraph().Get(parent)
  1599. if pe == nil {
  1600. return job.Errorf("Cannot get parent %s for name %s", parent, name)
  1601. }
  1602. parentContainer := srv.daemon.Get(pe.ID())
  1603. if parentContainer != nil {
  1604. parentContainer.DisableLink(n)
  1605. }
  1606. if err := srv.daemon.ContainerGraph().Delete(name); err != nil {
  1607. return job.Error(err)
  1608. }
  1609. return engine.StatusOK
  1610. }
  1611. if container != nil {
  1612. if container.State.IsRunning() {
  1613. if forceRemove {
  1614. if err := container.Stop(5); err != nil {
  1615. return job.Errorf("Could not stop running container, cannot remove - %v", err)
  1616. }
  1617. } else {
  1618. return job.Errorf("Impossible to remove a running container, please stop it first or use -f")
  1619. }
  1620. }
  1621. if err := srv.daemon.Destroy(container); err != nil {
  1622. return job.Errorf("Cannot destroy container %s: %s", name, err)
  1623. }
  1624. srv.LogEvent("destroy", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  1625. if removeVolume {
  1626. var (
  1627. volumes = make(map[string]struct{})
  1628. binds = make(map[string]struct{})
  1629. usedVolumes = make(map[string]*daemon.Container)
  1630. )
  1631. // the volume id is always the base of the path
  1632. getVolumeId := func(p string) string {
  1633. return filepath.Base(strings.TrimSuffix(p, "/layer"))
  1634. }
  1635. // populate bind map so that they can be skipped and not removed
  1636. for _, bind := range container.HostConfig().Binds {
  1637. source := strings.Split(bind, ":")[0]
  1638. // TODO: refactor all volume stuff, all of it
  1639. // it is very important that we eval the link or comparing the keys to container.Volumes will not work
  1640. //
  1641. // eval symlink can fail, ref #5244 if we receive an is not exist error we can ignore it
  1642. p, err := filepath.EvalSymlinks(source)
  1643. if err != nil && !os.IsNotExist(err) {
  1644. return job.Error(err)
  1645. }
  1646. if p != "" {
  1647. source = p
  1648. }
  1649. binds[source] = struct{}{}
  1650. }
  1651. // Store all the deleted containers volumes
  1652. for _, volumeId := range container.Volumes {
  1653. // Skip the volumes mounted from external
  1654. // bind mounts here will will be evaluated for a symlink
  1655. if _, exists := binds[volumeId]; exists {
  1656. continue
  1657. }
  1658. volumeId = getVolumeId(volumeId)
  1659. volumes[volumeId] = struct{}{}
  1660. }
  1661. // Retrieve all volumes from all remaining containers
  1662. for _, container := range srv.daemon.List() {
  1663. for _, containerVolumeId := range container.Volumes {
  1664. containerVolumeId = getVolumeId(containerVolumeId)
  1665. usedVolumes[containerVolumeId] = container
  1666. }
  1667. }
  1668. for volumeId := range volumes {
  1669. // If the requested volu
  1670. if c, exists := usedVolumes[volumeId]; exists {
  1671. log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
  1672. continue
  1673. }
  1674. if err := srv.daemon.Volumes().Delete(volumeId); err != nil {
  1675. return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
  1676. }
  1677. }
  1678. }
  1679. } else {
  1680. return job.Errorf("No such container: %s", name)
  1681. }
  1682. return engine.StatusOK
  1683. }
  1684. func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, noprune bool) error {
  1685. var (
  1686. repoName, tag string
  1687. tags = []string{}
  1688. tagDeleted bool
  1689. )
  1690. repoName, tag = utils.ParseRepositoryTag(name)
  1691. if tag == "" {
  1692. tag = graph.DEFAULTTAG
  1693. }
  1694. img, err := srv.daemon.Repositories().LookupImage(name)
  1695. if err != nil {
  1696. if r, _ := srv.daemon.Repositories().Get(repoName); r != nil {
  1697. return fmt.Errorf("No such image: %s:%s", repoName, tag)
  1698. }
  1699. return fmt.Errorf("No such image: %s", name)
  1700. }
  1701. if strings.Contains(img.ID, name) {
  1702. repoName = ""
  1703. tag = ""
  1704. }
  1705. byParents, err := srv.daemon.Graph().ByParent()
  1706. if err != nil {
  1707. return err
  1708. }
  1709. //If delete by id, see if the id belong only to one repository
  1710. if repoName == "" {
  1711. for _, repoAndTag := range srv.daemon.Repositories().ByID()[img.ID] {
  1712. parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
  1713. if repoName == "" || repoName == parsedRepo {
  1714. repoName = parsedRepo
  1715. if parsedTag != "" {
  1716. tags = append(tags, parsedTag)
  1717. }
  1718. } else if repoName != parsedRepo && !force {
  1719. // the id belongs to multiple repos, like base:latest and user:test,
  1720. // in that case return conflict
  1721. return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name)
  1722. }
  1723. }
  1724. } else {
  1725. tags = append(tags, tag)
  1726. }
  1727. if !first && len(tags) > 0 {
  1728. return nil
  1729. }
  1730. //Untag the current image
  1731. for _, tag := range tags {
  1732. tagDeleted, err = srv.daemon.Repositories().Delete(repoName, tag)
  1733. if err != nil {
  1734. return err
  1735. }
  1736. if tagDeleted {
  1737. out := &engine.Env{}
  1738. out.Set("Untagged", repoName+":"+tag)
  1739. imgs.Add(out)
  1740. srv.LogEvent("untag", img.ID, "")
  1741. }
  1742. }
  1743. tags = srv.daemon.Repositories().ByID()[img.ID]
  1744. if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
  1745. if len(byParents[img.ID]) == 0 {
  1746. if err := srv.canDeleteImage(img.ID, force, tagDeleted); err != nil {
  1747. return err
  1748. }
  1749. if err := srv.daemon.Repositories().DeleteAll(img.ID); err != nil {
  1750. return err
  1751. }
  1752. if err := srv.daemon.Graph().Delete(img.ID); err != nil {
  1753. return err
  1754. }
  1755. out := &engine.Env{}
  1756. out.Set("Deleted", img.ID)
  1757. imgs.Add(out)
  1758. srv.LogEvent("delete", img.ID, "")
  1759. if img.Parent != "" && !noprune {
  1760. err := srv.DeleteImage(img.Parent, imgs, false, force, noprune)
  1761. if first {
  1762. return err
  1763. }
  1764. }
  1765. }
  1766. }
  1767. return nil
  1768. }
  1769. func (srv *Server) ImageDelete(job *engine.Job) engine.Status {
  1770. if n := len(job.Args); n != 1 {
  1771. return job.Errorf("Usage: %s IMAGE", job.Name)
  1772. }
  1773. imgs := engine.NewTable("", 0)
  1774. if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
  1775. return job.Error(err)
  1776. }
  1777. if len(imgs.Data) == 0 {
  1778. return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
  1779. }
  1780. if _, err := imgs.WriteListTo(job.Stdout); err != nil {
  1781. return job.Error(err)
  1782. }
  1783. return engine.StatusOK
  1784. }
  1785. func (srv *Server) canDeleteImage(imgID string, force, untagged bool) error {
  1786. var message string
  1787. if untagged {
  1788. message = " (docker untagged the image)"
  1789. }
  1790. for _, container := range srv.daemon.List() {
  1791. parent, err := srv.daemon.Repositories().LookupImage(container.Image)
  1792. if err != nil {
  1793. return err
  1794. }
  1795. if err := parent.WalkHistory(func(p *image.Image) error {
  1796. if imgID == p.ID {
  1797. if container.State.IsRunning() {
  1798. if force {
  1799. return fmt.Errorf("Conflict, cannot force delete %s because the running container %s is using it%s, stop it and retry", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
  1800. }
  1801. return fmt.Errorf("Conflict, cannot delete %s because the running container %s is using it%s, stop it and use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
  1802. } else if !force {
  1803. return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it%s, use -f to force", utils.TruncateID(imgID), utils.TruncateID(container.ID), message)
  1804. }
  1805. }
  1806. return nil
  1807. }); err != nil {
  1808. return err
  1809. }
  1810. }
  1811. return nil
  1812. }
  1813. func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
  1814. // Retrieve all images
  1815. images, err := srv.daemon.Graph().Map()
  1816. if err != nil {
  1817. return nil, err
  1818. }
  1819. // Store the tree in a map of map (map[parentId][childId])
  1820. imageMap := make(map[string]map[string]struct{})
  1821. for _, img := range images {
  1822. if _, exists := imageMap[img.Parent]; !exists {
  1823. imageMap[img.Parent] = make(map[string]struct{})
  1824. }
  1825. imageMap[img.Parent][img.ID] = struct{}{}
  1826. }
  1827. // Loop on the children of the given image and check the config
  1828. var match *image.Image
  1829. for elem := range imageMap[imgID] {
  1830. img, err := srv.daemon.Graph().Get(elem)
  1831. if err != nil {
  1832. return nil, err
  1833. }
  1834. if runconfig.Compare(&img.ContainerConfig, config) {
  1835. if match == nil || match.Created.Before(img.Created) {
  1836. match = img
  1837. }
  1838. }
  1839. }
  1840. return match, nil
  1841. }
  1842. func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
  1843. if len(job.Args) < 1 {
  1844. return job.Errorf("Usage: %s container_id", job.Name)
  1845. }
  1846. var (
  1847. name = job.Args[0]
  1848. daemon = srv.daemon
  1849. container = daemon.Get(name)
  1850. )
  1851. if container == nil {
  1852. return job.Errorf("No such container: %s", name)
  1853. }
  1854. // If no environment was set, then no hostconfig was passed.
  1855. if len(job.Environ()) > 0 {
  1856. hostConfig := runconfig.ContainerHostConfigFromJob(job)
  1857. // Validate the HostConfig binds. Make sure that:
  1858. // 1) the source of a bind mount isn't /
  1859. // The bind mount "/:/foo" isn't allowed.
  1860. // 2) Check that the source exists
  1861. // The source to be bind mounted must exist.
  1862. for _, bind := range hostConfig.Binds {
  1863. splitBind := strings.Split(bind, ":")
  1864. source := splitBind[0]
  1865. // refuse to bind mount "/" to the container
  1866. if source == "/" {
  1867. return job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind)
  1868. }
  1869. // ensure the source exists on the host
  1870. _, err := os.Stat(source)
  1871. if err != nil && os.IsNotExist(err) {
  1872. err = os.MkdirAll(source, 0755)
  1873. if err != nil {
  1874. return job.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error())
  1875. }
  1876. }
  1877. }
  1878. // Register any links from the host config before starting the container
  1879. if err := srv.daemon.RegisterLinks(container, hostConfig); err != nil {
  1880. return job.Error(err)
  1881. }
  1882. container.SetHostConfig(hostConfig)
  1883. container.ToDisk()
  1884. }
  1885. if err := container.Start(); err != nil {
  1886. return job.Errorf("Cannot start container %s: %s", name, err)
  1887. }
  1888. srv.LogEvent("start", container.ID, daemon.Repositories().ImageName(container.Image))
  1889. return engine.StatusOK
  1890. }
  1891. func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
  1892. if len(job.Args) != 1 {
  1893. return job.Errorf("Usage: %s CONTAINER\n", job.Name)
  1894. }
  1895. var (
  1896. name = job.Args[0]
  1897. t = 10
  1898. )
  1899. if job.EnvExists("t") {
  1900. t = job.GetenvInt("t")
  1901. }
  1902. if container := srv.daemon.Get(name); container != nil {
  1903. if err := container.Stop(int(t)); err != nil {
  1904. return job.Errorf("Cannot stop container %s: %s\n", name, err)
  1905. }
  1906. srv.LogEvent("stop", container.ID, srv.daemon.Repositories().ImageName(container.Image))
  1907. } else {
  1908. return job.Errorf("No such container: %s\n", name)
  1909. }
  1910. return engine.StatusOK
  1911. }
  1912. func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
  1913. if len(job.Args) != 1 {
  1914. return job.Errorf("Usage: %s", job.Name)
  1915. }
  1916. name := job.Args[0]
  1917. if container := srv.daemon.Get(name); container != nil {
  1918. status := container.Wait()
  1919. job.Printf("%d\n", status)
  1920. return engine.StatusOK
  1921. }
  1922. return job.Errorf("%s: no such container: %s", job.Name, name)
  1923. }
  1924. func (srv *Server) ContainerResize(job *engine.Job) engine.Status {
  1925. if len(job.Args) != 3 {
  1926. return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
  1927. }
  1928. name := job.Args[0]
  1929. height, err := strconv.Atoi(job.Args[1])
  1930. if err != nil {
  1931. return job.Error(err)
  1932. }
  1933. width, err := strconv.Atoi(job.Args[2])
  1934. if err != nil {
  1935. return job.Error(err)
  1936. }
  1937. if container := srv.daemon.Get(name); container != nil {
  1938. if err := container.Resize(height, width); err != nil {
  1939. return job.Error(err)
  1940. }
  1941. return engine.StatusOK
  1942. }
  1943. return job.Errorf("No such container: %s", name)
  1944. }
  1945. func (srv *Server) ContainerLogs(job *engine.Job) engine.Status {
  1946. if len(job.Args) != 1 {
  1947. return job.Errorf("Usage: %s CONTAINER\n", job.Name)
  1948. }
  1949. var (
  1950. name = job.Args[0]
  1951. stdout = job.GetenvBool("stdout")
  1952. stderr = job.GetenvBool("stderr")
  1953. follow = job.GetenvBool("follow")
  1954. times = job.GetenvBool("timestamps")
  1955. format string
  1956. )
  1957. if !(stdout || stderr) {
  1958. return job.Errorf("You must choose at least one stream")
  1959. }
  1960. if times {
  1961. format = time.StampMilli
  1962. }
  1963. container := srv.daemon.Get(name)
  1964. if container == nil {
  1965. return job.Errorf("No such container: %s", name)
  1966. }
  1967. cLog, err := container.ReadLog("json")
  1968. if err != nil && os.IsNotExist(err) {
  1969. // Legacy logs
  1970. utils.Debugf("Old logs format")
  1971. if stdout {
  1972. cLog, err := container.ReadLog("stdout")
  1973. if err != nil {
  1974. utils.Errorf("Error reading logs (stdout): %s", err)
  1975. } else if _, err := io.Copy(job.Stdout, cLog); err != nil {
  1976. utils.Errorf("Error streaming logs (stdout): %s", err)
  1977. }
  1978. }
  1979. if stderr {
  1980. cLog, err := container.ReadLog("stderr")
  1981. if err != nil {
  1982. utils.Errorf("Error reading logs (stderr): %s", err)
  1983. } else if _, err := io.Copy(job.Stderr, cLog); err != nil {
  1984. utils.Errorf("Error streaming logs (stderr): %s", err)
  1985. }
  1986. }
  1987. } else if err != nil {
  1988. utils.Errorf("Error reading logs (json): %s", err)
  1989. } else {
  1990. dec := json.NewDecoder(cLog)
  1991. for {
  1992. l := &utils.JSONLog{}
  1993. if err := dec.Decode(l); err == io.EOF {
  1994. break
  1995. } else if err != nil {
  1996. utils.Errorf("Error streaming logs: %s", err)
  1997. break
  1998. }
  1999. logLine := l.Log
  2000. if times {
  2001. logLine = fmt.Sprintf("[%s] %s", l.Created.Format(format), logLine)
  2002. }
  2003. if l.Stream == "stdout" && stdout {
  2004. fmt.Fprintf(job.Stdout, "%s", logLine)
  2005. }
  2006. if l.Stream == "stderr" && stderr {
  2007. fmt.Fprintf(job.Stderr, "%s", logLine)
  2008. }
  2009. }
  2010. }
  2011. if follow {
  2012. errors := make(chan error, 2)
  2013. if stdout {
  2014. stdoutPipe := container.StdoutLogPipe()
  2015. go func() {
  2016. errors <- utils.WriteLog(stdoutPipe, job.Stdout, format)
  2017. }()
  2018. }
  2019. if stderr {
  2020. stderrPipe := container.StderrLogPipe()
  2021. go func() {
  2022. errors <- utils.WriteLog(stderrPipe, job.Stderr, format)
  2023. }()
  2024. }
  2025. err := <-errors
  2026. if err != nil {
  2027. utils.Errorf("%s", err)
  2028. }
  2029. }
  2030. return engine.StatusOK
  2031. }
  2032. func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
  2033. if len(job.Args) != 1 {
  2034. return job.Errorf("Usage: %s CONTAINER\n", job.Name)
  2035. }
  2036. var (
  2037. name = job.Args[0]
  2038. logs = job.GetenvBool("logs")
  2039. stream = job.GetenvBool("stream")
  2040. stdin = job.GetenvBool("stdin")
  2041. stdout = job.GetenvBool("stdout")
  2042. stderr = job.GetenvBool("stderr")
  2043. )
  2044. container := srv.daemon.Get(name)
  2045. if container == nil {
  2046. return job.Errorf("No such container: %s", name)
  2047. }
  2048. //logs
  2049. if logs {
  2050. cLog, err := container.ReadLog("json")
  2051. if err != nil && os.IsNotExist(err) {
  2052. // Legacy logs
  2053. utils.Debugf("Old logs format")
  2054. if stdout {
  2055. cLog, err := container.ReadLog("stdout")
  2056. if err != nil {
  2057. utils.Errorf("Error reading logs (stdout): %s", err)
  2058. } else if _, err := io.Copy(job.Stdout, cLog); err != nil {
  2059. utils.Errorf("Error streaming logs (stdout): %s", err)
  2060. }
  2061. }
  2062. if stderr {
  2063. cLog, err := container.ReadLog("stderr")
  2064. if err != nil {
  2065. utils.Errorf("Error reading logs (stderr): %s", err)
  2066. } else if _, err := io.Copy(job.Stderr, cLog); err != nil {
  2067. utils.Errorf("Error streaming logs (stderr): %s", err)
  2068. }
  2069. }
  2070. } else if err != nil {
  2071. utils.Errorf("Error reading logs (json): %s", err)
  2072. } else {
  2073. dec := json.NewDecoder(cLog)
  2074. for {
  2075. l := &utils.JSONLog{}
  2076. if err := dec.Decode(l); err == io.EOF {
  2077. break
  2078. } else if err != nil {
  2079. utils.Errorf("Error streaming logs: %s", err)
  2080. break
  2081. }
  2082. if l.Stream == "stdout" && stdout {
  2083. fmt.Fprintf(job.Stdout, "%s", l.Log)
  2084. }
  2085. if l.Stream == "stderr" && stderr {
  2086. fmt.Fprintf(job.Stderr, "%s", l.Log)
  2087. }
  2088. }
  2089. }
  2090. }
  2091. //stream
  2092. if stream {
  2093. var (
  2094. cStdin io.ReadCloser
  2095. cStdout, cStderr io.Writer
  2096. cStdinCloser io.Closer
  2097. )
  2098. if stdin {
  2099. r, w := io.Pipe()
  2100. go func() {
  2101. defer w.Close()
  2102. defer utils.Debugf("Closing buffered stdin pipe")
  2103. io.Copy(w, job.Stdin)
  2104. }()
  2105. cStdin = r
  2106. cStdinCloser = job.Stdin
  2107. }
  2108. if stdout {
  2109. cStdout = job.Stdout
  2110. }
  2111. if stderr {
  2112. cStderr = job.Stderr
  2113. }
  2114. <-srv.daemon.Attach(container, cStdin, cStdinCloser, cStdout, cStderr)
  2115. // If we are in stdinonce mode, wait for the process to end
  2116. // otherwise, simply return
  2117. if container.Config.StdinOnce && !container.Config.Tty {
  2118. container.Wait()
  2119. }
  2120. }
  2121. return engine.StatusOK
  2122. }
  2123. func (srv *Server) ContainerInspect(name string) (*daemon.Container, error) {
  2124. if container := srv.daemon.Get(name); container != nil {
  2125. return container, nil
  2126. }
  2127. return nil, fmt.Errorf("No such container: %s", name)
  2128. }
  2129. func (srv *Server) ImageInspect(name string) (*image.Image, error) {
  2130. if image, err := srv.daemon.Repositories().LookupImage(name); err == nil && image != nil {
  2131. return image, nil
  2132. }
  2133. return nil, fmt.Errorf("No such image: %s", name)
  2134. }
  2135. func (srv *Server) JobInspect(job *engine.Job) engine.Status {
  2136. // TODO: deprecate KIND/conflict
  2137. if n := len(job.Args); n != 2 {
  2138. return job.Errorf("Usage: %s CONTAINER|IMAGE KIND", job.Name)
  2139. }
  2140. var (
  2141. name = job.Args[0]
  2142. kind = job.Args[1]
  2143. object interface{}
  2144. conflict = job.GetenvBool("conflict") //should the job detect conflict between containers and images
  2145. image, errImage = srv.ImageInspect(name)
  2146. container, errContainer = srv.ContainerInspect(name)
  2147. )
  2148. if conflict && image != nil && container != nil {
  2149. return job.Errorf("Conflict between containers and images")
  2150. }
  2151. switch kind {
  2152. case "image":
  2153. if errImage != nil {
  2154. return job.Error(errImage)
  2155. }
  2156. object = image
  2157. case "container":
  2158. if errContainer != nil {
  2159. return job.Error(errContainer)
  2160. }
  2161. object = &struct {
  2162. *daemon.Container
  2163. HostConfig *runconfig.HostConfig
  2164. }{container, container.HostConfig()}
  2165. default:
  2166. return job.Errorf("Unknown kind: %s", kind)
  2167. }
  2168. b, err := json.Marshal(object)
  2169. if err != nil {
  2170. return job.Error(err)
  2171. }
  2172. job.Stdout.Write(b)
  2173. return engine.StatusOK
  2174. }
  2175. func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
  2176. if len(job.Args) != 2 {
  2177. return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
  2178. }
  2179. var (
  2180. name = job.Args[0]
  2181. resource = job.Args[1]
  2182. )
  2183. if container := srv.daemon.Get(name); container != nil {
  2184. data, err := container.Copy(resource)
  2185. if err != nil {
  2186. return job.Error(err)
  2187. }
  2188. defer data.Close()
  2189. if _, err := io.Copy(job.Stdout, data); err != nil {
  2190. return job.Error(err)
  2191. }
  2192. return engine.StatusOK
  2193. }
  2194. return job.Errorf("No such container: %s", name)
  2195. }
  2196. func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) {
  2197. daemon, err := daemon.NewDaemon(config, eng)
  2198. if err != nil {
  2199. return nil, err
  2200. }
  2201. srv := &Server{
  2202. Eng: eng,
  2203. daemon: daemon,
  2204. pullingPool: make(map[string]chan struct{}),
  2205. pushingPool: make(map[string]chan struct{}),
  2206. events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events
  2207. listeners: make(map[int64]chan utils.JSONMessage),
  2208. running: true,
  2209. }
  2210. daemon.SetServer(srv)
  2211. return srv, nil
  2212. }
  2213. func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage {
  2214. now := time.Now().UTC().Unix()
  2215. jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
  2216. srv.AddEvent(jm)
  2217. for _, c := range srv.listeners {
  2218. select { // non blocking channel
  2219. case c <- jm:
  2220. default:
  2221. }
  2222. }
  2223. return &jm
  2224. }
  2225. func (srv *Server) AddEvent(jm utils.JSONMessage) {
  2226. srv.Lock()
  2227. defer srv.Unlock()
  2228. srv.events = append(srv.events, jm)
  2229. }
  2230. func (srv *Server) GetEvents() []utils.JSONMessage {
  2231. srv.RLock()
  2232. defer srv.RUnlock()
  2233. return srv.events
  2234. }
  2235. func (srv *Server) SetRunning(status bool) {
  2236. srv.Lock()
  2237. defer srv.Unlock()
  2238. srv.running = status
  2239. }
  2240. func (srv *Server) IsRunning() bool {
  2241. srv.RLock()
  2242. defer srv.RUnlock()
  2243. return srv.running
  2244. }
  2245. func (srv *Server) Close() error {
  2246. if srv == nil {
  2247. return nil
  2248. }
  2249. srv.SetRunning(false)
  2250. if srv.daemon == nil {
  2251. return nil
  2252. }
  2253. return srv.daemon.Close()
  2254. }
  2255. type Server struct {
  2256. sync.RWMutex
  2257. daemon *daemon.Daemon
  2258. pullingPool map[string]chan struct{}
  2259. pushingPool map[string]chan struct{}
  2260. events []utils.JSONMessage
  2261. listeners map[int64]chan utils.JSONMessage
  2262. Eng *engine.Engine
  2263. running bool
  2264. }