docker_api_swarm_test.go

// +build !windows

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/cloudflare/cfssl/helpers"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/integration-cli/checker"
	"github.com/docker/docker/integration-cli/daemon"
	"github.com/go-check/check"
)

var defaultReconciliationTimeout = 30 * time.Second

func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
	// TODO: should find a better way than /info to verify that components are running
	d1 := s.AddDaemon(c, true, true)
	info, err := d1.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.True)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	d2 := s.AddDaemon(c, true, false)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	// Leaving cluster
	c.Assert(d2.Leave(false), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.JoinTokens(c).Worker, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	// Current state restoring after restarts
	d1.Stop(c)
	d2.Stop(c)
	d1.Start(c)
	d2.Start(c)

	info, err = d1.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.True)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
}

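// TestAPISwarmJoinToken verifies that joining requires a valid worker join
// token, that rotating the tokens invalidates previously issued ones, and that
// updating the swarm spec alone leaves the tokens unchanged.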
func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)

	// TODO: error message differs depending on whether some components of the token are valid
	d2 := s.AddDaemon(c, false, false)
	err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info, err := d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.ListenAddr}})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "invalid join token")
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	workerToken := d1.JoinTokens(c).Worker
	c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.Leave(false), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// change tokens
	d1.RotateTokens(c)
	err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	workerToken = d1.JoinTokens(c).Worker
	c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.Leave(false), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// change spec, don't change tokens
	d1.UpdateSwarm(c, func(s *swarm.Spec) {})
	err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.Leave(false), checker.IsNil)
	info, err = d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
}

func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) {
	// TODO: when root rotation is in, convert to a series of root rotation tests instead.
	// currently just makes sure that we don't have to provide a CA certificate when
	// providing an external CA
	d1 := s.AddDaemon(c, false, false)
	c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
	d1.UpdateSwarm(c, func(s *swarm.Spec) {
		s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
			{
				Protocol: swarm.ExternalCAProtocolCFSSL,
				URL:      "https://thishasnoca.org",
			},
		}
	})
	info, err := d1.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 1)
}

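// TestAPISwarmCAHash verifies that a join token whose CA hash segment does not
// match the cluster's root CA fingerprint is rejected.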
func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, false, false)
	splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
	splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
	replacementToken := strings.Join(splitToken, "-")
	err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.ListenAddr}})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint")
}

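// TestAPISwarmPromoteDemote promotes a worker to manager and demotes it again,
// and checks that demoting the last remaining manager is refused.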
func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
	d2 := s.AddDaemon(c, true, false)

	info, err := d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})
	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)

	d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleWorker
	})
	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False)

	// Wait for the role to change to worker in the cert. This is partially
	// done because it's something worth testing in its own right, and
	// partially because changing the role from manager to worker and then
	// back to manager quickly might cause the node to pause for a while
	// while waiting for the role to change to worker, and the test can
	// time out during this interval.
	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
		certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
		if err != nil {
			return "", check.Commentf("error: %v", err)
		}
		certs, err := helpers.ParseCertificatesPEM(certBytes)
		if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
			return certs[0].Subject.OrganizationalUnit[0], nil
		}
		return "", check.Commentf("could not get organizational unit from certificate")
	}, checker.Equals, "swarm-worker")

	// Demoting last node should fail
	node := d1.GetNode(c, d1.NodeID)
	node.Spec.Role = swarm.NodeRoleWorker
	url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
	status, out, err := d1.SockRequest("POST", url, node.Spec)
	c.Assert(err, checker.IsNil)
	c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("output: %q", string(out)))
	// The warning specific to demoting the last manager is best-effort and
	// won't appear until the Role field of the demoted manager has been
	// updated.
	// Yes, I know this looks silly, but checker.Matches is broken, since
	// it anchors the regexp contrary to the documentation, and this makes
	// it impossible to match something that includes a line break.
	if !strings.Contains(string(out), "last manager of the swarm") {
		c.Assert(string(out), checker.Contains, "this would result in a loss of quorum")
	}
	info, err = d1.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(info.ControlAvailable, checker.True)

	// Promote already demoted node
	d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})
	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
}

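// TestAPISwarmLeaderProxy checks that service create requests sent to any
// manager are proxied to the leader, so every manager ends up listing the
// same services.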
func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) {
	// add three managers, one of these is leader
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	// start a service by hitting each of the 3 managers
	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test1"
	})
	d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test2"
	})
	d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test3"
	})

	// 3 services should be started now, because the requests were proxied to leader
	// query each node and make sure it returns 3 services
	for _, d := range []*daemon.Swarm{d1, d2, d3} {
		services := d.ListServices(c)
		c.Assert(services, checker.HasLen, 3)
	}
}

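// TestAPISwarmLeaderElection stops the current leader and waits for the
// remaining managers to elect a new one, then checks that the leader stays
// the same after the old leader rejoins.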
func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
	// Create 3 nodes
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	// assert that the first node we made is the leader, and the other two are followers
	c.Assert(d1.GetNode(c, d1.NodeID).ManagerStatus.Leader, checker.True)
	c.Assert(d1.GetNode(c, d2.NodeID).ManagerStatus.Leader, checker.False)
	c.Assert(d1.GetNode(c, d3.NodeID).ManagerStatus.Leader, checker.False)

	d1.Stop(c)

	var (
		leader    *daemon.Swarm   // keep track of leader
		followers []*daemon.Swarm // keep track of followers
	)
	checkLeader := func(nodes ...*daemon.Swarm) checkF {
		return func(c *check.C) (interface{}, check.CommentInterface) {
			// clear these out before each run
			leader = nil
			followers = nil
			for _, d := range nodes {
				if d.GetNode(c, d.NodeID).ManagerStatus.Leader {
					leader = d
				} else {
					followers = append(followers, d)
				}
			}
			if leader == nil {
				return false, check.Commentf("no leader elected")
			}
			return true, check.Commentf("elected %v", leader.ID())
		}
	}

	// wait for an election to occur
	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True)

	// assert that we have a new leader
	c.Assert(leader, checker.NotNil)

	// Keep track of the current leader, since we want that to be chosen.
	stableleader := leader

	// add d1, the initial leader, back
	d1.Start(c)

	// TODO(stevvooe): may need to wait for rejoin here

	// wait for possible election
	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True)
	// pick out the leader and the followers again

	// verify that we still only have 1 leader and 2 followers
	c.Assert(leader, checker.NotNil)
	c.Assert(followers, checker.HasLen, 2)

	// and that after we added d1 back, the leader hasn't changed
	c.Assert(leader.NodeID, checker.Equals, stableleader.NodeID)
}

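// TestAPISwarmRaftQuorum verifies that the control API keeps working while a
// raft quorum exists (2 of 3 managers up) and fails once quorum is lost, then
// recovers when a manager comes back.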
func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	d1.CreateService(c, simpleTestService)

	d2.Stop(c)

	// make sure there is a leader
	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)

	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "top1"
	})

	d3.Stop(c)

	var service swarm.Service
	simpleTestService(&service)
	service.Spec.Name = "top2"
	status, out, err := d1.SockRequest("POST", "/services/create", service.Spec)
	c.Assert(err, checker.IsNil)
	c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("deadline exceeded: %s", string(out)))

	d2.Start(c)

	// make sure there is a leader
	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)

	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "top3"
	})
}

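// TestAPISwarmLeaveRemovesContainer checks that force-leaving the swarm
// removes the node's task containers but keeps containers started outside
// the swarm.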
func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	d.CreateService(c, simpleTestService, setInstances(instances))

	id, err := d.Cmd("run", "-d", "busybox", "top")
	c.Assert(err, checker.IsNil)
	id = strings.TrimSpace(id)

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1)

	c.Assert(d.Leave(false), checker.NotNil)
	c.Assert(d.Leave(true), checker.IsNil)

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)

	id2, err := d.Cmd("ps", "-q")
	c.Assert(err, checker.IsNil)
	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
}

// #23629
func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) {
	testRequires(c, Network)
	s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, false, false)
	id, err := d2.Cmd("run", "-d", "busybox", "top")
	c.Assert(err, checker.IsNil)
	id = strings.TrimSpace(id)

	err = d2.Join(swarm.JoinRequest{
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	c.Assert(err, check.NotNil)
	c.Assert(err.Error(), checker.Contains, "Timeout was reached")

	info, err := d2.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)

	c.Assert(d2.Leave(true), checker.IsNil)

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1)

	id2, err := d2.Cmd("ps", "-q")
	c.Assert(err, checker.IsNil)
	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
}

// #23705
func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {
	testRequires(c, Network)
	d := s.AddDaemon(c, false, false)
	err := d.Join(swarm.JoinRequest{
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	c.Assert(err, check.NotNil)
	c.Assert(err.Error(), checker.Contains, "Timeout was reached")

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending)

	d.Stop(c)
	d.Start(c)

	info, err := d.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
}

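// TestAPISwarmManagerRestore restarts (and kills) each manager in turn and
// verifies that the service created beforehand is still known afterwards.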
func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) {
	d1 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))

	d1.GetService(c, id)
	d1.Stop(c)
	d1.Start(c)
	d1.GetService(c, id)

	d2 := s.AddDaemon(c, true, true)
	d2.GetService(c, id)
	d2.Stop(c)
	d2.Start(c)
	d2.GetService(c, id)

	d3 := s.AddDaemon(c, true, true)
	d3.GetService(c, id)
	d3.Stop(c)
	d3.Start(c)
	d3.GetService(c, id)

	d3.Kill()
	time.Sleep(1 * time.Second) // time to handle signal
	d3.Start(c)
	d3.GetService(c, id)
}

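// TestAPISwarmScaleNoRollingUpdate scales a service up and verifies that the
// original containers are kept rather than being replaced by a rolling update.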
func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	id := d.CreateService(c, simpleTestService, setInstances(instances))

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
	containers := d.ActiveContainers()
	instances = 4
	d.UpdateService(c, d.GetService(c, id), setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
	containers2 := d.ActiveContainers()

loop0:
	for _, c1 := range containers {
		for _, c2 := range containers2 {
			if c1 == c2 {
				continue loop0
			}
		}
		c.Errorf("container %v not found in new set %#v", c1, containers2)
	}
}

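// TestAPISwarmInvalidAddress verifies that init and join requests with empty
// addresses are rejected with 400 Bad Request.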
func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) {
	d := s.AddDaemon(c, false, false)
	req := swarm.InitRequest{
		ListenAddr: "",
	}
	status, _, err := d.SockRequest("POST", "/swarm/init", req)
	c.Assert(err, checker.IsNil)
	c.Assert(status, checker.Equals, http.StatusBadRequest)

	req2 := swarm.JoinRequest{
		ListenAddr:  "0.0.0.0:2377",
		RemoteAddrs: []string{""},
	}
	status, _, err = d.SockRequest("POST", "/swarm/join", req2)
	c.Assert(err, checker.IsNil)
	c.Assert(status, checker.Equals, http.StatusBadRequest)
}

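// TestAPISwarmForceNewCluster re-initializes a single manager with
// ForceNewCluster and checks that existing services and tasks survive and
// that new nodes can join the recovered cluster.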
func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances)

	// drain d2, all containers should move to d1
	d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
		n.Spec.Availability = swarm.NodeAvailabilityDrain
	})
	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0)

	d2.Stop(c)

	c.Assert(d1.Init(swarm.InitRequest{
		ForceNewCluster: true,
		Spec:            swarm.Spec{},
	}), checker.IsNil)

	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)

	d3 := s.AddDaemon(c, true, true)
	info, err := d3.SwarmInfo()
	c.Assert(err, checker.IsNil)
	c.Assert(info.ControlAvailable, checker.True)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	instances = 4
	d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
}

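// simpleTestService configures a single-replica busybox service named "top"
// that runs /bin/top with a short restart delay.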
func simpleTestService(s *swarm.Service) {
	ureplicas := uint64(1)
	restartDelay := time.Duration(100 * time.Millisecond)

	s.Spec = swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{
				Image:   "busybox:latest",
				Command: []string{"/bin/top"},
			},
			RestartPolicy: &swarm.RestartPolicy{
				Delay: &restartDelay,
			},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		},
	}
	s.Spec.Name = "top"
}

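// serviceForUpdate is like simpleTestService but also sets update and
// rollback configs so rolling-update behaviour can be exercised.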
func serviceForUpdate(s *swarm.Service) {
	ureplicas := uint64(1)
	restartDelay := time.Duration(100 * time.Millisecond)

	s.Spec = swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{
				Image:   "busybox:latest",
				Command: []string{"/bin/top"},
			},
			RestartPolicy: &swarm.RestartPolicy{
				Delay: &restartDelay,
			},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:   2,
			Delay:         4 * time.Second,
			FailureAction: swarm.UpdateFailureActionContinue,
		},
		RollbackConfig: &swarm.UpdateConfig{
			Parallelism:   3,
			Delay:         4 * time.Second,
			FailureAction: swarm.UpdateFailureActionContinue,
		},
	}
	s.Spec.Name = "updatetest"
}

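// The helpers below return daemon.ServiceConstructor functions that each tweak
// a single field of the service spec; they are meant to be composed when
// creating or updating services in tests.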
func setInstances(replicas int) daemon.ServiceConstructor {
	ureplicas := uint64(replicas)
	return func(s *swarm.Service) {
		s.Spec.Mode = swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		}
	}
}

func setUpdateOrder(order string) daemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.UpdateConfig == nil {
			s.Spec.UpdateConfig = &swarm.UpdateConfig{}
		}
		s.Spec.UpdateConfig.Order = order
	}
}

func setRollbackOrder(order string) daemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.RollbackConfig == nil {
			s.Spec.RollbackConfig = &swarm.UpdateConfig{}
		}
		s.Spec.RollbackConfig.Order = order
	}
}

func setImage(image string) daemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.TaskTemplate.ContainerSpec.Image = image
	}
}

func setFailureAction(failureAction string) daemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.FailureAction = failureAction
	}
}

func setMaxFailureRatio(maxFailureRatio float32) daemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio
	}
}

func setParallelism(parallelism uint64) daemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.Parallelism = parallelism
	}
}

func setConstraints(constraints []string) daemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.Placement == nil {
			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
		}
		s.Spec.TaskTemplate.Placement.Constraints = constraints
	}
}

func setPlacementPrefs(prefs []swarm.PlacementPreference) daemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.Placement == nil {
			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
		}
		s.Spec.TaskTemplate.Placement.Preferences = prefs
	}
}

func setGlobalMode(s *swarm.Service) {
	s.Spec.Mode = swarm.ServiceMode{
		Global: &swarm.GlobalService{},
	}
}

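// checkClusterHealth waits until every node is ready and active, then checks
// that each manager reports a leader and the expected number of managers and
// workers, and that the cluster-wide totals match.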
func checkClusterHealth(c *check.C, cl []*daemon.Swarm, managerCount, workerCount int) {
	var totalMCount, totalWCount int

	for _, d := range cl {
		var (
			info swarm.Info
			err  error
		)

		// check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error
		checkInfo := func(c *check.C) (interface{}, check.CommentInterface) {
			info, err = d.SwarmInfo()
			return err, check.Commentf("cluster not ready in time")
		}
		waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil)
		if !info.ControlAvailable {
			totalWCount++
			continue
		}

		var leaderFound bool
		totalMCount++
		var mCount, wCount int

		for _, n := range d.ListNodes(c) {
			waitReady := func(c *check.C) (interface{}, check.CommentInterface) {
				if n.Status.State == swarm.NodeStateReady {
					return true, nil
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID)
			}
			waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True)

			waitActive := func(c *check.C) (interface{}, check.CommentInterface) {
				if n.Spec.Availability == swarm.NodeAvailabilityActive {
					return true, nil
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID)
			}
			waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True)

			if n.Spec.Role == swarm.NodeRoleManager {
				c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.Info.NodeID))
				if n.ManagerStatus.Leader {
					leaderFound = true
				}
				mCount++
			} else {
				c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.Info.NodeID))
				wCount++
			}
		}
		c.Assert(leaderFound, checker.True, check.Commentf("lack of leader reported by node %s", info.NodeID))
		c.Assert(mCount, checker.Equals, managerCount, check.Commentf("managers count reported by node %s", info.NodeID))
		c.Assert(wCount, checker.Equals, workerCount, check.Commentf("workers count reported by node %s", info.NodeID))
	}
	c.Assert(totalMCount, checker.Equals, managerCount)
	c.Assert(totalWCount, checker.Equals, workerCount)
}

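// TestAPISwarmRestartCluster stops and restarts all nodes of a 5-manager,
// 1-worker cluster in parallel and verifies the cluster comes back healthy.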
func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
	mCount, wCount := 5, 1

	var nodes []*daemon.Swarm
	for i := 0; i < mCount; i++ {
		manager := s.AddDaemon(c, true, true)
		info, err := manager.SwarmInfo()
		c.Assert(err, checker.IsNil)
		c.Assert(info.ControlAvailable, checker.True)
		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
		nodes = append(nodes, manager)
	}

	for i := 0; i < wCount; i++ {
		worker := s.AddDaemon(c, true, false)
		info, err := worker.SwarmInfo()
		c.Assert(err, checker.IsNil)
		c.Assert(info.ControlAvailable, checker.False)
		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
		nodes = append(nodes, worker)
	}

	// stop whole cluster
	{
		var wg sync.WaitGroup
		wg.Add(len(nodes))
		errs := make(chan error, len(nodes))

		for _, d := range nodes {
			go func(daemon *daemon.Swarm) {
				defer wg.Done()
				if err := daemon.StopWithError(); err != nil {
					errs <- err
				}
				// FIXME(vdemeester) This is duplicated…
				if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
					daemon.Root = filepath.Dir(daemon.Root)
				}
			}(d)
		}
		wg.Wait()
		close(errs)
		for err := range errs {
			c.Assert(err, check.IsNil)
		}
	}

	// start whole cluster
	{
		var wg sync.WaitGroup
		wg.Add(len(nodes))
		errs := make(chan error, len(nodes))

		for _, d := range nodes {
			go func(daemon *daemon.Swarm) {
				defer wg.Done()
				if err := daemon.StartWithError("--iptables=false"); err != nil {
					errs <- err
				}
			}(d)
		}
		wg.Wait()
		close(errs)
		for err := range errs {
			c.Assert(err, check.IsNil)
		}
	}

	checkClusterHealth(c, nodes, mCount, wCount)
}

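// TestAPISwarmServicesUpdateWithName updates a service addressed by name
// (rather than ID) and checks that the new replica count is applied.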
func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	id := d.CreateService(c, simpleTestService, setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)

	service := d.GetService(c, id)
	instances = 5
	setInstances(instances)(service)
	url := fmt.Sprintf("/services/%s/update?version=%d", service.Spec.Name, service.Version.Index)
	status, out, err := d.SockRequest("POST", url, service.Spec)
	c.Assert(err, checker.IsNil)
	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
}

// Unlocking an unlocked swarm results in an error
func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *check.C) {
	d := s.AddDaemon(c, true, true)
	err := d.Unlock(swarm.UnlockRequest{UnlockKey: "wrong-key"})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "swarm is not locked")
}

// #29885
func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *check.C) {
	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort))
	c.Assert(err, checker.IsNil)
	defer ln.Close()
	d := s.AddDaemon(c, false, false)
	err = d.Init(swarm.InitRequest{})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "address already in use")
}

// Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`,
// caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`.
// This test makes sure the fixes correctly output scopes instead.
func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) {
	d := s.AddDaemon(c, true, true)

	name := "foo"
	networkCreateRequest := types.NetworkCreateRequest{
		Name: name,
		NetworkCreate: types.NetworkCreate{
			CheckDuplicate: false,
		},
	}

	var n1 types.NetworkCreateResponse
	networkCreateRequest.NetworkCreate.Driver = "bridge"
	status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest)
	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
	c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out)))
	c.Assert(json.Unmarshal(out, &n1), checker.IsNil)

	var n2 types.NetworkCreateResponse
	networkCreateRequest.NetworkCreate.Driver = "overlay"
	status, out, err = d.SockRequest("POST", "/networks/create", networkCreateRequest)
	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
	c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out)))
	c.Assert(json.Unmarshal(out, &n2), checker.IsNil)

	var r1 types.NetworkResource
	status, out, err = d.SockRequest("GET", "/networks/"+n1.ID, nil)
	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out)))
	c.Assert(json.Unmarshal(out, &r1), checker.IsNil)
	c.Assert(r1.Scope, checker.Equals, "local")

	var r2 types.NetworkResource
	status, out, err = d.SockRequest("GET", "/networks/"+n2.ID, nil)
	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out)))
	c.Assert(json.Unmarshal(out, &r2), checker.IsNil)
	c.Assert(r2.Scope, checker.Equals, "swarm")
}

// Test case for 30178
func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) {
	d := s.AddDaemon(c, true, true)

	out, err := d.Cmd("network", "create", "-d", "overlay", "lb")
	c.Assert(err, checker.IsNil, check.Commentf(out))

	instances := 1
	d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
		s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{}
		s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{
			{Target: "lb"},
		}
	})

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)

	containers := d.ActiveContainers()

	out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top")
	c.Assert(err, checker.IsNil, check.Commentf(out))
}