docker_api_swarm_test.go

// +build !windows

package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/cloudflare/cfssl/csr"
	"github.com/cloudflare/cfssl/helpers"
	"github.com/cloudflare/cfssl/initca"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"github.com/docker/docker/integration-cli/checker"
	"github.com/docker/docker/integration-cli/daemon"
	testdaemon "github.com/docker/docker/internal/test/daemon"
	"github.com/docker/docker/internal/test/request"
	"github.com/docker/swarmkit/ca"
	"github.com/go-check/check"
	"github.com/gotestyourself/gotestyourself/assert"
	is "github.com/gotestyourself/gotestyourself/assert/cmp"
	"golang.org/x/net/context"
)

var defaultReconciliationTimeout = 30 * time.Second
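
// TestAPISwarmInit verifies that initializing, joining, and leaving a swarm is reflected
// in the reported control availability and local node state, and that the state is
// restored after both daemons restart.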
func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
	// todo: should find a better way to verify that components are running than /info
	d1 := s.AddDaemon(c, true, true)
	info := d1.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.True)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(info.Cluster.RootRotationInProgress, checker.False)

	d2 := s.AddDaemon(c, true, false)
	info = d2.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	// Leaving cluster
	c.Assert(d2.SwarmLeave(false), checker.IsNil)

	info = d2.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	d2.SwarmJoin(c, swarm.JoinRequest{
		ListenAddr:  d1.SwarmListenAddr(),
		JoinToken:   d1.JoinTokens(c).Worker,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})

	info = d2.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	// Current state restoring after restarts
	d1.Stop(c)
	d2.Stop(c)

	d1.Start(c)
	d2.Start(c)

	info = d1.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.True)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	info = d2.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
}
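
// TestAPISwarmJoinToken covers joining with a missing, invalid, stale (rotated), and
// valid worker token, checking the node's local state after each attempt.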
func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})

	// todo: error message differs depending on whether some components of the token are valid
	d2 := s.AddDaemon(c, false, false)
	c2 := d2.NewClientT(c)
	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info := d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   "foobaz",
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "invalid join token")
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	workerToken := d1.JoinTokens(c).Worker

	d2.SwarmJoin(c, swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   workerToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.SwarmLeave(false), checker.IsNil)
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// change tokens
	d1.RotateTokens(c)

	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   workerToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	workerToken = d1.JoinTokens(c).Worker

	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.SwarmLeave(false), checker.IsNil)
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	// change spec, don't change tokens
	d1.UpdateSwarm(c, func(s *swarm.Spec) {})

	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "join token is necessary")
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)

	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(d2.SwarmLeave(false), checker.IsNil)
	info = d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
}
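
// TestUpdateSwarmAddExternalCA adds two external CFSSL CAs, one without and one with a
// CA certificate, and checks that both appear in the cluster spec.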
func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})
	d1.UpdateSwarm(c, func(s *swarm.Spec) {
		s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
			{
				Protocol: swarm.ExternalCAProtocolCFSSL,
				URL:      "https://thishasnoca.org",
			},
			{
				Protocol: swarm.ExternalCAProtocolCFSSL,
				URL:      "https://thishasacacert.org",
				CACert:   "cacert",
			},
		}
	})
	info := d1.SwarmInfo(c)
	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 2)
	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "")
	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, "cacert")
}
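
// TestAPISwarmCAHash joins with a token whose CA hash segment has been tampered with and
// expects the join to fail with a fingerprint mismatch.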
func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, false, false)
	splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
	splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
	replacementToken := strings.Join(splitToken, "-")
	c2 := d2.NewClientT(c)
	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   replacementToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint")
}
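
// TestAPISwarmPromoteDemote promotes a worker to manager and demotes it again, waits for
// the role change to show up in the node's TLS certificate, and checks that demoting the
// last manager is rejected.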
func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})
	d2 := s.AddDaemon(c, true, false)

	info := d2.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.False)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)

	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleWorker
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False)

	// Wait for the role to change to worker in the cert. This is partially
	// done because it's something worth testing in its own right, and
	// partially because changing the role from manager to worker and then
	// back to manager quickly might cause the node to pause for a while
	// while waiting for the role to change to worker, and the test can
	// time out during this interval.
	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
		certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
		if err != nil {
			return "", check.Commentf("error: %v", err)
		}
		certs, err := helpers.ParseCertificatesPEM(certBytes)
		if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
			return certs[0].Subject.OrganizationalUnit[0], nil
		}
		return "", check.Commentf("could not get organizational unit from certificate")
	}, checker.Equals, "swarm-worker")

	// Demoting last node should fail
	node := d1.GetNode(c, d1.NodeID())
	node.Spec.Role = swarm.NodeRoleWorker
	url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
	res, body, err := request.Post(url, request.Host(d1.Sock()), request.JSONBody(node.Spec))
	c.Assert(err, checker.IsNil)
	b, err := request.ReadBody(body)
	c.Assert(err, checker.IsNil)
	c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(b)))

	// The warning specific to demoting the last manager is best-effort and
	// won't appear until the Role field of the demoted manager has been
	// updated.
	// Yes, I know this looks silly, but checker.Matches is broken, since
	// it anchors the regexp contrary to the documentation, and this makes
	// it impossible to match something that includes a line break.
	if !strings.Contains(string(b), "last manager of the swarm") {
		c.Assert(string(b), checker.Contains, "this would result in a loss of quorum")
	}

	info = d1.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
	c.Assert(info.ControlAvailable, checker.True)

	// Promote already demoted node
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
}
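
// TestAPISwarmLeaderProxy verifies that service creation requests sent to any manager are
// proxied to the leader, so all managers report the same services.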
func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) {
	// add three managers, one of these is leader
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	// start a service by hitting each of the 3 managers
	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test1"
	})
	d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test2"
	})
	d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test3"
	})

	// 3 services should be started now, because the requests were proxied to the leader
	// query each node and make sure it returns 3 services
	for _, d := range []*daemon.Daemon{d1, d2, d3} {
		services := d.ListServices(c)
		c.Assert(services, checker.HasLen, 3)
	}
}
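
// TestAPISwarmLeaderElection stops the initial leader, waits for the remaining managers
// to elect a new one, and verifies that bringing the old leader back does not change
// leadership again.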
func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
	// Create 3 nodes
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	// assert that the first node we made is the leader, and the other two are followers
	c.Assert(d1.GetNode(c, d1.NodeID()).ManagerStatus.Leader, checker.True)
	c.Assert(d1.GetNode(c, d2.NodeID()).ManagerStatus.Leader, checker.False)
	c.Assert(d1.GetNode(c, d3.NodeID()).ManagerStatus.Leader, checker.False)

	d1.Stop(c)

	var (
		leader    *daemon.Daemon   // keep track of leader
		followers []*daemon.Daemon // keep track of followers
	)
	checkLeader := func(nodes ...*daemon.Daemon) checkF {
		return func(c *check.C) (interface{}, check.CommentInterface) {
			// clear these out before each run
			leader = nil
			followers = nil
			for _, d := range nodes {
				if d.GetNode(c, d.NodeID()).ManagerStatus.Leader {
					leader = d
				} else {
					followers = append(followers, d)
				}
			}

			if leader == nil {
				return false, check.Commentf("no leader elected")
			}

			return true, check.Commentf("elected %v", leader.ID())
		}
	}

	// wait for an election to occur
	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True)

	// assert that we have a new leader
	c.Assert(leader, checker.NotNil)

	// Keep track of the current leader, since we want that to be chosen.
	stableleader := leader

	// add d1, the initial leader, back
	d1.Start(c)

	// TODO(stevvooe): may need to wait for rejoin here

	// wait for possible election
	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True)
	// pick out the leader and the followers again

	// verify that we still only have 1 leader and 2 followers
	c.Assert(leader, checker.NotNil)
	c.Assert(followers, checker.HasLen, 2)

	// and that after we added d1 back, the leader hasn't changed
	c.Assert(leader.NodeID(), checker.Equals, stableleader.NodeID())
}
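
// TestAPISwarmRaftQuorum checks that services can be created while a raft quorum exists,
// that creation fails with a clear error once quorum is lost, and that it works again
// after quorum is restored.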
func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	d1.CreateService(c, simpleTestService)

	d2.Stop(c)

	// make sure there is a leader
	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)

	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "top1"
	})

	d3.Stop(c)

	var service swarm.Service
	simpleTestService(&service)
	service.Spec.Name = "top2"
	cli, err := d1.NewClient()
	c.Assert(err, checker.IsNil)
	defer cli.Close()

	// d1 will eventually step down as leader because there is no longer an active quorum; wait for that to happen
	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
		_, err = cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{})
		return err.Error(), nil
	}, checker.Contains, "Make sure more than half of the managers are online.")

	d2.Start(c)

	// make sure there is a leader
	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)

	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "top3"
	})
}
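
// TestAPISwarmLeaveRemovesContainer checks that forcing a node to leave the swarm removes
// its service tasks but keeps containers started outside the swarm.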
func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	d.CreateService(c, simpleTestService, setInstances(instances))

	id, err := d.Cmd("run", "-d", "busybox", "top")
	c.Assert(err, checker.IsNil)
	id = strings.TrimSpace(id)

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1)

	c.Assert(d.SwarmLeave(false), checker.NotNil)
	c.Assert(d.SwarmLeave(true), checker.IsNil)

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)

	id2, err := d.Cmd("ps", "-q")
	c.Assert(err, checker.IsNil)
	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
}
// #23629
func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) {
	testRequires(c, Network)
	s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, false, false)
	id, err := d2.Cmd("run", "-d", "busybox", "top")
	c.Assert(err, checker.IsNil)
	id = strings.TrimSpace(id)

	c2 := d2.NewClientT(c)
	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	c.Assert(err, check.NotNil)
	c.Assert(err.Error(), checker.Contains, "Timeout was reached")

	info := d2.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)

	c.Assert(d2.SwarmLeave(true), checker.IsNil)

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1)

	id2, err := d2.Cmd("ps", "-q")
	c.Assert(err, checker.IsNil)
	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
}
// #23705
func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {
	testRequires(c, Network)
	d := s.AddDaemon(c, false, false)
	client := d.NewClientT(c)
	err := client.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d.SwarmListenAddr(),
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	c.Assert(err, check.NotNil)
	c.Assert(err.Error(), checker.Contains, "Timeout was reached")

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending)

	d.Stop(c)
	d.Start(c)

	info := d.SwarmInfo(c)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
}
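
// TestAPISwarmManagerRestore checks that a previously created service is still known to
// each manager after a clean stop/start as well as after a kill/start.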
func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) {
	d1 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))

	d1.GetService(c, id)
	d1.Stop(c)
	d1.Start(c)
	d1.GetService(c, id)

	d2 := s.AddDaemon(c, true, true)
	d2.GetService(c, id)
	d2.Stop(c)
	d2.Start(c)
	d2.GetService(c, id)

	d3 := s.AddDaemon(c, true, true)
	d3.GetService(c, id)
	d3.Stop(c)
	d3.Start(c)
	d3.GetService(c, id)

	d3.Kill()
	time.Sleep(1 * time.Second) // time to handle signal
	d3.Start(c)
	d3.GetService(c, id)
}
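
// TestAPISwarmScaleNoRollingUpdate scales a service from 2 to 4 replicas and verifies
// that the original containers survive, i.e. scaling up does not trigger a rolling update.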
func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	id := d.CreateService(c, simpleTestService, setInstances(instances))

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
	containers := d.ActiveContainers(c)
	instances = 4
	d.UpdateService(c, d.GetService(c, id), setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
	containers2 := d.ActiveContainers(c)

loop0:
	for _, c1 := range containers {
		for _, c2 := range containers2 {
			if c1 == c2 {
				continue loop0
			}
		}
		c.Errorf("container %v not found in new set %#v", c1, containers2)
	}
}
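
// TestAPISwarmInvalidAddress sends /swarm/init and /swarm/join requests with empty
// addresses and expects HTTP 400 Bad Request responses.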
func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) {
	d := s.AddDaemon(c, false, false)
	req := swarm.InitRequest{
		ListenAddr: "",
	}
	res, _, err := request.Post("/swarm/init", request.Host(d.Sock()), request.JSONBody(req))
	c.Assert(err, checker.IsNil)
	c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest)

	req2 := swarm.JoinRequest{
		ListenAddr:  "0.0.0.0:2377",
		RemoteAddrs: []string{""},
	}
	res, _, err = request.Post("/swarm/join", request.Host(d.Sock()), request.JSONBody(req2))
	c.Assert(err, checker.IsNil)
	c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest)
}
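
// TestAPISwarmForceNewCluster drains and stops one of two managers, re-initializes the
// swarm with ForceNewCluster on the survivor, and verifies the existing service keeps
// running and can still be scaled.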
func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances)

	// drain d2, all containers should move to d1
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Availability = swarm.NodeAvailabilityDrain
	})
	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0)

	d2.Stop(c)

	d1.SwarmInit(c, swarm.InitRequest{
		ForceNewCluster: true,
		Spec:            swarm.Spec{},
	})

	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)

	d3 := s.AddDaemon(c, true, true)
	info := d3.SwarmInfo(c)
	c.Assert(info.ControlAvailable, checker.True)
	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)

	instances = 4
	d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))

	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
}
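
// simpleTestService fills in a minimal replicated busybox service named "top". Tests
// compose it with the set* helpers below, e.g. d.CreateService(c, simpleTestService, setInstances(2)).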
func simpleTestService(s *swarm.Service) {
	ureplicas := uint64(1)
	restartDelay := time.Duration(100 * time.Millisecond)

	s.Spec = swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image:   "busybox:latest",
				Command: []string{"/bin/top"},
			},
			RestartPolicy: &swarm.RestartPolicy{
				Delay: &restartDelay,
			},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		},
	}
	s.Spec.Name = "top"
}
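
// serviceForUpdate is like simpleTestService but also pre-populates UpdateConfig and
// RollbackConfig for tests that exercise rolling updates and rollbacks.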
func serviceForUpdate(s *swarm.Service) {
	ureplicas := uint64(1)
	restartDelay := time.Duration(100 * time.Millisecond)

	s.Spec = swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image:   "busybox:latest",
				Command: []string{"/bin/top"},
			},
			RestartPolicy: &swarm.RestartPolicy{
				Delay: &restartDelay,
			},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:   2,
			Delay:         4 * time.Second,
			FailureAction: swarm.UpdateFailureActionContinue,
		},
		RollbackConfig: &swarm.UpdateConfig{
			Parallelism:   3,
			Delay:         4 * time.Second,
			FailureAction: swarm.UpdateFailureActionContinue,
		},
	}
	s.Spec.Name = "updatetest"
}
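
// The set* helpers below return testdaemon.ServiceConstructor closures that each mutate a
// single field of a service spec, so they can be combined freely with simpleTestService
// and with each other.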
func setInstances(replicas int) testdaemon.ServiceConstructor {
	ureplicas := uint64(replicas)
	return func(s *swarm.Service) {
		s.Spec.Mode = swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		}
	}
}

func setUpdateOrder(order string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.UpdateConfig == nil {
			s.Spec.UpdateConfig = &swarm.UpdateConfig{}
		}
		s.Spec.UpdateConfig.Order = order
	}
}

func setRollbackOrder(order string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.RollbackConfig == nil {
			s.Spec.RollbackConfig = &swarm.UpdateConfig{}
		}
		s.Spec.RollbackConfig.Order = order
	}
}

func setImage(image string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.ContainerSpec == nil {
			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
		}
		s.Spec.TaskTemplate.ContainerSpec.Image = image
	}
}

func setFailureAction(failureAction string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.FailureAction = failureAction
	}
}

func setMaxFailureRatio(maxFailureRatio float32) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio
	}
}

func setParallelism(parallelism uint64) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.Parallelism = parallelism
	}
}

func setConstraints(constraints []string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.Placement == nil {
			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
		}
		s.Spec.TaskTemplate.Placement.Constraints = constraints
	}
}

func setPlacementPrefs(prefs []swarm.PlacementPreference) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.Placement == nil {
			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
		}
		s.Spec.TaskTemplate.Placement.Preferences = prefs
	}
}

func setGlobalMode(s *swarm.Service) {
	s.Spec.Mode = swarm.ServiceMode{
		Global: &swarm.GlobalService{},
	}
}
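
// checkClusterHealth asserts, from every daemon's point of view, that the cluster contains
// exactly managerCount managers and workerCount workers, that every node becomes ready and
// active, and that each manager sees a leader.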
func checkClusterHealth(c *check.C, cl []*daemon.Daemon, managerCount, workerCount int) {
	var totalMCount, totalWCount int

	for _, d := range cl {
		var (
			info swarm.Info
		)

		// check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error
		checkInfo := func(c *check.C) (interface{}, check.CommentInterface) {
			client := d.NewClientT(c)
			daemonInfo, err := client.Info(context.Background())
			info = daemonInfo.Swarm
			return err, check.Commentf("cluster not ready in time")
		}
		waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil)
		if !info.ControlAvailable {
			totalWCount++
			continue
		}

		var leaderFound bool
		totalMCount++
		var mCount, wCount int

		for _, n := range d.ListNodes(c) {
			waitReady := func(c *check.C) (interface{}, check.CommentInterface) {
				if n.Status.State == swarm.NodeStateReady {
					return true, nil
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.NodeID())
			}
			waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True)

			waitActive := func(c *check.C) (interface{}, check.CommentInterface) {
				if n.Spec.Availability == swarm.NodeAvailabilityActive {
					return true, nil
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.NodeID())
			}
			waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True)

			if n.Spec.Role == swarm.NodeRoleManager {
				c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.NodeID()))
				if n.ManagerStatus.Leader {
					leaderFound = true
				}
				mCount++
			} else {
				c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.NodeID()))
				wCount++
			}
		}
		c.Assert(leaderFound, checker.True, check.Commentf("lack of leader reported by node %s", info.NodeID))
		c.Assert(mCount, checker.Equals, managerCount, check.Commentf("managers count reported by node %s", info.NodeID))
		c.Assert(wCount, checker.Equals, workerCount, check.Commentf("workers count reported by node %s", info.NodeID))
	}
	c.Assert(totalMCount, checker.Equals, managerCount)
	c.Assert(totalWCount, checker.Equals, workerCount)
}
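
// TestAPISwarmRestartCluster stops all managers and workers concurrently, starts them
// again, and uses checkClusterHealth to verify that the cluster reassembles with the same
// roles.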
func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
	mCount, wCount := 5, 1

	var nodes []*daemon.Daemon
	for i := 0; i < mCount; i++ {
		manager := s.AddDaemon(c, true, true)
		info := manager.SwarmInfo(c)
		c.Assert(info.ControlAvailable, checker.True)
		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
		nodes = append(nodes, manager)
	}

	for i := 0; i < wCount; i++ {
		worker := s.AddDaemon(c, true, false)
		info := worker.SwarmInfo(c)
		c.Assert(info.ControlAvailable, checker.False)
		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
		nodes = append(nodes, worker)
	}

	// stop whole cluster
	{
		var wg sync.WaitGroup
		wg.Add(len(nodes))
		errs := make(chan error, len(nodes))

		for _, d := range nodes {
			go func(daemon *daemon.Daemon) {
				defer wg.Done()
				if err := daemon.StopWithError(); err != nil {
					errs <- err
				}
			}(d)
		}
		wg.Wait()
		close(errs)
		for err := range errs {
			c.Assert(err, check.IsNil)
		}
	}

	// start whole cluster
	{
		var wg sync.WaitGroup
		wg.Add(len(nodes))
		errs := make(chan error, len(nodes))

		for _, d := range nodes {
			go func(daemon *daemon.Daemon) {
				defer wg.Done()
				if err := daemon.StartWithError("--iptables=false"); err != nil {
					errs <- err
				}
			}(d)
		}
		wg.Wait()
		close(errs)
		for err := range errs {
			c.Assert(err, check.IsNil)
		}
	}

	checkClusterHealth(c, nodes, mCount, wCount)
}
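
// TestAPISwarmServicesUpdateWithName updates a service by its name instead of its ID and
// checks that the new replica count is reconciled.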
func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	id := d.CreateService(c, simpleTestService, setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)

	service := d.GetService(c, id)
	instances = 5

	setInstances(instances)(service)
	cli, err := d.NewClient()
	c.Assert(err, checker.IsNil)
	defer cli.Close()
	_, err = cli.ServiceUpdate(context.Background(), service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{})
	c.Assert(err, checker.IsNil)
	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
}
// Unlocking an unlocked swarm results in an error
func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *check.C) {
	d := s.AddDaemon(c, true, true)
	err := d.SwarmUnlock(swarm.UnlockRequest{UnlockKey: "wrong-key"})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "swarm is not locked")
}
// #29885
func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *check.C) {
	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort))
	c.Assert(err, checker.IsNil)
	defer ln.Close()
	d := s.AddDaemon(c, false, false)
	client := d.NewClientT(c)
	_, err = client.SwarmInit(context.Background(), swarm.InitRequest{
		ListenAddr: d.SwarmListenAddr(),
	})
	c.Assert(err, checker.NotNil)
	c.Assert(err.Error(), checker.Contains, "address already in use")
}
// Test case for 30242, where duplicate networks with different drivers (`bridge` and
// `overlay`) caused both to be reported with scope `swarm` by `docker network inspect`
// and `docker network ls`. This test makes sure the fix reports the correct scope for
// each network.
func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) {
	d := s.AddDaemon(c, true, true)
	cli, err := d.NewClient()
	c.Assert(err, checker.IsNil)
	defer cli.Close()

	name := "foo"
	networkCreate := types.NetworkCreate{
		CheckDuplicate: false,
	}

	networkCreate.Driver = "bridge"

	n1, err := cli.NetworkCreate(context.Background(), name, networkCreate)
	c.Assert(err, checker.IsNil)

	networkCreate.Driver = "overlay"

	n2, err := cli.NetworkCreate(context.Background(), name, networkCreate)
	c.Assert(err, checker.IsNil)

	r1, err := cli.NetworkInspect(context.Background(), n1.ID, types.NetworkInspectOptions{})
	c.Assert(err, checker.IsNil)
	c.Assert(r1.Scope, checker.Equals, "local")

	r2, err := cli.NetworkInspect(context.Background(), n2.ID, types.NetworkInspectOptions{})
	c.Assert(err, checker.IsNil)
	c.Assert(r2.Scope, checker.Equals, "swarm")
}
// Test case for 30178
func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) {
	// Issue #36386 can be an independent one, which is worth further investigation.
	c.Skip("Root cause of Issue #36386 is needed")
	d := s.AddDaemon(c, true, true)

	out, err := d.Cmd("network", "create", "-d", "overlay", "lb")
	c.Assert(err, checker.IsNil, check.Commentf(out))

	instances := 1
	d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
		if s.Spec.TaskTemplate.ContainerSpec == nil {
			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
		}
		s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{}
		s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{
			{Target: "lb"},
		}
	})

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)

	containers := d.ActiveContainers(c)

	out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top")
	c.Assert(err, checker.IsNil, check.Commentf(out))
}
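
// TestSwarmRepeatedRootRotation rotates the cluster root CA several times, alternating
// between a caller-supplied root and one generated by the cluster, and waits each round
// for the manager and worker to trust the new root.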
func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *check.C) {
	m := s.AddDaemon(c, true, true)
	w := s.AddDaemon(c, true, false)

	info := m.SwarmInfo(c)

	currentTrustRoot := info.Cluster.TLSInfo.TrustRoot

	// rotate multiple times
	for i := 0; i < 4; i++ {
		var err error
		var cert, key []byte
		if i%2 != 0 {
			cert, _, key, err = initca.New(&csr.CertificateRequest{
				CN:         "newRoot",
				KeyRequest: csr.NewBasicKeyRequest(),
				CA:         &csr.CAConfig{Expiry: ca.RootCAExpiration},
			})
			c.Assert(err, checker.IsNil)
		}
		expectedCert := string(cert)
		m.UpdateSwarm(c, func(s *swarm.Spec) {
			s.CAConfig.SigningCACert = expectedCert
			s.CAConfig.SigningCAKey = string(key)
			s.CAConfig.ForceRotate++
		})

		// poll to make sure update succeeds
		var clusterTLSInfo swarm.TLSInfo
		for j := 0; j < 18; j++ {
			info := m.SwarmInfo(c)

			// the desired CA cert and key are always redacted
			c.Assert(info.Cluster.Spec.CAConfig.SigningCAKey, checker.Equals, "")
			c.Assert(info.Cluster.Spec.CAConfig.SigningCACert, checker.Equals, "")

			clusterTLSInfo = info.Cluster.TLSInfo

			// if root rotation is done and the trust root has changed, we don't have to poll anymore
			if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot {
				break
			}

			// root rotation not done
			time.Sleep(250 * time.Millisecond)
		}
		if cert != nil {
			c.Assert(clusterTLSInfo.TrustRoot, checker.Equals, expectedCert)
		}
		// it could take another second or two for the nodes to trust the new roots after they've all gotten
		// new TLS certificates
		for j := 0; j < 18; j++ {
			mInfo := m.GetNode(c, m.NodeID()).Description.TLSInfo
			wInfo := m.GetNode(c, w.NodeID()).Description.TLSInfo
			if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot {
				break
			}
			// nodes don't trust root certs yet
			time.Sleep(250 * time.Millisecond)
		}

		c.Assert(m.GetNode(c, m.NodeID()).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo)
		c.Assert(m.GetNode(c, w.NodeID()).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo)
		currentTrustRoot = clusterTLSInfo.TrustRoot
	}
}
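
// TestAPINetworkInspectWithScope creates an overlay network, checks that inspecting it by
// name reports the "swarm" scope and the expected ID, and that restricting the inspect to
// the "local" scope returns a not-found error.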
func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *check.C) {
	d := s.AddDaemon(c, true, true)

	name := "test-scoped-network"
	ctx := context.Background()
	apiclient, err := d.NewClient()
	assert.NilError(c, err)

	resp, err := apiclient.NetworkCreate(ctx, name, types.NetworkCreate{Driver: "overlay"})
	assert.NilError(c, err)

	network, err := apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{})
	assert.NilError(c, err)
	assert.Check(c, is.Equal("swarm", network.Scope))
	assert.Check(c, is.Equal(resp.ID, network.ID))

	_, err = apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{Scope: "local"})
	assert.Check(c, client.IsErrNotFound(err))
}