docker_api_swarm_test.go

// +build !windows

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/cloudflare/cfssl/csr"
	"github.com/cloudflare/cfssl/helpers"
	"github.com/cloudflare/cfssl/initca"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"github.com/docker/docker/integration-cli/checker"
	"github.com/docker/docker/integration-cli/daemon"
	testdaemon "github.com/docker/docker/internal/test/daemon"
	"github.com/docker/docker/internal/test/request"
	"github.com/docker/swarmkit/ca"
	"github.com/go-check/check"
	"gotest.tools/assert"
	is "gotest.tools/assert/cmp"
)

var defaultReconciliationTimeout = 30 * time.Second

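// TestAPISwarmInit covers the basic init/join/leave/restart lifecycle of a
// two-node swarm and checks the node state reported by /info at each step.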
func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
	// todo: should find a better way to verify that components are running than /info
	d1 := s.AddDaemon(c, true, true)
	info := d1.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, true)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.Equal(c, info.Cluster.RootRotationInProgress, false)

	d2 := s.AddDaemon(c, true, false)
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	// Leaving cluster
	assert.NilError(c, d2.SwarmLeave(c, false))

	info = d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	d2.SwarmJoin(c, swarm.JoinRequest{
		ListenAddr:  d1.SwarmListenAddr(),
		JoinToken:   d1.JoinTokens(c).Worker,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})

	info = d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	// Current state restoring after restarts
	d1.Stop(c)
	d2.Stop(c)

	d1.StartNode(c)
	d2.StartNode(c)

	info = d1.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, true)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	info = d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
}

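// TestAPISwarmJoinToken verifies that joining requires a valid join token,
// that rotating tokens invalidates the old ones, and that updating the swarm
// spec alone does not rotate tokens.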
func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})

	// todo: error message differs depending if some components of token are valid
	d2 := s.AddDaemon(c, false, false)
	c2 := d2.NewClientT(c)
	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "join token is necessary")
	info := d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   "foobaz",
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "invalid join token")
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	workerToken := d1.JoinTokens(c).Worker
	d2.SwarmJoin(c, swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   workerToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.NilError(c, d2.SwarmLeave(c, false))
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	// change tokens
	d1.RotateTokens(c)

	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   workerToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "join token is necessary")
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	workerToken = d1.JoinTokens(c).Worker
	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.NilError(c, d2.SwarmLeave(c, false))
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	// change spec, don't change tokens
	d1.UpdateSwarm(c, func(s *swarm.Spec) {})

	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "join token is necessary")
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)

	d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.NilError(c, d2.SwarmLeave(c, false))
	info = d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
}

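// TestUpdateSwarmAddExternalCA checks that external CA entries added through
// a swarm spec update are reflected in the cluster info.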
func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})
	d1.UpdateSwarm(c, func(s *swarm.Spec) {
		s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
			{
				Protocol: swarm.ExternalCAProtocolCFSSL,
				URL:      "https://thishasnoca.org",
			},
			{
				Protocol: swarm.ExternalCAProtocolCFSSL,
				URL:      "https://thishasacacert.org",
				CACert:   "cacert",
			},
		}
	})
	info := d1.SwarmInfo(c)
	assert.Equal(c, len(info.Cluster.Spec.CAConfig.ExternalCAs), 2)
	assert.Equal(c, info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, "")
	assert.Equal(c, info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, "cacert")
}

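// TestAPISwarmCAHash verifies that a join token carrying a wrong CA digest is
// rejected with a fingerprint-mismatch error.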
func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, false, false)
	splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
	splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
	replacementToken := strings.Join(splitToken, "-")
	c2 := d2.NewClientT(c)
	err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		JoinToken:   replacementToken,
		RemoteAddrs: []string{d1.SwarmListenAddr()},
	})
	assert.ErrorContains(c, err, "remote CA does not match fingerprint")
}

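// TestAPISwarmPromoteDemote exercises promoting a worker to manager and
// demoting it back, and checks that demoting the last manager is refused.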
func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) {
	d1 := s.AddDaemon(c, false, false)
	d1.SwarmInit(c, swarm.InitRequest{})
	d2 := s.AddDaemon(c, true, false)

	info := d2.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, false)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)

	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleWorker
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False)

	// Wait for the role to change to worker in the cert. This is partially
	// done because it's something worth testing in its own right, and
	// partially because changing the role from manager to worker and then
	// back to manager quickly might cause the node to pause for a while
	// while waiting for the role to change to worker, and the test can
	// time out during this interval.
	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
		certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
		if err != nil {
			return "", check.Commentf("error: %v", err)
		}
		certs, err := helpers.ParseCertificatesPEM(certBytes)
		if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
			return certs[0].Subject.OrganizationalUnit[0], nil
		}
		return "", check.Commentf("could not get organizational unit from certificate")
	}, checker.Equals, "swarm-worker")

	// Demoting last node should fail
	node := d1.GetNode(c, d1.NodeID())
	node.Spec.Role = swarm.NodeRoleWorker
	url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
	res, body, err := request.Post(url, request.Host(d1.Sock()), request.JSONBody(node.Spec))
	assert.NilError(c, err)
	b, err := request.ReadBody(body)
	assert.NilError(c, err)
	assert.Equal(c, res.StatusCode, http.StatusBadRequest, "output: %q", string(b))

	// The warning specific to demoting the last manager is best-effort and
	// won't appear until the Role field of the demoted manager has been
	// updated.
	// Yes, I know this looks silly, but checker.Matches is broken, since
	// it anchors the regexp contrary to the documentation, and this makes
	// it impossible to match something that includes a line break.
	if !strings.Contains(string(b), "last manager of the swarm") {
		assert.Assert(c, strings.Contains(string(b), "this would result in a loss of quorum"))
	}

	info = d1.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
	assert.Equal(c, info.ControlAvailable, true)

	// Promote already demoted node
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Role = swarm.NodeRoleManager
	})

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
}

func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) {
	// add three managers, one of these is leader
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	// start a service by hitting each of the 3 managers
	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test1"
	})
	d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test2"
	})
	d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "test3"
	})

	// 3 services should be started now, because the requests were proxied to leader
	// query each node and make sure it returns 3 services
	for _, d := range []*daemon.Daemon{d1, d2, d3} {
		services := d.ListServices(c)
		assert.Equal(c, len(services), 3)
	}
}

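// TestAPISwarmLeaderElection stops the current leader and checks that the
// remaining managers elect a new one, and that the old leader rejoins as a
// follower without displacing the newly elected leader.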
func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
	if runtime.GOARCH == "s390x" {
		c.Skip("Disabled on s390x")
	}
	if runtime.GOARCH == "ppc64le" {
		c.Skip("Disabled on ppc64le")
	}

	// Create 3 nodes
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	// assert that the first node we made is the leader, and the other two are followers
	assert.Equal(c, d1.GetNode(c, d1.NodeID()).ManagerStatus.Leader, true)
	assert.Equal(c, d1.GetNode(c, d2.NodeID()).ManagerStatus.Leader, false)
	assert.Equal(c, d1.GetNode(c, d3.NodeID()).ManagerStatus.Leader, false)

	d1.Stop(c)

	var (
		leader    *daemon.Daemon   // keep track of leader
		followers []*daemon.Daemon // keep track of followers
	)
	checkLeader := func(nodes ...*daemon.Daemon) checkF {
		return func(c *check.C) (interface{}, check.CommentInterface) {
			// clear these out before each run
			leader = nil
			followers = nil
			for _, d := range nodes {
				if d.GetNode(c, d.NodeID()).ManagerStatus.Leader {
					leader = d
				} else {
					followers = append(followers, d)
				}
			}
			if leader == nil {
				return false, check.Commentf("no leader elected")
			}
			return true, check.Commentf("elected %v", leader.ID())
		}
	}

	// wait for an election to occur
	c.Logf("Waiting for election to occur...")
	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True)

	// assert that we have a new leader
	assert.Assert(c, leader != nil)

	// Keep track of the current leader, since we want that to be chosen.
	stableleader := leader

	// add d1, the initial leader, back
	d1.StartNode(c)

	// wait for possible election
	c.Logf("Waiting for possible election...")
	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True)

	// pick out the leader and the followers again;
	// verify that we still only have 1 leader and 2 followers
	assert.Assert(c, leader != nil)
	assert.Equal(c, len(followers), 2)

	// and that after we added d1 back, the leader hasn't changed
	assert.Equal(c, leader.NodeID(), stableleader.NodeID())
}

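// TestAPISwarmRaftQuorum verifies that control-plane writes fail once raft
// quorum is lost and succeed again after enough managers come back online.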
func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
	if runtime.GOARCH == "s390x" {
		c.Skip("Disabled on s390x")
	}
	if runtime.GOARCH == "ppc64le" {
		c.Skip("Disabled on ppc64le")
	}

	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)
	d3 := s.AddDaemon(c, true, true)

	d1.CreateService(c, simpleTestService)

	d2.Stop(c)

	// make sure there is a leader
	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)

	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "top1"
	})

	d3.Stop(c)

	var service swarm.Service
	simpleTestService(&service)
	service.Spec.Name = "top2"
	cli := d1.NewClientT(c)
	defer cli.Close()

	// d1 will eventually step down as leader because there is no longer an active quorum; wait for that to happen
	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
		_, err := cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{})
		return err.Error(), nil
	}, checker.Contains, "Make sure more than half of the managers are online.")

	d2.StartNode(c)

	// make sure there is a leader
	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)

	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
		s.Spec.Name = "top3"
	})
}

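// TestAPISwarmLeaveRemovesContainer checks that force-leaving the swarm
// removes service task containers but keeps containers started outside the
// swarm.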
func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	d.CreateService(c, simpleTestService, setInstances(instances))

	id, err := d.Cmd("run", "-d", "busybox", "top")
	assert.NilError(c, err, id)
	id = strings.TrimSpace(id)

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1)

	assert.ErrorContains(c, d.SwarmLeave(c, false), "")
	assert.NilError(c, d.SwarmLeave(c, true))

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)

	id2, err := d.Cmd("ps", "-q")
	assert.NilError(c, err, id2)
	assert.Assert(c, strings.HasPrefix(id, strings.TrimSpace(id2)))
}

// #23629
func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) {
	testRequires(c, Network)
	s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, false, false)

	id, err := d2.Cmd("run", "-d", "busybox", "top")
	assert.NilError(c, err, id)
	id = strings.TrimSpace(id)

	c2 := d2.NewClientT(c)
	err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d2.SwarmListenAddr(),
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	assert.ErrorContains(c, err, "Timeout was reached")

	info := d2.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStatePending)

	assert.NilError(c, d2.SwarmLeave(c, true))

	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1)

	id2, err := d2.Cmd("ps", "-q")
	assert.NilError(c, err, id2)
	assert.Assert(c, strings.HasPrefix(id, strings.TrimSpace(id2)))
}

// #23705
func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {
	testRequires(c, Network)
	d := s.AddDaemon(c, false, false)
	client := d.NewClientT(c)
	err := client.SwarmJoin(context.Background(), swarm.JoinRequest{
		ListenAddr:  d.SwarmListenAddr(),
		RemoteAddrs: []string{"123.123.123.123:1234"},
	})
	assert.ErrorContains(c, err, "Timeout was reached")

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending)

	d.RestartNode(c)

	info := d.SwarmInfo(c)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive)
}

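// TestAPISwarmManagerRestore checks that each manager still serves the
// existing service after a restart, and after being killed and started again.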
func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) {
	d1 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))

	d1.GetService(c, id)
	d1.RestartNode(c)
	d1.GetService(c, id)

	d2 := s.AddDaemon(c, true, true)
	d2.GetService(c, id)
	d2.RestartNode(c)
	d2.GetService(c, id)

	d3 := s.AddDaemon(c, true, true)
	d3.GetService(c, id)
	d3.RestartNode(c)
	d3.GetService(c, id)

	err := d3.Kill()
	assert.NilError(c, err)
	time.Sleep(1 * time.Second) // time to handle signal
	d3.StartNode(c)
	d3.GetService(c, id)
}

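// TestAPISwarmScaleNoRollingUpdate verifies that scaling a service up only
// adds tasks and does not replace the containers that are already running.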
func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	id := d.CreateService(c, simpleTestService, setInstances(instances))

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
	containers := d.ActiveContainers(c)

	instances = 4
	d.UpdateService(c, d.GetService(c, id), setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
	containers2 := d.ActiveContainers(c)

loop0:
	for _, c1 := range containers {
		for _, c2 := range containers2 {
			if c1 == c2 {
				continue loop0
			}
		}
		c.Errorf("container %v not found in new set %#v", c1, containers2)
	}
}

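// TestAPISwarmInvalidAddress checks that init and join requests with missing
// or empty addresses are rejected with 400 Bad Request.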
func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) {
	d := s.AddDaemon(c, false, false)
	req := swarm.InitRequest{
		ListenAddr: "",
	}
	res, _, err := request.Post("/swarm/init", request.Host(d.Sock()), request.JSONBody(req))
	assert.NilError(c, err)
	assert.Equal(c, res.StatusCode, http.StatusBadRequest)

	req2 := swarm.JoinRequest{
		ListenAddr:  "0.0.0.0:2377",
		RemoteAddrs: []string{""},
	}
	res, _, err = request.Post("/swarm/join", request.Host(d.Sock()), request.JSONBody(req2))
	assert.NilError(c, err)
	assert.Equal(c, res.StatusCode, http.StatusBadRequest)
}

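// TestAPISwarmForceNewCluster re-initializes a single surviving manager with
// ForceNewCluster and checks that services keep running and that new managers
// can join the recovered cluster.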
func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
	d1 := s.AddDaemon(c, true, true)
	d2 := s.AddDaemon(c, true, true)

	instances := 2
	id := d1.CreateService(c, simpleTestService, setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances)

	// drain d2, all containers should move to d1
	d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) {
		n.Spec.Availability = swarm.NodeAvailabilityDrain
	})
	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0)

	d2.Stop(c)

	d1.SwarmInit(c, swarm.InitRequest{
		ForceNewCluster: true,
		Spec:            swarm.Spec{},
	})

	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)

	d3 := s.AddDaemon(c, true, true)
	info := d3.SwarmInfo(c)
	assert.Equal(c, info.ControlAvailable, true)
	assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)

	instances = 4
	d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
}

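// simpleTestService fills in a minimal replicated busybox service named "top"
// with a single replica; it is the base spec used by most tests in this file.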
func simpleTestService(s *swarm.Service) {
	ureplicas := uint64(1)
	restartDelay := time.Duration(100 * time.Millisecond)

	s.Spec = swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image:   "busybox:latest",
				Command: []string{"/bin/top"},
			},
			RestartPolicy: &swarm.RestartPolicy{
				Delay: &restartDelay,
			},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		},
	}
	s.Spec.Name = "top"
}

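// serviceForUpdate is like simpleTestService but also sets update and
// rollback configs, for tests that exercise rolling updates.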
func serviceForUpdate(s *swarm.Service) {
	ureplicas := uint64(1)
	restartDelay := time.Duration(100 * time.Millisecond)

	s.Spec = swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{
				Image:   "busybox:latest",
				Command: []string{"/bin/top"},
			},
			RestartPolicy: &swarm.RestartPolicy{
				Delay: &restartDelay,
			},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:   2,
			Delay:         4 * time.Second,
			FailureAction: swarm.UpdateFailureActionContinue,
		},
		RollbackConfig: &swarm.UpdateConfig{
			Parallelism:   3,
			Delay:         4 * time.Second,
			FailureAction: swarm.UpdateFailureActionContinue,
		},
	}
	s.Spec.Name = "updatetest"
}

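// setInstances returns a ServiceConstructor that sets the replica count; the
// set* helpers that follow each tweak one field of the service spec in the
// same way and are composed by tests when creating or updating services.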
func setInstances(replicas int) testdaemon.ServiceConstructor {
	ureplicas := uint64(replicas)
	return func(s *swarm.Service) {
		s.Spec.Mode = swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{
				Replicas: &ureplicas,
			},
		}
	}
}

func setUpdateOrder(order string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.UpdateConfig == nil {
			s.Spec.UpdateConfig = &swarm.UpdateConfig{}
		}
		s.Spec.UpdateConfig.Order = order
	}
}

func setRollbackOrder(order string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.RollbackConfig == nil {
			s.Spec.RollbackConfig = &swarm.UpdateConfig{}
		}
		s.Spec.RollbackConfig.Order = order
	}
}

func setImage(image string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.ContainerSpec == nil {
			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
		}
		s.Spec.TaskTemplate.ContainerSpec.Image = image
	}
}

func setFailureAction(failureAction string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.FailureAction = failureAction
	}
}

func setMaxFailureRatio(maxFailureRatio float32) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio
	}
}

func setParallelism(parallelism uint64) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.UpdateConfig.Parallelism = parallelism
	}
}

func setConstraints(constraints []string) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.Placement == nil {
			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
		}
		s.Spec.TaskTemplate.Placement.Constraints = constraints
	}
}

func setPlacementPrefs(prefs []swarm.PlacementPreference) testdaemon.ServiceConstructor {
	return func(s *swarm.Service) {
		if s.Spec.TaskTemplate.Placement == nil {
			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
		}
		s.Spec.TaskTemplate.Placement.Preferences = prefs
	}
}

func setGlobalMode(s *swarm.Service) {
	s.Spec.Mode = swarm.ServiceMode{
		Global: &swarm.GlobalService{},
	}
}

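// checkClusterHealth polls every daemon in cl until it reports a healthy
// swarm, then verifies that the manager and worker counts seen from each
// manager match the expected totals.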
func checkClusterHealth(c *check.C, cl []*daemon.Daemon, managerCount, workerCount int) {
	var totalMCount, totalWCount int

	for _, d := range cl {
		var (
			info swarm.Info
		)

		// check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error
		checkInfo := func(c *check.C) (interface{}, check.CommentInterface) {
			client := d.NewClientT(c)
			daemonInfo, err := client.Info(context.Background())
			info = daemonInfo.Swarm
			return err, check.Commentf("cluster not ready in time")
		}
		waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil)
		if !info.ControlAvailable {
			totalWCount++
			continue
		}

		var leaderFound bool
		totalMCount++
		var mCount, wCount int

		for _, n := range d.ListNodes(c) {
			waitReady := func(c *check.C) (interface{}, check.CommentInterface) {
				if n.Status.State == swarm.NodeStateReady {
					return true, nil
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.NodeID())
			}
			waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True)

			waitActive := func(c *check.C) (interface{}, check.CommentInterface) {
				if n.Spec.Availability == swarm.NodeAvailabilityActive {
					return true, nil
				}
				nn := d.GetNode(c, n.ID)
				n = *nn
				return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.NodeID())
			}
			waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True)

			if n.Spec.Role == swarm.NodeRoleManager {
				assert.Assert(c, n.ManagerStatus != nil, "manager status of node %s (manager), reported by %s", n.ID, d.NodeID())
				if n.ManagerStatus.Leader {
					leaderFound = true
				}
				mCount++
			} else {
				assert.Assert(c, n.ManagerStatus == nil, "manager status of node %s (worker), reported by %s", n.ID, d.NodeID())
				wCount++
			}
		}
		assert.Equal(c, leaderFound, true, "lack of leader reported by node %s", info.NodeID)
		assert.Equal(c, mCount, managerCount, "managers count reported by node %s", info.NodeID)
		assert.Equal(c, wCount, workerCount, "workers count reported by node %s", info.NodeID)
	}
	assert.Equal(c, totalMCount, managerCount)
	assert.Equal(c, totalWCount, workerCount)
}

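// TestAPISwarmRestartCluster stops and restarts every node in a mixed
// manager/worker cluster in parallel and verifies the cluster comes back
// healthy.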
func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
	mCount, wCount := 5, 1

	var nodes []*daemon.Daemon
	for i := 0; i < mCount; i++ {
		manager := s.AddDaemon(c, true, true)
		info := manager.SwarmInfo(c)
		assert.Equal(c, info.ControlAvailable, true)
		assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
		nodes = append(nodes, manager)
	}

	for i := 0; i < wCount; i++ {
		worker := s.AddDaemon(c, true, false)
		info := worker.SwarmInfo(c)
		assert.Equal(c, info.ControlAvailable, false)
		assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
		nodes = append(nodes, worker)
	}

	// stop whole cluster
	{
		var wg sync.WaitGroup
		wg.Add(len(nodes))
		errs := make(chan error, len(nodes))

		for _, d := range nodes {
			go func(daemon *daemon.Daemon) {
				defer wg.Done()
				if err := daemon.StopWithError(); err != nil {
					errs <- err
				}
			}(d)
		}
		wg.Wait()
		close(errs)
		for err := range errs {
			assert.NilError(c, err)
		}
	}

	// start whole cluster
	{
		var wg sync.WaitGroup
		wg.Add(len(nodes))
		errs := make(chan error, len(nodes))

		for _, d := range nodes {
			go func(daemon *daemon.Daemon) {
				defer wg.Done()
				if err := daemon.StartWithError("--iptables=false"); err != nil {
					errs <- err
				}
			}(d)
		}
		wg.Wait()
		close(errs)
		for err := range errs {
			assert.NilError(c, err)
		}
	}

	checkClusterHealth(c, nodes, mCount, wCount)
}

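// TestAPISwarmServicesUpdateWithName checks that a service can be updated by
// name (not only by ID) through the API client.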
func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) {
	d := s.AddDaemon(c, true, true)

	instances := 2
	id := d.CreateService(c, simpleTestService, setInstances(instances))
	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)

	service := d.GetService(c, id)
	instances = 5
	setInstances(instances)(service)
	cli := d.NewClientT(c)
	defer cli.Close()
	_, err := cli.ServiceUpdate(context.Background(), service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{})
	assert.NilError(c, err)
	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
}

// Unlocking an unlocked swarm results in an error
func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *check.C) {
	d := s.AddDaemon(c, true, true)
	err := d.SwarmUnlock(c, swarm.UnlockRequest{UnlockKey: "wrong-key"})
	assert.ErrorContains(c, err, "swarm is not locked")
}

// #29885
func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *check.C) {
	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort))
	assert.NilError(c, err)
	defer ln.Close()
	d := s.AddDaemon(c, false, false)
	client := d.NewClientT(c)
	_, err = client.SwarmInit(context.Background(), swarm.InitRequest{
		ListenAddr: d.SwarmListenAddr(),
	})
	assert.ErrorContains(c, err, "address already in use")
}

// Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`,
// caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`.
// This test makes sure the fix correctly reports each network's scope.
func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) {
	d := s.AddDaemon(c, true, true)
	cli := d.NewClientT(c)
	defer cli.Close()

	name := "foo"
	networkCreate := types.NetworkCreate{
		CheckDuplicate: false,
	}

	networkCreate.Driver = "bridge"
	n1, err := cli.NetworkCreate(context.Background(), name, networkCreate)
	assert.NilError(c, err)

	networkCreate.Driver = "overlay"
	n2, err := cli.NetworkCreate(context.Background(), name, networkCreate)
	assert.NilError(c, err)

	r1, err := cli.NetworkInspect(context.Background(), n1.ID, types.NetworkInspectOptions{})
	assert.NilError(c, err)
	assert.Equal(c, r1.Scope, "local")

	r2, err := cli.NetworkInspect(context.Background(), n2.ID, types.NetworkInspectOptions{})
	assert.NilError(c, err)
	assert.Equal(c, r2.Scope, "swarm")
}

// Test case for 30178
func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) {
	// Issue #36386 can be an independent one, which is worth further investigation.
	c.Skip("Root cause of Issue #36386 is needed")
	d := s.AddDaemon(c, true, true)

	out, err := d.Cmd("network", "create", "-d", "overlay", "lb")
	assert.NilError(c, err, out)

	instances := 1
	d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
		if s.Spec.TaskTemplate.ContainerSpec == nil {
			s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
		}
		s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{}
		s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{
			{Target: "lb"},
		}
	})

	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)

	containers := d.ActiveContainers(c)

	out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top")
	assert.NilError(c, err, out)
}

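// TestSwarmRepeatedRootRotation rotates the swarm root CA several times,
// alternating between generated and externally provided roots, and waits for
// the manager and worker to trust each new root.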
func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *check.C) {
	m := s.AddDaemon(c, true, true)
	w := s.AddDaemon(c, true, false)
	info := m.SwarmInfo(c)

	currentTrustRoot := info.Cluster.TLSInfo.TrustRoot

	// rotate multiple times
	for i := 0; i < 4; i++ {
		var err error
		var cert, key []byte
		if i%2 != 0 {
			cert, _, key, err = initca.New(&csr.CertificateRequest{
				CN:         "newRoot",
				KeyRequest: csr.NewBasicKeyRequest(),
				CA:         &csr.CAConfig{Expiry: ca.RootCAExpiration},
			})
			assert.NilError(c, err)
		}
		expectedCert := string(cert)
		m.UpdateSwarm(c, func(s *swarm.Spec) {
			s.CAConfig.SigningCACert = expectedCert
			s.CAConfig.SigningCAKey = string(key)
			s.CAConfig.ForceRotate++
		})

		// poll to make sure update succeeds
		var clusterTLSInfo swarm.TLSInfo
		for j := 0; j < 18; j++ {
			info := m.SwarmInfo(c)

			// the desired CA cert and key are always redacted
			assert.Equal(c, info.Cluster.Spec.CAConfig.SigningCAKey, "")
			assert.Equal(c, info.Cluster.Spec.CAConfig.SigningCACert, "")

			clusterTLSInfo = info.Cluster.TLSInfo

			// if root rotation is done and the trust root has changed, we don't have to poll anymore
			if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot {
				break
			}

			// root rotation not done
			time.Sleep(250 * time.Millisecond)
		}
		if cert != nil {
			assert.Equal(c, clusterTLSInfo.TrustRoot, expectedCert)
		}

		// could take another second or two for the nodes to trust the new roots after they've all gotten
		// new TLS certificates
		for j := 0; j < 18; j++ {
			mInfo := m.GetNode(c, m.NodeID()).Description.TLSInfo
			wInfo := m.GetNode(c, w.NodeID()).Description.TLSInfo

			if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot {
				break
			}

			// nodes don't trust root certs yet
			time.Sleep(250 * time.Millisecond)
		}

		assert.DeepEqual(c, m.GetNode(c, m.NodeID()).Description.TLSInfo, clusterTLSInfo)
		assert.DeepEqual(c, m.GetNode(c, w.NodeID()).Description.TLSInfo, clusterTLSInfo)
		currentTrustRoot = clusterTLSInfo.TrustRoot
	}
}

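// TestAPINetworkInspectWithScope checks that an overlay network created in a
// swarm is reported with the "swarm" scope and is not found when inspected
// with the "local" scope filter.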
func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *check.C) {
	d := s.AddDaemon(c, true, true)

	name := "test-scoped-network"
	ctx := context.Background()
	apiclient := d.NewClientT(c)

	resp, err := apiclient.NetworkCreate(ctx, name, types.NetworkCreate{Driver: "overlay"})
	assert.NilError(c, err)

	network, err := apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{})
	assert.NilError(c, err)
	assert.Check(c, is.Equal("swarm", network.Scope))
	assert.Check(c, is.Equal(resp.ID, network.ID))

	_, err = apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{Scope: "local"})
	assert.Check(c, client.IsErrNotFound(err))
}