runtime_test.go

package docker

import (
	"bytes"
	"fmt"
	"github.com/dotcloud/docker/utils"
	"io"
	"log"
	"net"
	"os"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"testing"
	"time"
)
const (
	unitTestImageName     = "docker-test-image"
	unitTestImageID       = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
	unitTestNetworkBridge = "testdockbr0"
	unitTestStoreBase     = "/var/lib/docker/unit-tests"
	testDaemonAddr        = "127.0.0.1:4270"
	testDaemonProto       = "tcp"
)

var (
	globalRuntime   *Runtime
	startFds        int
	startGoroutines int
)
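// nuke kills every container in the given runtime in parallel, closes its
// network manager and removes its root directory. It is the teardown helper
// used by most tests in this file.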
func nuke(runtime *Runtime) error {
	var wg sync.WaitGroup
	for _, container := range runtime.List() {
		wg.Add(1)
		go func(c *Container) {
			c.Kill()
			wg.Done()
		}(container)
	}
	wg.Wait()
	runtime.networkManager.Close()
	return os.RemoveAll(runtime.root)
}
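// cleanup kills and destroys every container in the runtime and deletes all
// images except the base test image, so the runtime can be reused by the
// next test.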
func cleanup(runtime *Runtime) error {
	for _, container := range runtime.List() {
		container.Kill()
		runtime.Destroy(container)
	}
	images, err := runtime.graph.Map()
	if err != nil {
		return err
	}
	for _, image := range images {
		if image.ID != unitTestImageID {
			runtime.graph.Delete(image.ID)
		}
	}
	return nil
}
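// layerArchive opens the given tar file and returns it as a reader so its
// contents can be consumed as a layer archive.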
func layerArchive(tarfile string) (io.Reader, error) {
	// FIXME: need to close f somewhere
	f, err := os.Open(tarfile)
	if err != nil {
		return nil, err
	}
	return f, nil
}
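// init runs before any test in the package. When the test binary is invoked
// as a container init process it hands control to SysInit; otherwise it
// checks that the tests run as root, points networking at a dedicated test
// bridge, prepares the base test image and spawns the global daemon used by
// the integration tests.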
func init() {
	os.Setenv("TEST", "1")

	// Hack to run sys init during unit testing
	if selfPath := utils.SelfPath(); selfPath == "/sbin/init" || selfPath == "/.dockerinit" {
		SysInit()
		return
	}

	if uid := syscall.Geteuid(); uid != 0 {
		log.Fatal("docker tests need to be run as root")
	}

	NetworkBridgeIface = unitTestNetworkBridge

	// Setup the base runtime, which will be duplicated for each test.
	// (no tests are run directly in the base)
	setupBaseImage()

	// Create the "global runtime" with a long-running daemon for integration tests
	spawnGlobalDaemon()

	startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine()
}
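// setupBaseImage creates the shared base runtime under unitTestStoreBase and
// pulls the test image into it if it is not already present.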
func setupBaseImage() {
	runtime, err := NewRuntimeFromDirectory(unitTestStoreBase, false)
	if err != nil {
		log.Fatalf("Unable to create a runtime for tests: %s", err)
	}

	// Create the "Server"
	srv := &Server{
		runtime:     runtime,
		enableCors:  false,
		pullingPool: make(map[string]struct{}),
		pushingPool: make(map[string]struct{}),
	}

	// If the test image is not found, try to download it.
	if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {
		// Retrieve the image
		if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
			log.Fatalf("Unable to pull the test image: %s", err)
		}
	}
}
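// spawnGlobalDaemon starts a single long-running API server on testDaemonAddr.
// The daemon is shared by all integration tests in this package and is only
// started once.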
func spawnGlobalDaemon() {
	if globalRuntime != nil {
		utils.Debugf("Global runtime already exists. Skipping.")
		return
	}
	globalRuntime = mkRuntime(log.New(os.Stderr, "", 0))
	srv := &Server{
		runtime:     globalRuntime,
		enableCors:  false,
		pullingPool: make(map[string]struct{}),
		pushingPool: make(map[string]struct{}),
	}

	// Spawn a Daemon
	go func() {
		utils.Debugf("Spawning global daemon for integration tests")
		if err := ListenAndServe(testDaemonProto, testDaemonAddr, srv, os.Getenv("DEBUG") != ""); err != nil {
			log.Fatalf("Unable to spawn the test daemon: %s", err)
		}
	}()

	// Give ListenAndServe some time to actually start
	// FIXME: use inmem transports instead of tcp
	time.Sleep(time.Second)
}
// FIXME: test that ImagePull(json=true) sends correct json output
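// GetTestImage returns the pre-pulled base test image from the runtime's
// graph, aborting the test run if it cannot be found.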
func GetTestImage(runtime *Runtime) *Image {
	imgs, err := runtime.graph.Map()
	if err != nil {
		log.Fatalf("Unable to get the test image: %s", err)
	}
	for _, image := range imgs {
		if image.ID == unitTestImageID {
			return image
		}
	}
	log.Fatalf("Test image %v not found", unitTestImageID)
	return nil
}
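// TestRuntimeCreate checks that a freshly created container shows up through
// List(), Get() and Exists(), and that Create() rejects configurations with a
// missing or empty Cmd and with invalid PortSpecs.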
func TestRuntimeCreate(t *testing.T) {
	runtime := mkRuntime(t)
	defer nuke(runtime)

	// Make sure we start with 0 containers
	if len(runtime.List()) != 0 {
		t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
	}

	container, err := runtime.Create(&Config{
		Image: GetTestImage(runtime).ID,
		Cmd:   []string{"ls", "-al"},
	},
	)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := runtime.Destroy(container); err != nil {
			t.Error(err)
		}
	}()

	// Make sure we can find the newly created container with List()
	if len(runtime.List()) != 1 {
		t.Errorf("Expected 1 container, %v found", len(runtime.List()))
	}

	// Make sure the container List() returns is the right one
	if runtime.List()[0].ID != container.ID {
		t.Errorf("Unexpected container %v returned by List", runtime.List()[0])
	}

	// Make sure we can get the container with Get()
	if runtime.Get(container.ID) == nil {
		t.Errorf("Unable to get newly created container")
	}

	// Make sure it is the right container
	if runtime.Get(container.ID) != container {
		t.Errorf("Get() returned the wrong container")
	}

	// Make sure Exists returns it as existing
	if !runtime.Exists(container.ID) {
		t.Errorf("Exists() returned false for a newly created container")
	}
	// Make sure create with bad parameters returns an error
	_, err = runtime.Create(
		&Config{
			Image: GetTestImage(runtime).ID,
		},
	)
	if err == nil {
		t.Fatal("Builder.Create should throw an error when Cmd is missing")
	}

	_, err = runtime.Create(
		&Config{
			Image: GetTestImage(runtime).ID,
			Cmd:   []string{},
		},
	)
	if err == nil {
		t.Fatal("Builder.Create should throw an error when Cmd is empty")
	}

	config := &Config{
		Image:     GetTestImage(runtime).ID,
		Cmd:       []string{"/bin/ls"},
		PortSpecs: []string{"80"},
	}
	container, err = runtime.Create(config)
	if err != nil {
		t.Fatal(err)
	}

	image, err := runtime.Commit(container, "testrepo", "testtag", "", "", config)
	if err != nil {
		t.Error(err)
	}

	_, err = runtime.Create(
		&Config{
			Image:     image.ID,
			PortSpecs: []string{"80000:80"},
		},
	)
	if err == nil {
		t.Fatal("Builder.Create should throw an error when PortSpecs is invalid")
	}
}
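// TestDestroy checks that a destroyed container disappears from List(), Get()
// and Exists(), that its root directory is removed, and that destroying it a
// second time fails.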
func TestDestroy(t *testing.T) {
	runtime := mkRuntime(t)
	defer nuke(runtime)

	container, err := runtime.Create(&Config{
		Image: GetTestImage(runtime).ID,
		Cmd:   []string{"ls", "-al"},
	},
	)
	if err != nil {
		t.Fatal(err)
	}

	// Destroy
	if err := runtime.Destroy(container); err != nil {
		t.Error(err)
	}

	// Make sure runtime.Exists() behaves correctly
	if runtime.Exists(container.ID) {
		t.Errorf("Exists() returned true for a destroyed container")
	}

	// Make sure runtime.List() doesn't list the destroyed container
	if len(runtime.List()) != 0 {
		t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
	}

	// Make sure runtime.Get() refuses to return the destroyed container
	if runtime.Get(container.ID) != nil {
		t.Errorf("Get() returned a destroyed container")
	}

	// Make sure the container root directory does not exist anymore
	_, err = os.Stat(container.root)
	if err == nil || !os.IsNotExist(err) {
		t.Errorf("Container root directory still exists after destroy")
	}

	// Test double destroy
	if err := runtime.Destroy(container); err == nil {
		// It should have failed
		t.Errorf("Double destroy did not fail")
	}
}
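// TestGet checks that Get() returns the exact container instance that was
// created, for each of several containers.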
func TestGet(t *testing.T) {
	runtime := mkRuntime(t)
	defer nuke(runtime)

	container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
	defer runtime.Destroy(container1)

	container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
	defer runtime.Destroy(container2)

	container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
	defer runtime.Destroy(container3)

	if runtime.Get(container1.ID) != container1 {
		t.Errorf("Get(test1) returned %v while expecting %v", runtime.Get(container1.ID), container1)
	}
	if runtime.Get(container2.ID) != container2 {
		t.Errorf("Get(test2) returned %v while expecting %v", runtime.Get(container2.ID), container2)
	}
	if runtime.Get(container3.ID) != container3 {
		t.Errorf("Get(test3) returned %v while expecting %v", runtime.Get(container3.ID), container3)
	}
}
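// startEchoServerContainer probes for a free port (starting at 5555), starts
// a container running a socat echo server on that port over the given
// protocol, waits until it is up, and returns the runtime, the container and
// the host port the echo server was mapped to.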
func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) {
	var err error
	runtime := mkRuntime(t)
	port := 5554
	var container *Container
	var strPort string
	for {
		port++
		strPort = strconv.Itoa(port)
		var cmd string
		if proto == "tcp" {
			cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat"
		} else if proto == "udp" {
			cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat"
		} else {
			t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
		}
		t.Log("Trying port", strPort)
		container, err = runtime.Create(&Config{
			Image:     GetTestImage(runtime).ID,
			Cmd:       []string{"sh", "-c", cmd},
			PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)},
		})
		if container != nil {
			break
		}
		if err != nil {
			nuke(runtime)
			t.Fatal(err)
		}
		t.Logf("Port %v already in use", strPort)
	}

	hostConfig := &HostConfig{}
	if err := container.Start(hostConfig); err != nil {
		nuke(runtime)
		t.Fatal(err)
	}

	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
		for !container.State.Running {
			time.Sleep(10 * time.Millisecond)
		}
	})

	// Even if the state is running, let's give lxc some time to spawn the process
	container.WaitTimeout(500 * time.Millisecond)

	strPort = container.NetworkSettings.PortMapping[strings.Title(proto)][strPort]
	return runtime, container, strPort
}
// Run a container with a TCP port allocated, and test that it can receive connections on localhost
func TestAllocateTCPPortLocalhost(t *testing.T) {
	runtime, container, port := startEchoServerContainer(t, "tcp")
	defer nuke(runtime)
	defer container.Kill()

	for i := 0; i != 10; i++ {
		conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port))
		if err != nil {
			t.Fatal(err)
		}
		defer conn.Close()

		input := bytes.NewBufferString("well hello there\n")
		_, err = conn.Write(input.Bytes())
		if err != nil {
			t.Fatal(err)
		}
		buf := make([]byte, 16)
		read := 0
		conn.SetReadDeadline(time.Now().Add(3 * time.Second))
		read, err = conn.Read(buf)
		if err != nil {
			if err, ok := err.(*net.OpError); ok {
				if err.Err == syscall.ECONNRESET {
					t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec")
					conn.Close()
					time.Sleep(time.Second)
					continue
				}
				if err.Timeout() {
					t.Log("Timeout, trying again")
					conn.Close()
					continue
				}
			}
			t.Fatal(err)
		}
		output := string(buf[:read])
		if !strings.Contains(output, "well hello there") {
			t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output))
		} else {
			return
		}
	}

	t.Fatal("No reply from the container")
}
// Run a container with a UDP port allocated, and test that it can receive connections on localhost
func TestAllocateUDPPortLocalhost(t *testing.T) {
	runtime, container, port := startEchoServerContainer(t, "udp")
	defer nuke(runtime)
	defer container.Kill()

	conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	input := bytes.NewBufferString("well hello there\n")
	buf := make([]byte, 16)
	// Try for a minute, for some reason the select in socat may take ages
	// to return even though everything on the path seems fine (i.e: the
	// UDPProxy forwards the traffic correctly and you can see the packets
	// on the interface from within the container).
	for i := 0; i != 120; i++ {
		_, err := conn.Write(input.Bytes())
		if err != nil {
			t.Fatal(err)
		}
		conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond))
		read, err := conn.Read(buf)
		if err == nil {
			output := string(buf[:read])
			if strings.Contains(output, "well hello there") {
				return
			}
		}
	}

	t.Fatal("No reply from the container")
}
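// TestRestore simulates a daemon restart: it marks a dead container as still
// running on disk, reloads all containers from the same directory with a
// fresh runtime, and checks that nothing is reported as running anymore and
// that a reloaded container can still be run.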
func TestRestore(t *testing.T) {
	runtime1 := mkRuntime(t)
	defer nuke(runtime1)

	// Create a container with one instance of docker
	container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
	defer runtime1.Destroy(container1)

	// Create a second container meant to be killed
	container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
	defer runtime1.Destroy(container2)

	// Start the container non blocking
	hostConfig := &HostConfig{}
	if err := container2.Start(hostConfig); err != nil {
		t.Fatal(err)
	}
	if !container2.State.Running {
		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
	}

	// Simulate a crash/manual quit of dockerd: process dies, state stays 'Running'
	cStdin, _ := container2.StdinPipe()
	cStdin.Close()
	if err := container2.WaitTimeout(2 * time.Second); err != nil {
		t.Fatal(err)
	}
	container2.State.Running = true
	container2.ToDisk()

	if len(runtime1.List()) != 2 {
		t.Errorf("Expected 2 containers, %v found", len(runtime1.List()))
	}
	if err := container1.Run(); err != nil {
		t.Fatal(err)
	}
	if !container2.State.Running {
		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
	}

	// Here we are simulating a docker restart - that is, reloading all
	// containers from scratch
	runtime2, err := NewRuntimeFromDirectory(runtime1.root, false)
	if err != nil {
		t.Fatal(err)
	}
	defer nuke(runtime2)
	if len(runtime2.List()) != 2 {
		t.Errorf("Expected 2 containers, %v found", len(runtime2.List()))
	}
	runningCount := 0
	for _, c := range runtime2.List() {
		if c.State.Running {
			t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
			runningCount++
		}
	}
	if runningCount != 0 {
		t.Fatalf("Expected 0 containers alive, %d found", runningCount)
	}

	container3 := runtime2.Get(container1.ID)
	if container3 == nil {
		t.Fatal("Unable to Get container")
	}
	if err := container3.Run(); err != nil {
		t.Fatal(err)
	}
	container2.State.Running = false
}