runtime_test.go

package docker

import (
	"bytes"
	"fmt"
	"github.com/dotcloud/docker/devmapper"
	"github.com/dotcloud/docker/utils"
	"io"
	"io/ioutil"
	"log"
	"net"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"testing"
	"time"
)

const (
	unitTestImageName        = "docker-test-image"
	unitTestImageID          = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
	unitTestNetworkBridge    = "testdockbr0"
	unitTestStoreBase        = "/var/lib/docker/unit-tests"
	unitTestStoreDevicesBase = "/var/lib/docker/unit-tests-devices"
	testDaemonAddr           = "127.0.0.1:4270"
	testDaemonProto          = "tcp"
)

var (
	globalRuntime   *Runtime
	startFds        int
	startGoroutines int
)
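// nuke force-kills every container in the runtime in parallel, makes sure any
// leftover mounts are gone, and then removes the runtime's root directory.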
func nuke(runtime *Runtime) error {
	var wg sync.WaitGroup
	for _, container := range runtime.List() {
		wg.Add(1)
		go func(c *Container) {
			c.Kill()
			wg.Done()
		}(container)
	}
	wg.Wait()
	for _, container := range runtime.List() {
		container.EnsureUnmounted()
	}
	return os.RemoveAll(runtime.root)
}
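// cleanup kills and destroys every container, then deletes every image except
// the base test image, leaving the runtime reusable by the next test.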
func cleanup(runtime *Runtime) error {
	for _, container := range runtime.List() {
		container.Kill()
		runtime.Destroy(container)
	}
	images, err := runtime.graph.Map()
	if err != nil {
		return err
	}
	for _, image := range images {
		if image.ID != unitTestImageID {
			runtime.DeleteImage(image.ID)
		}
	}
	return nil
}

func cleanupLast(runtime *Runtime) error {
	cleanup(runtime)
	runtime.deviceSet.Shutdown()
	return nil
}

func layerArchive(tarfile string) (io.Reader, error) {
	// FIXME: need to close f somewhere
	f, err := os.Open(tarfile)
	if err != nil {
		return nil, err
	}
	return f, nil
}
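// One way to address the FIXME above would be to hand ownership of the file
// handle to the caller. A minimal sketch (layerArchiveCloser is a hypothetical
// name, not used by the tests in this file):
//
//	func layerArchiveCloser(tarfile string) (io.ReadCloser, error) {
//		f, err := os.Open(tarfile) // *os.File already implements io.ReadCloser
//		if err != nil {
//			return nil, err
//		}
//		return f, nil // caller does: defer archive.Close()
//	}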
// Remove any leftover device mapper devices from earlier runs of the unit tests
func removeDev(name string) {
	path := filepath.Join("/dev/mapper", name)
	fd, err := syscall.Open(path, syscall.O_RDONLY, 07777)
	if err != nil {
		if err == syscall.ENXIO {
			// No device for this node, just remove it
			os.Remove(path)
			return
		}
	} else {
		syscall.Close(fd)
	}
	if err := devmapper.RemoveDevice(name); err != nil {
		panic(fmt.Errorf("Unable to remove existing device %s: %s", name, err))
	}
}
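// cleanupDevMapper scans /dev/mapper for devices left behind by earlier test
// runs ("docker-unit-tests-devices-*") and removes them, saving the thin pool
// for last since the other devices keep it busy.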
func cleanupDevMapper() {
	infos, _ := ioutil.ReadDir("/dev/mapper")
	if infos != nil {
		hasPool := false
		for _, info := range infos {
			name := info.Name()
			if strings.HasPrefix(name, "docker-unit-tests-devices-") {
				if name == "docker-unit-tests-devices-pool" {
					hasPool = true
				} else {
					removeDev(name)
				}
			}
		}
		// We need to remove the pool last as the other devices block it
		if hasPool {
			removeDev("docker-unit-tests-devices-pool")
		}
	}
}
func init() {
	os.Setenv("TEST", "1")

	// Hack to run sys init during unit testing
	if selfPath := utils.SelfPath(); selfPath == "/sbin/init" || selfPath == "/.dockerinit" {
		SysInit()
		return
	}

	if uid := syscall.Geteuid(); uid != 0 {
		log.Fatal("docker tests need to be run as root")
	}

	NetworkBridgeIface = unitTestNetworkBridge

	cleanupDevMapper()

	// Always start from a clean set of loopback mounts
	err := os.RemoveAll(unitTestStoreDevicesBase)
	if err != nil {
		panic(err)
	}

	deviceset := devmapper.NewDeviceSetDM(unitTestStoreDevicesBase)
	// Create a device, which triggers the initialization of the base FS.
	// This avoids other tests doing this and timing out.
	deviceset.AddDevice("init", "")

	// Make it our Store root
	if runtime, err := NewRuntimeFromDirectory(unitTestStoreBase, deviceset, false); err != nil {
		panic(err)
	} else {
		globalRuntime = runtime
	}

	// Create the "Server"
	srv := &Server{
		runtime:     globalRuntime,
		enableCors:  false,
		pullingPool: make(map[string]struct{}),
		pushingPool: make(map[string]struct{}),
	}

	// If the unit test image is not found, try to download it.
	if img, err := globalRuntime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {
		// Retrieve the image
		if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
			panic(err)
		}
	}

	// Spawn a daemon
	go func() {
		if err := ListenAndServe(testDaemonProto, testDaemonAddr, srv, os.Getenv("DEBUG") != ""); err != nil {
			panic(err)
		}
	}()

	// Give ListenAndServe some time to actually start
	time.Sleep(time.Second)

	startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine()
}
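// startFds and startGoroutines record a baseline after the daemon is up so
// that leaks can be checked against it at the end of a run. A minimal sketch
// of such a check (illustrative only; TestFinal and the threshold are
// hypothetical, the real comparison lives elsewhere in the test suite):
//
//	func TestFinal(t *testing.T) {
//		nuke(globalRuntime)
//		if fds := utils.GetTotalUsedFds(); fds > startFds+5 {
//			t.Errorf("Possible fd leak: started with %d, now %d", startFds, fds)
//		}
//		t.Logf("Goroutines: started with %d, now %d", startGoroutines, runtime.NumGoroutine())
//	}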
// FIXME: test that ImagePull(json=true) sends correct json output
func GetTestImage(runtime *Runtime) *Image {
	imgs, err := runtime.graph.Map()
	if err != nil {
		panic(err)
	}
	for _, image := range imgs {
		if image.ID == unitTestImageID {
			return image
		}
	}
	panic(fmt.Errorf("Test image %v not found", unitTestImageID))
}
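// The tests below rely on mkRuntime and mkContainer, shared test helpers
// defined elsewhere in the package (not in this file). The common pattern is:
//
//	runtime := mkRuntime(t)       // fresh test runtime
//	defer nuke(runtime)           // tear everything down at the end
//	container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
//	defer runtime.Destroy(container)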
func TestRuntimeCreate(t *testing.T) {
	runtime := mkRuntime(t)
	defer nuke(runtime)

	// Make sure we start with 0 containers
	if len(runtime.List()) != 0 {
		t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
	}

	container, err := runtime.Create(&Config{
		Image: GetTestImage(runtime).ID,
		Cmd:   []string{"ls", "-al"},
	},
	)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := runtime.Destroy(container); err != nil {
			t.Error(err)
		}
	}()

	// Make sure we can find the newly created container with List()
	if len(runtime.List()) != 1 {
		t.Errorf("Expected 1 container, %v found", len(runtime.List()))
	}

	// Make sure the container List() returns is the right one
	if runtime.List()[0].ID != container.ID {
		t.Errorf("Unexpected container %v returned by List", runtime.List()[0])
	}

	// Make sure we can get the container with Get()
	if runtime.Get(container.ID) == nil {
		t.Errorf("Unable to get newly created container")
	}

	// Make sure it is the right container
	if runtime.Get(container.ID) != container {
		t.Errorf("Get() returned the wrong container")
	}

	// Make sure Exists returns it as existing
	if !runtime.Exists(container.ID) {
		t.Errorf("Exists() returned false for a newly created container")
	}

	// Make sure create with bad parameters returns an error
	_, err = runtime.Create(
		&Config{
			Image: GetTestImage(runtime).ID,
		},
	)
	if err == nil {
		t.Fatal("Builder.Create should throw an error when Cmd is missing")
	}

	_, err = runtime.Create(
		&Config{
			Image: GetTestImage(runtime).ID,
			Cmd:   []string{},
		},
	)
	if err == nil {
		t.Fatal("Builder.Create should throw an error when Cmd is empty")
	}
}
func TestDestroy(t *testing.T) {
	runtime := mkRuntime(t)
	defer nuke(runtime)

	container, err := runtime.Create(&Config{
		Image: GetTestImage(runtime).ID,
		Cmd:   []string{"ls", "-al"},
	},
	)
	if err != nil {
		t.Fatal(err)
	}

	// Destroy
	if err := runtime.Destroy(container); err != nil {
		t.Error(err)
	}

	// Make sure runtime.Exists() behaves correctly
	if runtime.Exists("test_destroy") {
		t.Errorf("Exists() returned true")
	}

	// Make sure runtime.List() doesn't list the destroyed container
	if len(runtime.List()) != 0 {
		t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
	}

	// Make sure runtime.Get() refuses to return the destroyed container
	if runtime.Get(container.ID) != nil {
		t.Errorf("Get() returned a destroyed container")
	}

	// Make sure the container root directory does not exist anymore
	_, err = os.Stat(container.root)
	if err == nil || !os.IsNotExist(err) {
		t.Errorf("Container root directory still exists after destroy")
	}

	// Test double destroy
	if err := runtime.Destroy(container); err == nil {
		// It should have failed
		t.Errorf("Double destroy did not fail")
	}
}
func TestGet(t *testing.T) {
	runtime := mkRuntime(t)
	defer nuke(runtime)

	container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
	defer runtime.Destroy(container1)

	container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
	defer runtime.Destroy(container2)

	container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
	defer runtime.Destroy(container3)

	if runtime.Get(container1.ID) != container1 {
		t.Errorf("Get(test1) returned %v while expecting %v", runtime.Get(container1.ID), container1)
	}
	if runtime.Get(container2.ID) != container2 {
		t.Errorf("Get(test2) returned %v while expecting %v", runtime.Get(container2.ID), container2)
	}
	if runtime.Get(container3.ID) != container3 {
		t.Errorf("Get(test3) returned %v while expecting %v", runtime.Get(container3.ID), container3)
	}
}
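// startEchoServerContainer starts a container running a socat echo server on
// the first free port at or above 5555, waits for it to come up, and returns
// the runtime, the container, and the host port the echo server was mapped to.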
func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) {
	var err error
	runtime := mkRuntime(t)
	port := 5554
	var container *Container
	var strPort string
	for {
		port += 1
		strPort = strconv.Itoa(port)
		var cmd string
		if proto == "tcp" {
			cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat"
		} else if proto == "udp" {
			cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat"
		} else {
			t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
		}
		t.Log("Trying port", strPort)
		container, err = runtime.Create(&Config{
			Image:     GetTestImage(runtime).ID,
			Cmd:       []string{"sh", "-c", cmd},
			PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)},
		})
		if container != nil {
			break
		}
		if err != nil {
			nuke(runtime)
			t.Fatal(err)
		}
		t.Logf("Port %v already in use", strPort)
	}

	hostConfig := &HostConfig{}
	if err := container.Start(hostConfig); err != nil {
		nuke(runtime)
		t.Fatal(err)
	}

	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
		for !container.State.Running {
			time.Sleep(10 * time.Millisecond)
		}
	})

	// Even if the state is running, let's give lxc some time to spawn the process
	container.WaitTimeout(500 * time.Millisecond)

	strPort = container.NetworkSettings.PortMapping[strings.Title(proto)][strPort]
	return runtime, container, strPort
}
// Run a container with a TCP port allocated, and test that it can receive connections on localhost
func TestAllocateTCPPortLocalhost(t *testing.T) {
	runtime, container, port := startEchoServerContainer(t, "tcp")
	defer nuke(runtime)
	defer container.Kill()

	for i := 0; i != 10; i++ {
		conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port))
		if err != nil {
			t.Fatal(err)
		}
		defer conn.Close()

		input := bytes.NewBufferString("well hello there\n")
		_, err = conn.Write(input.Bytes())
		if err != nil {
			t.Fatal(err)
		}
		buf := make([]byte, 16)
		read := 0
		conn.SetReadDeadline(time.Now().Add(3 * time.Second))
		read, err = conn.Read(buf)
		if err != nil {
			if err, ok := err.(*net.OpError); ok {
				if err.Err == syscall.ECONNRESET {
					t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec")
					conn.Close()
					time.Sleep(time.Second)
					continue
				}
				if err.Timeout() {
					t.Log("Timeout, trying again")
					conn.Close()
					continue
				}
			}
			t.Fatal(err)
		}
		output := string(buf[:read])
		if !strings.Contains(output, "well hello there") {
			t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output))
		} else {
			return
		}
	}
	t.Fatal("No reply from the container")
}
// Run a container with a UDP port allocated, and test that it can receive connections on localhost
func TestAllocateUDPPortLocalhost(t *testing.T) {
	runtime, container, port := startEchoServerContainer(t, "udp")
	defer nuke(runtime)
	defer container.Kill()

	conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	input := bytes.NewBufferString("well hello there\n")
	buf := make([]byte, 16)
	// Try for a minute, for some reason the select in socat may take ages
	// to return even though everything on the path seems fine (i.e: the
	// UDPProxy forwards the traffic correctly and you can see the packets
	// on the interface from within the container).
	for i := 0; i != 120; i++ {
		_, err := conn.Write(input.Bytes())
		if err != nil {
			t.Fatal(err)
		}
		conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond))
		read, err := conn.Read(buf)
		if err == nil {
			output := string(buf[:read])
			if strings.Contains(output, "well hello there") {
				return
			}
		}
	}
	t.Fatal("No reply from the container")
}
func TestRestore(t *testing.T) {
	runtime1 := mkRuntime(t)
	defer nuke(runtime1)

	// Create a container with one instance of docker
	container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
	defer runtime1.Destroy(container1)

	// Create a second container meant to be killed
	container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
	defer runtime1.Destroy(container2)

	// Start the container non-blocking
	hostConfig := &HostConfig{}
	if err := container2.Start(hostConfig); err != nil {
		t.Fatal(err)
	}
	if !container2.State.Running {
		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
	}

	// Simulate a crash/manual quit of dockerd: process dies, state stays 'Running'
	cStdin, _ := container2.StdinPipe()
	cStdin.Close()
	if err := container2.WaitTimeout(2 * time.Second); err != nil {
		t.Fatal(err)
	}
	container2.State.Running = true
	container2.ToDisk()

	if len(runtime1.List()) != 2 {
		t.Errorf("Expected 2 containers, %v found", len(runtime1.List()))
	}
	if err := container1.Run(); err != nil {
		t.Fatal(err)
	}
	if !container2.State.Running {
		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
	}

	// Here we are simulating a docker restart - that is, reloading all containers
	// from scratch
	runtime2, err := NewRuntimeFromDirectory(runtime1.root, runtime1.deviceSet, false)
	if err != nil {
		t.Fatal(err)
	}
	defer nuke(runtime2)

	if len(runtime2.List()) != 2 {
		t.Errorf("Expected 2 containers, %v found", len(runtime2.List()))
	}
	runningCount := 0
	for _, c := range runtime2.List() {
		if c.State.Running {
			t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
			runningCount++
		}
	}
	if runningCount != 0 {
		t.Fatalf("Expected 0 containers alive, %d found", runningCount)
	}

	container3 := runtime2.Get(container1.ID)
	if container3 == nil {
		t.Fatal("Unable to Get container")
	}
	if err := container3.Run(); err != nil {
		t.Fatal(err)
	}
	container2.State.Running = false
}