runtime_test.go 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532
  1. package docker
  2. import (
  3. "bytes"
  4. "fmt"
  5. "github.com/dotcloud/docker/devmapper"
  6. "github.com/dotcloud/docker/utils"
  7. "io"
  8. "io/ioutil"
  9. "log"
  10. "net"
  11. "os"
  12. "runtime"
  13. "strconv"
  14. "strings"
  15. "sync"
  16. "syscall"
  17. "testing"
  18. "time"
  19. )
// Shared fixtures for the runtime unit tests.
const (
	unitTestImageName        = "docker-test-image"                                                         // repository name of the pinned test image
	unitTestImageID          = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
	unitTestNetworkBridge    = "testdockbr0"                        // dedicated bridge so tests don't touch the default one
	unitTestStoreBase        = "/var/lib/docker/unit-tests"         // graph/store root used by the test runtime
	unitTestStoreDevicesBase = "/var/lib/docker/unit-tests-devices" // device-mapper backing store, wiped on init
	testDaemonAddr           = "127.0.0.1:4270"                     // address the in-process test daemon listens on
	testDaemonProto          = "tcp"
)
var (
	// globalRuntime is the shared Runtime created once in init() and reused
	// by the in-process test daemon.
	globalRuntime *Runtime
	// startFds and startGoroutines are recorded at the end of init() as a
	// baseline for leak checks in the tests.
	startFds        int
	startGoroutines int
)
  34. func nuke(runtime *Runtime) error {
  35. var wg sync.WaitGroup
  36. for _, container := range runtime.List() {
  37. wg.Add(1)
  38. go func(c *Container) {
  39. c.Kill()
  40. wg.Done()
  41. }(container)
  42. }
  43. wg.Wait()
  44. for _, container := range runtime.List() {
  45. container.EnsureUnmounted()
  46. }
  47. return os.RemoveAll(runtime.root)
  48. }
  49. func cleanup(runtime *Runtime) error {
  50. for _, container := range runtime.List() {
  51. container.Kill()
  52. runtime.Destroy(container)
  53. }
  54. images, err := runtime.graph.Map()
  55. if err != nil {
  56. return err
  57. }
  58. for _, image := range images {
  59. if image.ID != unitTestImageID {
  60. runtime.DeleteImage(image.ID)
  61. }
  62. }
  63. return nil
  64. }
  65. func cleanupLast(runtime *Runtime) error {
  66. cleanup(runtime)
  67. runtime.deviceSet.Shutdown()
  68. return nil
  69. }
  70. func layerArchive(tarfile string) (io.Reader, error) {
  71. // FIXME: need to close f somewhere
  72. f, err := os.Open(tarfile)
  73. if err != nil {
  74. return nil, err
  75. }
  76. return f, nil
  77. }
  78. // Remove any leftover device mapper devices from earlier runs of the unit tests
  79. func cleanupDevMapper() {
  80. infos, _ := ioutil.ReadDir("/dev/mapper")
  81. if infos != nil {
  82. hasPool := false
  83. for _, info := range infos {
  84. name := info.Name()
  85. if strings.HasPrefix(name, "docker-unit-tests-devices-") {
  86. if name == "docker-unit-tests-devices-pool" {
  87. hasPool = true
  88. } else {
  89. if err := devmapper.RemoveDevice(name); err != nil {
  90. panic(fmt.Errorf("Unable to remove existing device %s: %s", name, err))
  91. }
  92. }
  93. }
  94. // We need to remove the pool last as the other devices block it
  95. if hasPool {
  96. if err := devmapper.RemoveDevice("docker-unit-tests-devices-pool"); err != nil {
  97. panic(fmt.Errorf("Unable to remove existing device docker-unit-tests-devices-pool: %s", name, err))
  98. }
  99. }
  100. }
  101. }
  102. }
// init performs one-time global setup for the whole test package: it wipes
// leftover state from earlier runs, builds the shared globalRuntime, makes
// sure the pinned test image is present (pulling it if needed), and starts
// an in-process daemon on testDaemonAddr. Must run as root.
func init() {
	os.Setenv("TEST", "1")
	// Hack to run sys init during unit testing
	if selfPath := utils.SelfPath(); selfPath == "/sbin/init" || selfPath == "/.dockerinit" {
		SysInit()
		return
	}
	if uid := syscall.Geteuid(); uid != 0 {
		log.Fatal("docker tests need to be run as root")
	}
	// Use a dedicated bridge so the tests never touch the default one.
	NetworkBridgeIface = unitTestNetworkBridge
	// Remove stale device-mapper devices left over from a previous run.
	cleanupDevMapper()
	// Always start from a clean set of loopback mounts
	err := os.RemoveAll(unitTestStoreDevicesBase)
	if err != nil {
		panic(err)
	}
	// Make it our Store root
	if runtime, err := NewRuntimeFromDirectory(unitTestStoreBase, devmapper.NewDeviceSetDM(unitTestStoreDevicesBase), false); err != nil {
		panic(err)
	} else {
		globalRuntime = runtime
	}
	// Create the "Server"
	srv := &Server{
		runtime:     globalRuntime,
		enableCors:  false,
		pullingPool: make(map[string]struct{}),
		pushingPool: make(map[string]struct{}),
	}
	// If the unit test is not found, try to download it.
	if img, err := globalRuntime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {
		// Retrieve the Image
		if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
			panic(err)
		}
	}
	// Spawn a Daemon
	go func() {
		if err := ListenAndServe(testDaemonProto, testDaemonAddr, srv, os.Getenv("DEBUG") != ""); err != nil {
			panic(err)
		}
	}()
	// Give some time to ListenAndServe to actually start
	// NOTE(review): a fixed sleep is racy; a readiness probe on
	// testDaemonAddr would be more reliable.
	time.Sleep(time.Second)
	// Record baselines for fd/goroutine leak checks in the tests.
	startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine()
}
  150. // FIXME: test that ImagePull(json=true) send correct json output
  151. func GetTestImage(runtime *Runtime) *Image {
  152. imgs, err := runtime.graph.Map()
  153. if err != nil {
  154. panic(err)
  155. }
  156. for _, image := range imgs {
  157. if image.ID == unitTestImageID {
  158. return image
  159. }
  160. }
  161. panic(fmt.Errorf("Test image %v not found", unitTestImageID))
  162. }
// TestRuntimeCreate exercises the container creation path: List/Get/Exists
// bookkeeping for a newly created container, and validation failures for
// configs with a missing or empty Cmd.
func TestRuntimeCreate(t *testing.T) {
	runtime := mkRuntime(t)
	defer nuke(runtime)
	// Make sure we start with 0 containers
	if len(runtime.List()) != 0 {
		t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
	}
	container, err := runtime.Create(&Config{
		Image: GetTestImage(runtime).ID,
		Cmd:   []string{"ls", "-al"},
	},
	)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := runtime.Destroy(container); err != nil {
			t.Error(err)
		}
	}()
	// Make sure we can find the newly created container with List()
	if len(runtime.List()) != 1 {
		t.Errorf("Expected 1 container, %v found", len(runtime.List()))
	}
	// Make sure the container List() returns is the right one
	if runtime.List()[0].ID != container.ID {
		t.Errorf("Unexpected container %v returned by List", runtime.List()[0])
	}
	// Make sure we can get the container with Get()
	if runtime.Get(container.ID) == nil {
		t.Errorf("Unable to get newly created container")
	}
	// Make sure it is the right container
	if runtime.Get(container.ID) != container {
		t.Errorf("Get() returned the wrong container")
	}
	// Make sure Exists returns it as existing
	if !runtime.Exists(container.ID) {
		t.Errorf("Exists() returned false for a newly created container")
	}
	// Make sure create with bad parameters returns an error
	_, err = runtime.Create(
		&Config{
			Image: GetTestImage(runtime).ID,
		},
	)
	if err == nil {
		t.Fatal("Builder.Create should throw an error when Cmd is missing")
	}
	_, err = runtime.Create(
		&Config{
			Image: GetTestImage(runtime).ID,
			Cmd:   []string{},
		},
	)
	if err == nil {
		t.Fatal("Builder.Create should throw an error when Cmd is empty")
	}
}
// TestDestroy verifies that destroying a container removes it from the
// runtime's bookkeeping (Exists/List/Get), deletes its root directory on
// disk, and that a second Destroy on the same container fails.
func TestDestroy(t *testing.T) {
	runtime := mkRuntime(t)
	defer nuke(runtime)
	container, err := runtime.Create(&Config{
		Image: GetTestImage(runtime).ID,
		Cmd:   []string{"ls", "-al"},
	},
	)
	if err != nil {
		t.Fatal(err)
	}
	// Destroy
	if err := runtime.Destroy(container); err != nil {
		t.Error(err)
	}
	// Make sure runtime.Exists() behaves correctly
	if runtime.Exists("test_destroy") {
		t.Errorf("Exists() returned true")
	}
	// Make sure runtime.List() doesn't list the destroyed container
	if len(runtime.List()) != 0 {
		t.Errorf("Expected 0 container, %v found", len(runtime.List()))
	}
	// Make sure runtime.Get() refuses to return the unexisting container
	if runtime.Get(container.ID) != nil {
		t.Errorf("Unable to get newly created container")
	}
	// Make sure the container root directory does not exist anymore
	_, err = os.Stat(container.root)
	if err == nil || !os.IsNotExist(err) {
		t.Errorf("Container root directory still exists after destroy")
	}
	// Test double destroy
	if err := runtime.Destroy(container); err == nil {
		// It should have failed
		t.Errorf("Double destroy did not fail")
	}
}
  260. func TestGet(t *testing.T) {
  261. runtime := mkRuntime(t)
  262. defer nuke(runtime)
  263. container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
  264. defer runtime.Destroy(container1)
  265. container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
  266. defer runtime.Destroy(container2)
  267. container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
  268. defer runtime.Destroy(container3)
  269. if runtime.Get(container1.ID) != container1 {
  270. t.Errorf("Get(test1) returned %v while expecting %v", runtime.Get(container1.ID), container1)
  271. }
  272. if runtime.Get(container2.ID) != container2 {
  273. t.Errorf("Get(test2) returned %v while expecting %v", runtime.Get(container2.ID), container2)
  274. }
  275. if runtime.Get(container3.ID) != container3 {
  276. t.Errorf("Get(test3) returned %v while expecting %v", runtime.Get(container3.ID), container3)
  277. }
  278. }
// startEchoServerContainer starts a container running a socat echo server on
// the given protocol ("tcp" or "udp"), scanning ports upward from 5555 until
// creation succeeds. It returns the runtime, the running container, and the
// host-side port the container's port was mapped to.
func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) {
	var err error
	runtime := mkRuntime(t)
	port := 5554
	var container *Container
	var strPort string
	for {
		port += 1
		strPort = strconv.Itoa(port)
		var cmd string
		if proto == "tcp" {
			cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat"
		} else if proto == "udp" {
			cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat"
		} else {
			t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
		}
		t.Log("Trying port", strPort)
		container, err = runtime.Create(&Config{
			Image:     GetTestImage(runtime).ID,
			Cmd:       []string{"sh", "-c", cmd},
			PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)},
		})
		if container != nil {
			break
		}
		if err != nil {
			// NOTE(review): any Create error aborts the test here, so the
			// "port already in use" retry below looks unreachable — confirm
			// whether Create can return (nil, nil) on port conflicts.
			nuke(runtime)
			t.Fatal(err)
		}
		t.Logf("Port %v already in use", strPort)
	}
	hostConfig := &HostConfig{}
	if err := container.Start(hostConfig); err != nil {
		nuke(runtime)
		t.Fatal(err)
	}
	// Poll until the container reports itself as running.
	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
		for !container.State.Running {
			time.Sleep(10 * time.Millisecond)
		}
	})
	// Even if the state is running, lets give some time to lxc to spawn the process
	container.WaitTimeout(500 * time.Millisecond)
	// Translate the container port into the host port it was mapped to.
	strPort = container.NetworkSettings.PortMapping[strings.Title(proto)][strPort]
	return runtime, container, strPort
}
// Run a container with a TCP port allocated, and test that it can receive connections on localhost
//
// The echo exchange is retried up to 10 times because socat inside the
// container may not be listening yet when the proxy accepts the connection.
func TestAllocateTCPPortLocalhost(t *testing.T) {
	runtime, container, port := startEchoServerContainer(t, "tcp")
	defer nuke(runtime)
	defer container.Kill()
	for i := 0; i != 10; i++ {
		conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port))
		if err != nil {
			t.Fatal(err)
		}
		defer conn.Close()
		input := bytes.NewBufferString("well hello there\n")
		_, err = conn.Write(input.Bytes())
		if err != nil {
			t.Fatal(err)
		}
		buf := make([]byte, 16)
		read := 0
		conn.SetReadDeadline(time.Now().Add(3 * time.Second))
		read, err = conn.Read(buf)
		if err != nil {
			if err, ok := err.(*net.OpError); ok {
				// Both a reset and a timeout mean socat probably isn't up
				// yet; close this connection and retry with a fresh one.
				if err.Err == syscall.ECONNRESET {
					t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec")
					conn.Close()
					time.Sleep(time.Second)
					continue
				}
				if err.Timeout() {
					t.Log("Timeout, trying again")
					conn.Close()
					continue
				}
			}
			t.Fatal(err)
		}
		output := string(buf[:read])
		if !strings.Contains(output, "well hello there") {
			t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output))
		} else {
			// Echo received: the port mapping works.
			return
		}
	}
	t.Fatal("No reply from the container")
}
// Run a container with an UDP port allocated, and test that it can receive connections on localhost
func TestAllocateUDPPortLocalhost(t *testing.T) {
	runtime, container, port := startEchoServerContainer(t, "udp")
	defer nuke(runtime)
	defer container.Kill()
	conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	input := bytes.NewBufferString("well hello there\n")
	buf := make([]byte, 16)
	// Try for a minute, for some reason the select in socat may take ages
	// to return even though everything on the path seems fine (i.e: the
	// UDPProxy forwards the traffic correctly and you can see the packets
	// on the interface from within the container).
	for i := 0; i != 120; i++ {
		// UDP is lossy, so resend the datagram on every attempt.
		_, err := conn.Write(input.Bytes())
		if err != nil {
			t.Fatal(err)
		}
		conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond))
		read, err := conn.Read(buf)
		if err == nil {
			output := string(buf[:read])
			if strings.Contains(output, "well hello there") {
				// Echo received: the UDP port mapping works.
				return
			}
		}
	}
	t.Fatal("No reply from the container")
}
// TestRestore simulates a daemon restart: it fakes a crashed container by
// forcing its state back to Running on disk, reloads all containers into a
// second runtime from the same directory, and checks that the restored
// runtime sees both containers and marks none of them as running.
func TestRestore(t *testing.T) {
	runtime1 := mkRuntime(t)
	defer nuke(runtime1)
	// Create a container with one instance of docker
	container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
	defer runtime1.Destroy(container1)
	// Create a second container meant to be killed
	container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
	defer runtime1.Destroy(container2)
	// Start the container non blocking
	hostConfig := &HostConfig{}
	if err := container2.Start(hostConfig); err != nil {
		t.Fatal(err)
	}
	if !container2.State.Running {
		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
	}
	// Simulate a crash/manual quit of dockerd: process dies, states stays 'Running'
	cStdin, _ := container2.StdinPipe()
	cStdin.Close()
	if err := container2.WaitTimeout(2 * time.Second); err != nil {
		t.Fatal(err)
	}
	// Force the stale 'Running' state onto disk, as a crashed daemon would
	// have left it.
	container2.State.Running = true
	container2.ToDisk()
	if len(runtime1.List()) != 2 {
		t.Errorf("Expected 2 container, %v found", len(runtime1.List()))
	}
	if err := container1.Run(); err != nil {
		t.Fatal(err)
	}
	if !container2.State.Running {
		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
	}
	// Here we are simulating a docker restart - that is, reloading all containers
	// from scratch
	runtime2, err := NewRuntimeFromDirectory(runtime1.root, runtime1.deviceSet, false)
	if err != nil {
		t.Fatal(err)
	}
	defer nuke(runtime2)
	if len(runtime2.List()) != 2 {
		t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
	}
	// The restored runtime must not report any container as running.
	runningCount := 0
	for _, c := range runtime2.List() {
		if c.State.Running {
			t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
			runningCount++
		}
	}
	if runningCount != 0 {
		t.Fatalf("Expected 0 container alive, %d found", runningCount)
	}
	// The restored container must still be runnable.
	container3 := runtime2.Get(container1.ID)
	if container3 == nil {
		t.Fatal("Unable to Get container")
	}
	if err := container3.Run(); err != nil {
		t.Fatal(err)
	}
	// Undo the fake 'Running' state so the deferred Destroy succeeds.
	container2.State.Running = false
}