runtime_test.go 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546
  1. package docker
  2. import (
  3. "bytes"
  4. "fmt"
  5. "github.com/dotcloud/docker/devmapper"
  6. "github.com/dotcloud/docker/utils"
  7. "io"
  8. "io/ioutil"
  9. "log"
  10. "net"
  11. "os"
  12. "path/filepath"
  13. "runtime"
  14. "strconv"
  15. "strings"
  16. "sync"
  17. "syscall"
  18. "testing"
  19. "time"
  20. )
const (
	// Name and ID of the base image shared by every unit test.
	unitTestImageName = "docker-test-image"
	unitTestImageID   = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
	// Dedicated bridge name so the tests never touch the real docker bridge.
	unitTestNetworkBridge = "testdockbr0"
	// On-disk roots for the test store and its device-mapper device set.
	unitTestStoreBase        = "/var/lib/docker/unit-tests"
	unitTestStoreDevicesBase = "/var/lib/docker/unit-tests-devices"
	// Address/protocol the test daemon listens on (see init()).
	testDaemonAddr  = "127.0.0.1:4270"
	testDaemonProto = "tcp"
)

var (
	// globalRuntime is the shared Runtime built once in init().
	globalRuntime *Runtime
	// Baseline fd/goroutine counts captured at the end of init();
	// presumably compared later to detect leaks — their consumers are
	// outside this chunk.
	startFds        int
	startGoroutines int
)
  35. func nuke(runtime *Runtime) error {
  36. var wg sync.WaitGroup
  37. for _, container := range runtime.List() {
  38. wg.Add(1)
  39. go func(c *Container) {
  40. c.Kill()
  41. wg.Done()
  42. }(container)
  43. }
  44. wg.Wait()
  45. for _, container := range runtime.List() {
  46. container.EnsureUnmounted()
  47. }
  48. return os.RemoveAll(runtime.root)
  49. }
  50. func cleanup(runtime *Runtime) error {
  51. for _, container := range runtime.List() {
  52. container.Kill()
  53. runtime.Destroy(container)
  54. }
  55. images, err := runtime.graph.Map()
  56. if err != nil {
  57. return err
  58. }
  59. for _, image := range images {
  60. if image.ID != unitTestImageID {
  61. runtime.DeleteImage(image.ID)
  62. }
  63. }
  64. return nil
  65. }
  66. func cleanupLast(runtime *Runtime) error {
  67. cleanup(runtime)
  68. runtime.deviceSet.Shutdown()
  69. return nil
  70. }
  71. func layerArchive(tarfile string) (io.Reader, error) {
  72. // FIXME: need to close f somewhere
  73. f, err := os.Open(tarfile)
  74. if err != nil {
  75. return nil, err
  76. }
  77. return f, nil
  78. }
// Remove any leftover device mapper devices from earlier runs of the unit tests.
//
// removeDev tears down a single /dev/mapper node. It first probes the node
// with a read-only open: ENXIO means the node file exists but no device
// backs it, so the file is simply unlinked. Otherwise the device is removed
// through devmapper; a failure there is fatal, since a leftover device
// would corrupt subsequent test runs.
func removeDev(name string) {
	path := filepath.Join("/dev/mapper", name)
	// NOTE(review): an open error other than ENXIO (e.g. ENOENT) is
	// silently ignored and we still attempt RemoveDevice, which will
	// panic below if the device is gone — confirm this is intended.
	fd, err := syscall.Open(path, syscall.O_RDONLY, 07777)
	if err != nil {
		if err == syscall.ENXIO {
			// No device for this node, just remove it
			os.Remove(path)
			return
		}
	} else {
		syscall.Close(fd)
	}
	if err := devmapper.RemoveDevice(name); err != nil {
		panic(fmt.Errorf("Unable to remove existing device %s: %s", name, err))
	}
}
  96. func cleanupDevMapper() {
  97. infos, _ := ioutil.ReadDir("/dev/mapper")
  98. if infos != nil {
  99. hasPool := false
  100. for _, info := range infos {
  101. name := info.Name()
  102. if strings.HasPrefix(name, "docker-unit-tests-devices-") {
  103. if name == "docker-unit-tests-devices-pool" {
  104. hasPool = true
  105. } else {
  106. removeDev(name)
  107. }
  108. }
  109. // We need to remove the pool last as the other devices block it
  110. if hasPool {
  111. removeDev("docker-unit-tests-devices-pool")
  112. }
  113. }
  114. }
  115. }
// init prepares the global test environment before any test runs:
// it reroutes re-execution of the test binary into SysInit when invoked as
// a container init process, enforces root privileges, wipes leftover
// device-mapper state, builds the shared globalRuntime plus a Server around
// it, pulls the unit-test base image if missing, spawns the test daemon,
// and records baseline fd/goroutine counts.
func init() {
	os.Setenv("TEST", "1")
	// Hack to run sys init during unit testing
	if selfPath := utils.SelfPath(); selfPath == "/sbin/init" || selfPath == "/.dockerinit" {
		SysInit()
		return
	}
	if uid := syscall.Geteuid(); uid != 0 {
		log.Fatal("docker tests need to be run as root")
	}
	// Use a dedicated bridge so the tests don't disturb a real daemon.
	NetworkBridgeIface = unitTestNetworkBridge
	cleanupDevMapper()
	// Always start from a clean set of loopback mounts
	err := os.RemoveAll(unitTestStoreDevicesBase)
	if err != nil {
		panic(err)
	}
	// Make it our Store root
	if runtime, err := NewRuntimeFromDirectory(unitTestStoreBase, devmapper.NewDeviceSetDM(unitTestStoreDevicesBase), false); err != nil {
		panic(err)
	} else {
		globalRuntime = runtime
	}
	// Create the "Server"
	srv := &Server{
		runtime:     globalRuntime,
		enableCors:  false,
		pullingPool: make(map[string]struct{}),
		pushingPool: make(map[string]struct{}),
	}
	// If the unit test is not found, try to download it.
	if img, err := globalRuntime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {
		// Retrieve the Image
		if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil, nil, true); err != nil {
			panic(err)
		}
	}
	// Spawn a Daemon
	go func() {
		if err := ListenAndServe(testDaemonProto, testDaemonAddr, srv, os.Getenv("DEBUG") != ""); err != nil {
			panic(err)
		}
	}()
	// Give some time to ListenAndServer to actually start
	time.Sleep(time.Second)
	// Baseline resource usage, captured once the daemon is up. (Here
	// `runtime` is the stdlib package — the local shadow above is
	// scoped to its if-statement.)
	startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine()
}
  163. // FIXME: test that ImagePull(json=true) send correct json output
  164. func GetTestImage(runtime *Runtime) *Image {
  165. imgs, err := runtime.graph.Map()
  166. if err != nil {
  167. panic(err)
  168. }
  169. for _, image := range imgs {
  170. if image.ID == unitTestImageID {
  171. return image
  172. }
  173. }
  174. panic(fmt.Errorf("Test image %v not found", unitTestImageID))
  175. }
  176. func TestRuntimeCreate(t *testing.T) {
  177. runtime := mkRuntime(t)
  178. defer nuke(runtime)
  179. // Make sure we start we 0 containers
  180. if len(runtime.List()) != 0 {
  181. t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
  182. }
  183. container, err := runtime.Create(&Config{
  184. Image: GetTestImage(runtime).ID,
  185. Cmd: []string{"ls", "-al"},
  186. },
  187. )
  188. if err != nil {
  189. t.Fatal(err)
  190. }
  191. defer func() {
  192. if err := runtime.Destroy(container); err != nil {
  193. t.Error(err)
  194. }
  195. }()
  196. // Make sure we can find the newly created container with List()
  197. if len(runtime.List()) != 1 {
  198. t.Errorf("Expected 1 container, %v found", len(runtime.List()))
  199. }
  200. // Make sure the container List() returns is the right one
  201. if runtime.List()[0].ID != container.ID {
  202. t.Errorf("Unexpected container %v returned by List", runtime.List()[0])
  203. }
  204. // Make sure we can get the container with Get()
  205. if runtime.Get(container.ID) == nil {
  206. t.Errorf("Unable to get newly created container")
  207. }
  208. // Make sure it is the right container
  209. if runtime.Get(container.ID) != container {
  210. t.Errorf("Get() returned the wrong container")
  211. }
  212. // Make sure Exists returns it as existing
  213. if !runtime.Exists(container.ID) {
  214. t.Errorf("Exists() returned false for a newly created container")
  215. }
  216. // Make sure crete with bad parameters returns an error
  217. _, err = runtime.Create(
  218. &Config{
  219. Image: GetTestImage(runtime).ID,
  220. },
  221. )
  222. if err == nil {
  223. t.Fatal("Builder.Create should throw an error when Cmd is missing")
  224. }
  225. _, err = runtime.Create(
  226. &Config{
  227. Image: GetTestImage(runtime).ID,
  228. Cmd: []string{},
  229. },
  230. )
  231. if err == nil {
  232. t.Fatal("Builder.Create should throw an error when Cmd is empty")
  233. }
  234. }
  235. func TestDestroy(t *testing.T) {
  236. runtime := mkRuntime(t)
  237. defer nuke(runtime)
  238. container, err := runtime.Create(&Config{
  239. Image: GetTestImage(runtime).ID,
  240. Cmd: []string{"ls", "-al"},
  241. },
  242. )
  243. if err != nil {
  244. t.Fatal(err)
  245. }
  246. // Destroy
  247. if err := runtime.Destroy(container); err != nil {
  248. t.Error(err)
  249. }
  250. // Make sure runtime.Exists() behaves correctly
  251. if runtime.Exists("test_destroy") {
  252. t.Errorf("Exists() returned true")
  253. }
  254. // Make sure runtime.List() doesn't list the destroyed container
  255. if len(runtime.List()) != 0 {
  256. t.Errorf("Expected 0 container, %v found", len(runtime.List()))
  257. }
  258. // Make sure runtime.Get() refuses to return the unexisting container
  259. if runtime.Get(container.ID) != nil {
  260. t.Errorf("Unable to get newly created container")
  261. }
  262. // Make sure the container root directory does not exist anymore
  263. _, err = os.Stat(container.root)
  264. if err == nil || !os.IsNotExist(err) {
  265. t.Errorf("Container root directory still exists after destroy")
  266. }
  267. // Test double destroy
  268. if err := runtime.Destroy(container); err == nil {
  269. // It should have failed
  270. t.Errorf("Double destroy did not fail")
  271. }
  272. }
  273. func TestGet(t *testing.T) {
  274. runtime := mkRuntime(t)
  275. defer nuke(runtime)
  276. container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
  277. defer runtime.Destroy(container1)
  278. container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
  279. defer runtime.Destroy(container2)
  280. container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
  281. defer runtime.Destroy(container3)
  282. if runtime.Get(container1.ID) != container1 {
  283. t.Errorf("Get(test1) returned %v while expecting %v", runtime.Get(container1.ID), container1)
  284. }
  285. if runtime.Get(container2.ID) != container2 {
  286. t.Errorf("Get(test2) returned %v while expecting %v", runtime.Get(container2.ID), container2)
  287. }
  288. if runtime.Get(container3.ID) != container3 {
  289. t.Errorf("Get(test3) returned %v while expecting %v", runtime.Get(container3.ID), container3)
  290. }
  291. }
// startEchoServerContainer starts a container running a socat echo server
// for the given protocol ("tcp" or "udp") on the first usable port at or
// above 5555. It returns the runtime, the running container, and the
// host-side port string the container's port was mapped to.
//
// On any failure the runtime is nuked before calling t.Fatal.
func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) {
	var err error
	runtime := mkRuntime(t)
	port := 5554
	var container *Container
	var strPort string
	// Scan ports upward. By convention here, Create returning a nil
	// container with a nil error signals the port is already taken and
	// we should try the next one; a non-nil error is fatal.
	for {
		port += 1
		strPort = strconv.Itoa(port)
		var cmd string
		if proto == "tcp" {
			cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat"
		} else if proto == "udp" {
			cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat"
		} else {
			t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
		}
		t.Log("Trying port", strPort)
		container, err = runtime.Create(&Config{
			Image:     GetTestImage(runtime).ID,
			Cmd:       []string{"sh", "-c", cmd},
			PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)},
		})
		if container != nil {
			break
		}
		if err != nil {
			nuke(runtime)
			t.Fatal(err)
		}
		t.Logf("Port %v already in use", strPort)
	}
	hostConfig := &HostConfig{}
	if err := container.Start(hostConfig); err != nil {
		nuke(runtime)
		t.Fatal(err)
	}
	// Poll until the container reports itself running, capped at 2s.
	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
		for !container.State.Running {
			time.Sleep(10 * time.Millisecond)
		}
	})
	// Even if the state is running, lets give some time to lxc to spawn the process
	container.WaitTimeout(500 * time.Millisecond)
	// Translate the container-side port into the host port it was mapped to.
	strPort = container.NetworkSettings.PortMapping[strings.Title(proto)][strPort]
	return runtime, container, strPort
}
// Run a container with a TCP port allocated, and test that it can receive connections on localhost
func TestAllocateTCPPortLocalhost(t *testing.T) {
	runtime, container, port := startEchoServerContainer(t, "tcp")
	defer nuke(runtime)
	defer container.Kill()
	// Up to 10 attempts: socat inside the container may not be listening
	// yet, in which case the proxy resets the connection and we retry.
	for i := 0; i != 10; i++ {
		conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port))
		if err != nil {
			t.Fatal(err)
		}
		// NOTE(review): deferred inside the loop, so connections already
		// closed on the retry paths below get a harmless second Close
		// when the function returns.
		defer conn.Close()
		input := bytes.NewBufferString("well hello there\n")
		_, err = conn.Write(input.Bytes())
		if err != nil {
			t.Fatal(err)
		}
		buf := make([]byte, 16)
		read := 0
		conn.SetReadDeadline(time.Now().Add(3 * time.Second))
		read, err = conn.Read(buf)
		if err != nil {
			// Retry on connection-reset (server not up yet) and on
			// read timeout; any other error is fatal.
			if err, ok := err.(*net.OpError); ok {
				if err.Err == syscall.ECONNRESET {
					t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec")
					conn.Close()
					time.Sleep(time.Second)
					continue
				}
				if err.Timeout() {
					t.Log("Timeout, trying again")
					conn.Close()
					continue
				}
			}
			t.Fatal(err)
		}
		// Success: the echoed data must contain what we sent.
		output := string(buf[:read])
		if !strings.Contains(output, "well hello there") {
			t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output))
		} else {
			return
		}
	}
	t.Fatal("No reply from the container")
}
  384. // Run a container with an UDP port allocated, and test that it can receive connections on localhost
  385. func TestAllocateUDPPortLocalhost(t *testing.T) {
  386. runtime, container, port := startEchoServerContainer(t, "udp")
  387. defer nuke(runtime)
  388. defer container.Kill()
  389. conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
  390. if err != nil {
  391. t.Fatal(err)
  392. }
  393. defer conn.Close()
  394. input := bytes.NewBufferString("well hello there\n")
  395. buf := make([]byte, 16)
  396. // Try for a minute, for some reason the select in socat may take ages
  397. // to return even though everything on the path seems fine (i.e: the
  398. // UDPProxy forwards the traffic correctly and you can see the packets
  399. // on the interface from within the container).
  400. for i := 0; i != 120; i++ {
  401. _, err := conn.Write(input.Bytes())
  402. if err != nil {
  403. t.Fatal(err)
  404. }
  405. conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond))
  406. read, err := conn.Read(buf)
  407. if err == nil {
  408. output := string(buf[:read])
  409. if strings.Contains(output, "well hello there") {
  410. return
  411. }
  412. }
  413. }
  414. t.Fatal("No reply from the container")
  415. }
// TestRestore simulates a docker daemon restart: containers created under a
// first runtime are reloaded from disk by a second runtime pointed at the
// same root, and none may come back in the Running state — even one whose
// saved state was forged to claim it was still running.
func TestRestore(t *testing.T) {
	runtime1 := mkRuntime(t)
	defer nuke(runtime1)
	// Create a container with one instance of docker
	container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
	defer runtime1.Destroy(container1)
	// Create a second container meant to be killed
	container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
	defer runtime1.Destroy(container2)
	// Start the container non blocking
	hostConfig := &HostConfig{}
	if err := container2.Start(hostConfig); err != nil {
		t.Fatal(err)
	}
	if !container2.State.Running {
		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
	}
	// Simulate a crash/manual quit of dockerd: process dies, states stays 'Running'
	// Closing stdin makes /bin/cat exit, then we wait for it to die...
	cStdin, _ := container2.StdinPipe()
	cStdin.Close()
	if err := container2.WaitTimeout(2 * time.Second); err != nil {
		t.Fatal(err)
	}
	// ...and forge the on-disk state to claim it is still running, as it
	// would look after a daemon crash.
	container2.State.Running = true
	container2.ToDisk()
	if len(runtime1.List()) != 2 {
		t.Errorf("Expected 2 container, %v found", len(runtime1.List()))
	}
	if err := container1.Run(); err != nil {
		t.Fatal(err)
	}
	if !container2.State.Running {
		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
	}
	// Here are are simulating a docker restart - that is, reloading all containers
	// from scratch
	runtime2, err := NewRuntimeFromDirectory(runtime1.root, runtime1.deviceSet, false)
	if err != nil {
		t.Fatal(err)
	}
	defer nuke(runtime2)
	// Both containers must be rediscovered...
	if len(runtime2.List()) != 2 {
		t.Errorf("Expected 2 container, %v found", len(runtime2.List()))
	}
	// ...and every one of them must have been marked stopped on reload.
	runningCount := 0
	for _, c := range runtime2.List() {
		if c.State.Running {
			t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
			runningCount++
		}
	}
	if runningCount != 0 {
		t.Fatalf("Expected 0 container alive, %d found", runningCount)
	}
	// A restored container must still be runnable.
	container3 := runtime2.Get(container1.ID)
	if container3 == nil {
		t.Fatal("Unable to Get container")
	}
	if err := container3.Run(); err != nil {
		t.Fatal(err)
	}
	// Undo the forged flag — presumably so the deferred teardown does not
	// treat the dead container as running; confirm against Destroy/Kill.
	container2.State.Running = false
}