runtime_test.go

package docker

import (
	"bytes"
	"fmt"
	"github.com/dotcloud/docker/utils"
	"io"
	"log"
	"net"
	"os"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"testing"
	"time"
)

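// The constants below pin down the isolated environment used by the unit
// tests: a dedicated test image, a separate network bridge, a private
// storage root, and the address the test daemon listens on.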
const (
	unitTestImageName     = "docker-test-image"
	unitTestImageID       = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
	unitTestNetworkBridge = "testdockbr0"
	unitTestStoreBase     = "/var/lib/docker/unit-tests"
	testDaemonAddr        = "127.0.0.1:4270"
	testDaemonProto       = "tcp"
)

var globalRuntime *Runtime

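// nuke kills every container in parallel and then removes the runtime's
// root directory. It is the heavy-handed teardown used by most tests.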
func nuke(runtime *Runtime) error {
	var wg sync.WaitGroup
	for _, container := range runtime.List() {
		wg.Add(1)
		go func(c *Container) {
			c.Kill()
			wg.Done()
		}(container)
	}
	wg.Wait()
	return os.RemoveAll(runtime.root)
}

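// cleanup kills and destroys every container, then deletes every image
// except the shared unit-test image, leaving the runtime reusable.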
func cleanup(runtime *Runtime) error {
	for _, container := range runtime.List() {
		container.Kill()
		runtime.Destroy(container)
	}
	images, err := runtime.graph.All()
	if err != nil {
		return err
	}
	for _, image := range images {
		if image.ID != unitTestImageID {
			runtime.graph.Delete(image.ID)
		}
	}
	return nil
}

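// layerArchive opens a tarball on disk and exposes it as an io.Reader.
// As the FIXME notes, the caller has no way to close the underlying file.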
func layerArchive(tarfile string) (io.Reader, error) {
	// FIXME: need to close f somewhere
	f, err := os.Open(tarfile)
	if err != nil {
		return nil, err
	}
	return f, nil
}

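// One possible way to address the FIXME above (a sketch, not part of the
// original API): buffer the tarball in memory so the file descriptor can be
// closed before returning. Only reasonable for small test fixtures.
func layerArchiveBuffered(tarfile string) (io.Reader, error) {
	f, err := os.Open(tarfile)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, f); err != nil {
		return nil, err
	}
	return &buf, nil
}
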
func init() {
	// Hack to run sys init during unit testing
	if utils.SelfPath() == "/sbin/init" {
		SysInit()
		return
	}
	if uid := syscall.Geteuid(); uid != 0 {
		log.Fatal("docker tests need to be run as root")
	}
	NetworkBridgeIface = unitTestNetworkBridge
	// Make it our Store root
	runtime, err := NewRuntimeFromDirectory(unitTestStoreBase, false)
	if err != nil {
		panic(err)
	}
	globalRuntime = runtime
	// Create the "Server"
	srv := &Server{
		runtime:     runtime,
		enableCors:  false,
		pullingPool: make(map[string]struct{}),
		pushingPool: make(map[string]struct{}),
	}
	// If the unit-test image is not found locally, try to download it.
	if img, err := runtime.repositories.LookupImage(unitTestImageName); err != nil || img.ID != unitTestImageID {
		// Retrieve the image
		if err := srv.ImagePull(unitTestImageName, "", os.Stdout, utils.NewStreamFormatter(false), nil); err != nil {
			panic(err)
		}
	}
	// Spawn a daemon
	go func() {
		if err := ListenAndServe(testDaemonProto, testDaemonAddr, srv, os.Getenv("DEBUG") != ""); err != nil {
			panic(err)
		}
	}()
	// Give ListenAndServe some time to actually start
	time.Sleep(time.Second)
}

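// GetTestImage returns the shared unit-test image from the runtime's graph
// and panics if it cannot be found.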
// FIXME: test that ImagePull(json=true) sends correct JSON output
func GetTestImage(runtime *Runtime) *Image {
	imgs, err := runtime.graph.All()
	if err != nil {
		panic(err)
	}
	for i := range imgs {
		if imgs[i].ID == unitTestImageID {
			return imgs[i]
		}
	}
	panic(fmt.Errorf("Test image %v not found", unitTestImageID))
}

func TestRuntimeCreate(t *testing.T) {
	runtime := mkRuntime(t)
	defer nuke(runtime)
	// Make sure we start with 0 containers
	if len(runtime.List()) != 0 {
		t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
	}
	builder := NewBuilder(runtime)
	container, err := builder.Create(&Config{
		Image: GetTestImage(runtime).ID,
		Cmd:   []string{"ls", "-al"},
	},
	)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := runtime.Destroy(container); err != nil {
			t.Error(err)
		}
	}()
	// Make sure we can find the newly created container with List()
	if len(runtime.List()) != 1 {
		t.Errorf("Expected 1 container, %v found", len(runtime.List()))
	}
	// Make sure the container List() returns is the right one
	if runtime.List()[0].ID != container.ID {
		t.Errorf("Unexpected container %v returned by List", runtime.List()[0])
	}
	// Make sure we can get the container with Get()
	if runtime.Get(container.ID) == nil {
		t.Errorf("Unable to get newly created container")
	}
	// Make sure it is the right container
	if runtime.Get(container.ID) != container {
		t.Errorf("Get() returned the wrong container")
	}
	// Make sure Exists returns it as existing
	if !runtime.Exists(container.ID) {
		t.Errorf("Exists() returned false for a newly created container")
	}
	// Make sure Create with bad parameters returns an error
	_, err = builder.Create(
		&Config{
			Image: GetTestImage(runtime).ID,
		},
	)
	if err == nil {
		t.Fatal("Builder.Create should return an error when Cmd is missing")
	}
	_, err = builder.Create(
		&Config{
			Image: GetTestImage(runtime).ID,
			Cmd:   []string{},
		},
	)
	if err == nil {
		t.Fatal("Builder.Create should return an error when Cmd is empty")
	}
}

func TestDestroy(t *testing.T) {
	runtime := mkRuntime(t)
	defer nuke(runtime)
	container, err := NewBuilder(runtime).Create(&Config{
		Image: GetTestImage(runtime).ID,
		Cmd:   []string{"ls", "-al"},
	},
	)
	if err != nil {
		t.Fatal(err)
	}
	// Destroy
	if err := runtime.Destroy(container); err != nil {
		t.Error(err)
	}
	// Make sure runtime.Exists() behaves correctly
	if runtime.Exists("test_destroy") {
		t.Errorf("Exists() returned true")
	}
	// Make sure runtime.List() doesn't list the destroyed container
	if len(runtime.List()) != 0 {
		t.Errorf("Expected 0 containers, %v found", len(runtime.List()))
	}
	// Make sure runtime.Get() refuses to return the nonexistent container
	if runtime.Get(container.ID) != nil {
		t.Errorf("Got a destroyed container from Get()")
	}
	// Make sure the container root directory does not exist anymore
	_, err = os.Stat(container.root)
	if err == nil || !os.IsNotExist(err) {
		t.Errorf("Container root directory still exists after destroy")
	}
	// Test double destroy
	if err := runtime.Destroy(container); err == nil {
		// It should have failed
		t.Errorf("Double destroy did not fail")
	}
}

func TestGet(t *testing.T) {
	runtime := mkRuntime(t)
	defer nuke(runtime)
	container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
	defer runtime.Destroy(container1)
	container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
	defer runtime.Destroy(container2)
	container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
	defer runtime.Destroy(container3)
	if runtime.Get(container1.ID) != container1 {
		t.Errorf("Get(test1) returned %v while expecting %v", runtime.Get(container1.ID), container1)
	}
	if runtime.Get(container2.ID) != container2 {
		t.Errorf("Get(test2) returned %v while expecting %v", runtime.Get(container2.ID), container2)
	}
	if runtime.Get(container3.ID) != container3 {
		t.Errorf("Get(test3) returned %v while expecting %v", runtime.Get(container3.ID), container3)
	}
}

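// startEchoServerContainer starts a container running a socat echo server,
// scanning ports upward from 5555 until one is free, and returns the
// runtime, the container, and the host port the service was mapped to.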
func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, string) {
	var err error
	runtime := mkRuntime(t)
	port := 5554
	var container *Container
	var strPort string
	for {
		port += 1
		strPort = strconv.Itoa(port)
		var cmd string
		if proto == "tcp" {
			cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat"
		} else if proto == "udp" {
			cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat"
		} else {
			t.Fatal(fmt.Errorf("Unknown protocol %v", proto))
		}
		t.Log("Trying port", strPort)
		container, err = NewBuilder(runtime).Create(&Config{
			Image:     GetTestImage(runtime).ID,
			Cmd:       []string{"sh", "-c", cmd},
			PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)},
		})
		if container != nil {
			break
		}
		if err != nil {
			nuke(runtime)
			t.Fatal(err)
		}
		t.Logf("Port %v already in use", strPort)
	}
	hostConfig := &HostConfig{}
	if err := container.Start(hostConfig); err != nil {
		nuke(runtime)
		t.Fatal(err)
	}
	setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() {
		for !container.State.Running {
			time.Sleep(10 * time.Millisecond)
		}
	})
	// Even if the state is running, let's give lxc some time to spawn the process
	container.WaitTimeout(500 * time.Millisecond)
	strPort = container.NetworkSettings.PortMapping[strings.Title(proto)][strPort]
	return runtime, container, strPort
}

// Run a container with a TCP port allocated, and test that it can receive connections on localhost
func TestAllocateTCPPortLocalhost(t *testing.T) {
	runtime, container, port := startEchoServerContainer(t, "tcp")
	defer nuke(runtime)
	defer container.Kill()
	for i := 0; i != 10; i++ {
		conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port))
		if err != nil {
			t.Fatal(err)
		}
		defer conn.Close()
		input := bytes.NewBufferString("well hello there\n")
		_, err = conn.Write(input.Bytes())
		if err != nil {
			t.Fatal(err)
		}
		buf := make([]byte, 16)
		read := 0
		conn.SetReadDeadline(time.Now().Add(3 * time.Second))
		read, err = conn.Read(buf)
		if err != nil {
			if err, ok := err.(*net.OpError); ok {
				if err.Err == syscall.ECONNRESET {
					t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec")
					conn.Close()
					time.Sleep(time.Second)
					continue
				}
				if err.Timeout() {
					t.Log("Timeout, trying again")
					conn.Close()
					continue
				}
			}
			t.Fatal(err)
		}
		output := string(buf[:read])
		if !strings.Contains(output, "well hello there") {
			t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output))
		} else {
			return
		}
	}
	t.Fatal("No reply from the container")
}

// Run a container with a UDP port allocated, and test that it can receive connections on localhost
func TestAllocateUDPPortLocalhost(t *testing.T) {
	runtime, container, port := startEchoServerContainer(t, "udp")
	defer nuke(runtime)
	defer container.Kill()
	conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port))
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	input := bytes.NewBufferString("well hello there\n")
	buf := make([]byte, 16)
	// Try for a minute; for some reason the select in socat may take ages
	// to return even though everything on the path seems fine (i.e. the
	// UDPProxy forwards the traffic correctly and you can see the packets
	// on the interface from within the container).
	for i := 0; i != 120; i++ {
		_, err := conn.Write(input.Bytes())
		if err != nil {
			t.Fatal(err)
		}
		conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond))
		read, err := conn.Read(buf)
		if err == nil {
			output := string(buf[:read])
			if strings.Contains(output, "well hello there") {
				return
			}
		}
	}
	t.Fatal("No reply from the container")
}

func TestRestore(t *testing.T) {
	runtime1 := mkRuntime(t)
	defer nuke(runtime1)
	// Create a container with one instance of docker
	container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
	defer runtime1.Destroy(container1)
	// Create a second container meant to be killed
	container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
	defer runtime1.Destroy(container2)
	// Start the container non blocking
	hostConfig := &HostConfig{}
	if err := container2.Start(hostConfig); err != nil {
		t.Fatal(err)
	}
	if !container2.State.Running {
		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
	}
	// Simulate a crash/manual quit of dockerd: process dies, state stays 'Running'
	cStdin, _ := container2.StdinPipe()
	cStdin.Close()
	if err := container2.WaitTimeout(2 * time.Second); err != nil {
		t.Fatal(err)
	}
	container2.State.Running = true
	container2.ToDisk()
	if len(runtime1.List()) != 2 {
		t.Errorf("Expected 2 containers, %v found", len(runtime1.List()))
	}
	if err := container1.Run(); err != nil {
		t.Fatal(err)
	}
	if !container2.State.Running {
		t.Fatalf("Container %v should appear as running but isn't", container2.ID)
	}
	// Here we are simulating a docker restart - that is, reloading all containers
	// from scratch
	runtime2, err := NewRuntimeFromDirectory(runtime1.root, false)
	if err != nil {
		t.Fatal(err)
	}
	defer nuke(runtime2)
	if len(runtime2.List()) != 2 {
		t.Errorf("Expected 2 containers, %v found", len(runtime2.List()))
	}
	runningCount := 0
	for _, c := range runtime2.List() {
		if c.State.Running {
			t.Errorf("Running container found: %v (%v)", c.ID, c.Path)
			runningCount++
		}
	}
	if runningCount != 0 {
		t.Fatalf("Expected 0 containers alive, %d found", runningCount)
	}
	container3 := runtime2.Get(container1.ID)
	if container3 == nil {
		t.Fatal("Unable to Get container")
	}
	if err := container3.Run(); err != nil {
		t.Fatal(err)
	}
	container2.State.Running = false
}