
Update Godeps

Signed-off-by: Jana Radhakrishnan <mrjana@docker.com>
Jana Radhakrishnan 9 years ago
parent
commit
01c9083626
100 changed files with 1873 additions and 6730 deletions
  1. 196 59
      libnetwork/Godeps/Godeps.json
  2. 2 1
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml
  3. 66 0
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md
  4. 97 82
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md
  5. 26 0
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/doc.go
  6. 16 4
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go
  7. 0 53
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go
  8. 0 40
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go
  9. 0 35
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go
  10. 7 0
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go
  11. 4 0
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go
  12. 0 88
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go
  13. 0 122
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go
  14. 3 3
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go
  15. 0 54
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go
  16. 0 28
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md
  17. 0 55
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go
  18. 0 26
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go
  19. 0 61
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md
  20. 0 100
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go
  21. 0 97
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go
  22. 0 20
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
  23. 0 59
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
  24. 0 26
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
  25. 19 4
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go
  26. 78 27
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go
  27. 51 2
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go
  28. 0 283
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go
  29. 1 0
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go
  30. 0 20
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go
  31. 3 3
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go
  32. 15 0
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_solaris.go
  33. 2 2
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go
  34. 62 25
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go
  35. 0 33
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go
  36. 1 1
      libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go
  37. 22 0
      libnetwork/Godeps/_workspace/src/github.com/armon/go-radix/.gitignore
  38. 3 0
      libnetwork/Godeps/_workspace/src/github.com/armon/go-radix/.travis.yml
  39. 20 0
      libnetwork/Godeps/_workspace/src/github.com/armon/go-radix/LICENSE
  40. 36 0
      libnetwork/Godeps/_workspace/src/github.com/armon/go-radix/README.md
  41. 467 0
      libnetwork/Godeps/_workspace/src/github.com/armon/go-radix/radix.go
  42. 191 0
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/LICENSE
  43. 19 0
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/NOTICE
  44. 0 67
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/envfile.go
  45. 0 142
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/envfile_test.go
  46. 38 36
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/hosts.go
  47. 0 164
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_test.go
  48. 1 1
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_windows.go
  49. 0 54
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/ip_test.go
  50. 49 138
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go
  51. 0 301
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/opts_test.go
  52. 0 56
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/throttledevice.go
  53. 0 52
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/ulimit.go
  54. 0 42
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/ulimit_test.go
  55. 0 56
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/weightdevice.go
  56. 3 7
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/backends.go
  57. 0 131
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/discovery_test.go
  58. 2 5
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/entry.go
  59. 0 109
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/file/file.go
  60. 0 114
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/file/file_test.go
  61. 0 53
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/generator_test.go
  62. 21 3
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/kv/kv.go
  63. 0 324
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/kv/kv_test.go
  64. 0 54
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/nodes/nodes.go
  65. 0 51
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go
  66. 0 24
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/homedir/homedir_test.go
  67. 0 158
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/bytespipe_test.go
  68. 0 17
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt_test.go
  69. 0 149
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader_test.go
  70. 0 94
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go
  71. 41 41
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go
  72. 0 65
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go
  73. 1 1
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE
  74. 0 36
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go
  75. 22 6
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go
  76. 0 516
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag_test.go
  77. 0 137
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go
  78. 0 477
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go
  79. 1 1
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
  80. 6 0
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_windows.go
  81. 1 2
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
  82. 0 331
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go
  83. 0 96
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go
  84. 49 27
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/client.go
  85. 0 127
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/client_test.go
  86. 33 1
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/discovery.go
  87. 0 169
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/discovery_test.go
  88. 33 0
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/errors.go
  89. 0 68
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md
  90. 0 41
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go
  91. 0 91
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go
  92. 0 163
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go
  93. 0 168
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go
  94. 0 97
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go
  95. 91 16
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/plugins.go
  96. 36 0
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/transport/http.go
  97. 36 0
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/transport/transport.go
  98. 0 216
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/proxy/network_proxy_test.go
  99. 0 22
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/random/random_test.go
  100. 2 0
      libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/signal_unix.go

+ 196 - 59
libnetwork/Godeps/Godeps.json

@@ -10,27 +10,35 @@
 			"Rev": "70b2c90b260171e829f1ebd7c17f600c11858dbe"
 		},
 		{
-			"ImportPath": "github.com/Microsoft/hcsshim",
-			"Rev": "116e0e9f5ced0cec94ae46d0aa1b3002a325f532"
+			"ImportPath": "github.com/Azure/go-ansiterm/winterm",
+			"Rev": "70b2c90b260171e829f1ebd7c17f600c11858dbe"
+		},
+		{
+			"ImportPath": "github.com/BurntSushi/toml",
+			"Comment": "v0.1.0-16-gf706d00",
+			"Rev": "f706d00e3de6abe700c994cdd545a1a4915af060"
 		},
 		{
 			"ImportPath": "github.com/Microsoft/go-winio",
 			"Rev": "8f9387ea7efabb228a981b9c381142be7667967f"
 		},
 		{
-			"ImportPath": "github.com/BurntSushi/toml",
-			"Comment": "v0.1.0-16-gf706d00",
-			"Rev": "f706d00e3de6abe700c994cdd545a1a4915af060"
+			"ImportPath": "github.com/Microsoft/hcsshim",
+			"Rev": "116e0e9f5ced0cec94ae46d0aa1b3002a325f532"
 		},
 		{
 			"ImportPath": "github.com/Sirupsen/logrus",
-			"Comment": "v0.6.4-12-g467d9d5",
-			"Rev": "467d9d55c2d2c17248441a8fc661561161f40d5e"
+			"Comment": "v0.10.0",
+			"Rev": "4b6ea7319e214d98c938f12692336f7ca9348d6b"
 		},
 		},
 		{
 		{
 			"ImportPath": "github.com/armon/go-metrics",
 			"ImportPath": "github.com/armon/go-metrics",
 			"Rev": "eb0af217e5e9747e41dd5303755356b62d28e3ec"
 			"Rev": "eb0af217e5e9747e41dd5303755356b62d28e3ec"
 		},
 		},
+		{
+			"ImportPath": "github.com/armon/go-radix",
+			"Rev": "e39d623f12e8e41c7b5529e9a9dd67a1e2261f80"
+		},
 		{
 			"ImportPath": "github.com/boltdb/bolt",
 			"Comment": "v1.0-117-g0f053fa",
@@ -71,110 +79,124 @@
 			"Comment": "v1-26-gef32fa3",
 			"Rev": "ef32fa3046d9f249d399f98ebaf9be944430fd1d"
 		},
-		{
-			"ImportPath": "github.com/docker/docker/api/types/blkiodev",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
-		},
 		{
 			"ImportPath": "github.com/docker/docker/opts",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/discovery",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/discovery/kv",
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/homedir",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/ioutils",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/longpath",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/mflag",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/mount",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/parsers/kernel",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/plugins",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/plugins/transport",
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/proxy",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/random",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/reexec",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/signal",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
-		},
-		{
-			"ImportPath": "github.com/docker/docker/pkg/sockets",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/stringid",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/symlink",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/system",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/term",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		},
+		{
+			"ImportPath": "github.com/docker/docker/pkg/term/windows",
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
 		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/tlsconfig",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"Comment": "v1.4.1-11287-geaf138a",
+			"Rev": "eaf138af1fba339d13bc4cccd75e61e37603a51a"
+		},
+		{
+			"ImportPath": "github.com/docker/go-connections/sockets",
+			"Comment": "v0.2.0",
+			"Rev": "34b5052da6b11e27f5f2e357b38b571ddddd3928"
+		},
+		{
+			"ImportPath": "github.com/docker/go-connections/tlsconfig",
+			"Comment": "v0.2.0",
+			"Rev": "34b5052da6b11e27f5f2e357b38b571ddddd3928"
 		},
 		{
-			"ImportPath": "github.com/docker/docker/pkg/ulimit",
-			"Comment": "v1.4.1-8734-g577cf61",
-			"Rev": "577cf61afad695f0ba226cdf8a995a8c78883e51"
+			"ImportPath": "github.com/docker/go-events",
+			"Rev": "2e7d352816128aa84f4d29b2a21d400133701a0d"
 		},
 		{
 			"ImportPath": "github.com/docker/go-units",
@@ -185,6 +207,26 @@
 			"ImportPath": "github.com/docker/libkv",
 			"Rev": "c2aac5dbbaa5c872211edea7c0f32b3bd67e7410"
 		},
+		{
+			"ImportPath": "github.com/docker/libkv/store",
+			"Rev": "c2aac5dbbaa5c872211edea7c0f32b3bd67e7410"
+		},
+		{
+			"ImportPath": "github.com/docker/libkv/store/boltdb",
+			"Rev": "c2aac5dbbaa5c872211edea7c0f32b3bd67e7410"
+		},
+		{
+			"ImportPath": "github.com/docker/libkv/store/consul",
+			"Rev": "c2aac5dbbaa5c872211edea7c0f32b3bd67e7410"
+		},
+		{
+			"ImportPath": "github.com/docker/libkv/store/etcd",
+			"Rev": "c2aac5dbbaa5c872211edea7c0f32b3bd67e7410"
+		},
+		{
+			"ImportPath": "github.com/docker/libkv/store/zookeeper",
+			"Rev": "c2aac5dbbaa5c872211edea7c0f32b3bd67e7410"
+		},
 		{
 			"ImportPath": "github.com/godbus/dbus",
 			"Comment": "v3",
@@ -211,14 +253,23 @@
 			"ImportPath": "github.com/hashicorp/go-msgpack/codec",
 			"Rev": "71c2886f5a673a35f909803f38ece5810165097b"
 		},
+		{
+			"ImportPath": "github.com/hashicorp/go-multierror",
+			"Rev": "2167c8ec40776024589f483a6b836489e47e1049"
+		},
 		{
 			"ImportPath": "github.com/hashicorp/memberlist",
-			"Rev": "9a1e242e454d2443df330bdd51a436d5a9058fc4"
+			"Rev": "88ac4de0d1a0ca6def284b571342db3b777a4c37"
+		},
+		{
+			"ImportPath": "github.com/hashicorp/serf/coordinate",
+			"Comment": "v0.7.0-47-g598c548",
+			"Rev": "598c54895cc5a7b1a24a398d635e8c0ea0959870"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/serf/serf",
-			"Comment": "v0.6.4",
-			"Rev": "7151adcef72687bf95f451a2e0ba15cb19412bf2"
+			"Comment": "v0.7.0-47-g598c548",
+			"Rev": "598c54895cc5a7b1a24a398d635e8c0ea0959870"
 		},
 		{
 			"ImportPath": "github.com/miekg/dns",
@@ -229,6 +280,76 @@
 			"Comment": "v0.0.6-6-gba1568d",
 			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
 		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/configs",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/label",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/selinux",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/system",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/user",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
+		{
+			"ImportPath": "github.com/opencontainers/runc/libcontainer/utils",
+			"Comment": "v0.0.6-6-gba1568d",
+			"Rev": "ba1568de399395774ad84c2ace65937814c542ed"
+		},
 		{
 			"ImportPath": "github.com/samuel/go-zookeeper/zk",
 			"Rev": "d0e0d8e11f318e000a8cc434616d69e329edc374"
@@ -241,6 +362,10 @@
 			"ImportPath": "github.com/stretchr/testify/assert",
 			"Rev": "dab07ac62d4905d3e48d17dc549c684ac3b7c15a"
 		},
+		{
+			"ImportPath": "github.com/stretchr/testify/require",
+			"Rev": "dab07ac62d4905d3e48d17dc549c684ac3b7c15a"
+		},
 		{
 			"ImportPath": "github.com/syndtr/gocapability/capability",
 			"Rev": "2c00daeb6c3b45114c80ac44119e7b8801fdd852"
@@ -253,6 +378,10 @@
 			"ImportPath": "github.com/vishvananda/netlink",
 			"Rev": "631962935bff4f3d20ff32a72e8944f6d2836a26"
 		},
+		{
+			"ImportPath": "github.com/vishvananda/netlink/nl",
+			"Rev": "631962935bff4f3d20ff32a72e8944f6d2836a26"
+		},
 		{
 			"ImportPath": "github.com/vishvananda/netns",
 			"Rev": "604eaf189ee867d8c147fafc28def2394e878d25"
@@ -260,6 +389,14 @@
 		{
 			"ImportPath": "golang.org/x/net/context",
 			"Rev": "9dd48c277bcb2bb2cc3eb6a6368a486a567d3562"
+		},
+		{
+			"ImportPath": "golang.org/x/net/proxy",
+			"Rev": "9dd48c277bcb2bb2cc3eb6a6368a486a567d3562"
+		},
+		{
+			"ImportPath": "golang.org/x/sys/unix",
+			"Rev": "5eaf0df67e70d6997a9fe0ed24383fa1b01638d3"
 		}
 	]
 }
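For orientation: each entry in Godeps.json pins an ImportPath to a specific Rev, and godep vendors that package under Godeps/_workspace/src/<ImportPath>, which is why the file list at the top of this commit mirrors the import paths. The sketch below illustrates that mapping; the helper function is purely illustrative and not part of the commit.

```go
package main

import (
	"fmt"
	"path"
)

// vendoredPath shows where a godep-managed dependency lives inside the repository:
// Godeps/_workspace/src/<ImportPath>, rooted at the libnetwork directory here.
func vendoredPath(importPath string) string {
	return path.Join("libnetwork", "Godeps", "_workspace", "src", importPath)
}

func main() {
	fmt.Println(vendoredPath("github.com/Sirupsen/logrus"))
	// Output: libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus
}
```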

+ 2 - 1
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml

@@ -1,8 +1,9 @@
 language: go
 go:
-  - 1.2
   - 1.3
   - 1.4
+  - 1.5
   - tip
 install:
   - go get -t ./...
+script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./...

+ 66 - 0
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md

@@ -0,0 +1,66 @@
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)

+ 97 - 82
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md

@@ -1,17 +1,18 @@
-# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
 
 Logrus is a structured logger for Go (golang), completely API compatible with
 the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
-yet stable (pre 1.0), the core API is unlikely to change much but please version
-control your Logrus to make sure you aren't fetching latest `master` on every
-build.**
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
 
 Nicely color-coded in development (when a TTY is attached, otherwise just
 plain text):
 
 ![Colored](http://i.imgur.com/PY7qMwd.png)
 
-With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
 or Splunk:
 
 ```json
@@ -31,16 +32,18 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
 "time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
 ```
 
-With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
 attached, the output is compatible with the
 [logfmt](http://godoc.org/github.com/kr/logfmt) format:
 
 ```text
-time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
-time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
-time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
-time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
-time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
 ```
 
 #### Example
@@ -72,17 +75,12 @@ package main
 import (
   "os"
   log "github.com/Sirupsen/logrus"
-  "github.com/Sirupsen/logrus/hooks/airbrake"
 )
 
 func init() {
   // Log as JSON instead of the default ASCII formatter.
   log.SetFormatter(&log.JSONFormatter{})
 
-  // Use the Airbrake hook to report errors that have Error severity or above to
-  // an exception tracker. You can create custom hooks, see the Hooks section.
-  log.AddHook(&logrus_airbrake.AirbrakeHook{})
-
   // Output to stderr instead of stdout, could also be a file.
   log.SetOutput(os.Stderr)
 
@@ -105,6 +103,16 @@ func main() {
     "omg":    true,
     "number": 100,
   }).Fatal("The ice breaks!")
+
+  // A common pattern is to re-use fields between logging statements by re-using
+  // the logrus.Entry returned from WithFields()
+  contextLogger := log.WithFields(log.Fields{
+    "common": "this is a common field",
+    "other": "I also should be logged always",
+  })
+
+  contextLogger.Info("I'll be logged with common and other field")
+  contextLogger.Info("Me too")
 }
 ```
 
@@ -163,54 +171,22 @@ You can add hooks for logging levels. For example to send errors to an exception
 tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
 multiple places simultaneously, e.g. syslog.
 
-```go
-// Not the real implementation of the Airbrake hook. Just a simple sample.
-import (
-  log "github.com/Sirupsen/logrus"
-)
-
-func init() {
-  log.AddHook(new(AirbrakeHook))
-}
-
-type AirbrakeHook struct{}
-
-// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
-// the fields for the entry. See the Fields section of the README.
-func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
-  err := airbrake.Notify(entry.Data["error"].(error))
-  if err != nil {
-    log.WithFields(log.Fields{
-      "source":   "airbrake",
-      "endpoint": airbrake.Endpoint,
-    }).Info("Failed to send error to Airbrake")
-  }
-
-  return nil
-}
-
-// `Levels()` returns a slice of `Levels` the hook is fired for.
-func (hook *AirbrakeHook) Levels() []log.Level {
-  return []log.Level{
-    log.ErrorLevel,
-    log.FatalLevel,
-    log.PanicLevel,
-  }
-}
-```
-
-Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
 
 ```go
 import (
   log "github.com/Sirupsen/logrus"
-  "github.com/Sirupsen/logrus/hooks/airbrake"
-  "github.com/Sirupsen/logrus/hooks/syslog"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
   "log/syslog"
 )
 
 func init() {
-  log.AddHook(new(logrus_airbrake.AirbrakeHook))
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
 
   hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
   if err != nil {
@@ -220,26 +196,37 @@ func init() {
   }
 }
 ```
+Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
+
+| Hook  | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
 
 
-  Send errors to an exception tracking service compatible with the Airbrake API.
-  Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
-
-* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
-  Send errors to the Papertrail hosted logging service via UDP.
-
-* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
-  Send errors to remote syslog server.
-  Uses standard library `log/syslog` behind the scenes.
-
-* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
-  Send errors to a channel in hipchat.
-
-* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly)
-  Send logs to Loggly (https://www.loggly.com/)
-
-* [`github.com/johntdyer/slackrus`](https://github.com/johntdyer/slackrus)
-  Hook for Slack chat.
 
 #### Level logging
 
@@ -295,10 +282,10 @@ init() {
   // do something here to set environment depending on an environment variable
   // or command-line flag
   if Environment == "production" {
-    log.SetFormatter(logrus.JSONFormatter)
+    log.SetFormatter(&log.JSONFormatter{})
   } else {
     // The TextFormatter is default, you don't actually have to do this.
-    log.SetFormatter(logrus.TextFormatter)
+    log.SetFormatter(&log.TextFormatter{})
   }
 }
 ```
@@ -317,10 +304,16 @@ The built-in logging formatters are:
     field to `true`.  To force no colored output even if there is a TTY  set the
     `DisableColors` field to `true`
 * `logrus.JSONFormatter`. Logs fields as JSON.
+* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
+
+    ```go
+      logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
+    ```
 
 Third party logging formatters:
 
-* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
 
 You can define your formatter by implementing the `Formatter` interface,
 requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
@@ -333,7 +326,7 @@ type MyJSONFormatter struct {
 
 log.SetFormatter(new(MyJSONFormatter))
 
-func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
   // Note this doesn't include Time, Level and Message which are available on
   // the Entry. Consult `godoc` on information about those fields or read the
   // source of the official loggers.
@@ -347,7 +340,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
 
 #### Logger as an `io.Writer`
 
-Logrus can be transormed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsability to close it.
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
 
 ```go
 w := logger.Writer()
@@ -366,8 +359,30 @@ and hooks. The level for those entries is `info`.
 #### Rotation
 
 Log rotation is not provided with Logrus. Log rotation should be done by an
-external program (like `logrotated(8)`) that can compress and delete old log
+external program (like `logrotate(8)`) that can compress and delete old log
 entries. It should not be a feature of the application-level logger.
 
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
+
+#### Testing
+
+Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
 
-[godoc]: https://godoc.org/github.com/Sirupsen/logrus
+* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+logger, hook := NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```

+ 26 - 0
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/doc.go

@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+  package main
+
+  import (
+    log "github.com/Sirupsen/logrus"
+  )
+
+  func main() {
+    log.WithFields(log.Fields{
+      "animal": "walrus",
+      "number": 1,
+      "size":   10,
+    }).Info("A walrus appears")
+  }
+
+Output:
+  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus

+ 16 - 4
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go

@@ -8,6 +8,9 @@ import (
 	"time"
 )
 
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
 // An entry is the final or intermediate Logrus logging entry. It contains all
 // the fields passed with WithField{,s}. It's finally logged when Debug, Info,
 // Warn, Error, Fatal or Panic is called on it. These objects can be reused and
@@ -53,6 +56,11 @@ func (entry *Entry) String() (string, error) {
 	return reader.String(), err
 }
 
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
 // Add a single field to the Entry.
 func (entry *Entry) WithField(key string, value interface{}) *Entry {
 	return entry.WithFields(Fields{key: value})
@@ -60,7 +68,7 @@ func (entry *Entry) WithField(key string, value interface{}) *Entry {
 
 // Add a map of fields to the Entry.
 func (entry *Entry) WithFields(fields Fields) *Entry {
-	data := Fields{}
+	data := make(Fields, len(entry.Data)+len(fields))
 	for k, v := range entry.Data {
 		data[k] = v
 	}
@@ -70,12 +78,14 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
 	return &Entry{Logger: entry.Logger, Data: data}
 }
 
-func (entry *Entry) log(level Level, msg string) {
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
 	entry.Time = time.Now()
 	entry.Level = level
 	entry.Message = msg
 
-	if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
+	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
 		entry.Logger.mu.Lock()
 		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
 		entry.Logger.mu.Unlock()
@@ -100,7 +110,7 @@ func (entry *Entry) log(level Level, msg string) {
 	// panic() to use in Entry#Panic(), we avoid the allocation by checking
 	// directly here.
 	if level <= PanicLevel {
-		panic(entry)
+		panic(&entry)
 	}
 }
 
@@ -188,6 +198,7 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) {
 	if entry.Logger.Level >= FatalLevel {
 		entry.Fatal(fmt.Sprintf(format, args...))
 	}
+	os.Exit(1)
 }
 
 func (entry *Entry) Panicf(format string, args ...interface{}) {
@@ -234,6 +245,7 @@ func (entry *Entry) Fatalln(args ...interface{}) {
 	if entry.Logger.Level >= FatalLevel {
 		entry.Fatal(entry.sprintlnn(args...))
 	}
+	os.Exit(1)
 }
 
 func (entry *Entry) Panicln(args ...interface{}) {

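The entry.go hunks above introduce the package-level ErrorKey variable and an Entry.WithError helper (mirrored by a package-level WithError in exported.go further down). A minimal usage sketch follows; the error message, field, and log message are invented for illustration and are not part of this commit.

```go
package main

import (
	"errors"

	log "github.com/Sirupsen/logrus"
)

func main() {
	err := errors.New("connection refused")

	// WithError attaches err under the key named by logrus.ErrorKey ("error"),
	// so it is emitted alongside any other structured fields.
	log.WithError(err).
		WithField("endpoint", "localhost:514").
		Error("failed to reach remote syslog")
}
```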
+ 0 - 53
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go

@@ -1,53 +0,0 @@
-package logrus
-
-import (
-	"bytes"
-	"fmt"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestEntryPanicln(t *testing.T) {
-	errBoom := fmt.Errorf("boom time")
-
-	defer func() {
-		p := recover()
-		assert.NotNil(t, p)
-
-		switch pVal := p.(type) {
-		case *Entry:
-			assert.Equal(t, "kaboom", pVal.Message)
-			assert.Equal(t, errBoom, pVal.Data["err"])
-		default:
-			t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
-		}
-	}()
-
-	logger := New()
-	logger.Out = &bytes.Buffer{}
-	entry := NewEntry(logger)
-	entry.WithField("err", errBoom).Panicln("kaboom")
-}
-
-func TestEntryPanicf(t *testing.T) {
-	errBoom := fmt.Errorf("boom again")
-
-	defer func() {
-		p := recover()
-		assert.NotNil(t, p)
-
-		switch pVal := p.(type) {
-		case *Entry:
-			assert.Equal(t, "kaboom true", pVal.Message)
-			assert.Equal(t, errBoom, pVal.Data["err"])
-		default:
-			t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
-		}
-	}()
-
-	logger := New()
-	logger.Out = &bytes.Buffer{}
-	entry := NewEntry(logger)
-	entry.WithField("err", errBoom).Panicf("kaboom %v", true)
-}

+ 0 - 40
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go

@@ -1,40 +0,0 @@
-package main
-
-import (
-	"github.com/Sirupsen/logrus"
-)
-
-var log = logrus.New()
-
-func init() {
-	log.Formatter = new(logrus.JSONFormatter)
-	log.Formatter = new(logrus.TextFormatter) // default
-}
-
-func main() {
-	defer func() {
-		err := recover()
-		if err != nil {
-			log.WithFields(logrus.Fields{
-				"omg":    true,
-				"err":    err,
-				"number": 100,
-			}).Fatal("The ice breaks!")
-		}
-	}()
-
-	log.WithFields(logrus.Fields{
-		"animal": "walrus",
-		"size":   10,
-	}).Info("A group of walrus emerges from the ocean")
-
-	log.WithFields(logrus.Fields{
-		"omg":    true,
-		"number": 122,
-	}).Warn("The group's number increased tremendously!")
-
-	log.WithFields(logrus.Fields{
-		"animal": "orca",
-		"size":   9009,
-	}).Panic("It's over 9000!")
-}

+ 0 - 35
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go

@@ -1,35 +0,0 @@
-package main
-
-import (
-	"github.com/Sirupsen/logrus"
-	"github.com/Sirupsen/logrus/hooks/airbrake"
-	"github.com/tobi/airbrake-go"
-)
-
-var log = logrus.New()
-
-func init() {
-	log.Formatter = new(logrus.TextFormatter) // default
-	log.Hooks.Add(new(logrus_airbrake.AirbrakeHook))
-}
-
-func main() {
-	airbrake.Endpoint = "https://exceptions.whatever.com/notifier_api/v2/notices.xml"
-	airbrake.ApiKey = "whatever"
-	airbrake.Environment = "production"
-
-	log.WithFields(logrus.Fields{
-		"animal": "walrus",
-		"size":   10,
-	}).Info("A group of walrus emerges from the ocean")
-
-	log.WithFields(logrus.Fields{
-		"omg":    true,
-		"number": 122,
-	}).Warn("The group's number increased tremendously!")
-
-	log.WithFields(logrus.Fields{
-		"omg":    true,
-		"number": 100,
-	}).Fatal("The ice breaks!")
-}

+ 7 - 0
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go

@@ -36,6 +36,8 @@ func SetLevel(level Level) {
 
 // GetLevel returns the standard logger level.
 func GetLevel() Level {
+	std.mu.Lock()
+	defer std.mu.Unlock()
 	return std.Level
 }
 
@@ -46,6 +48,11 @@ func AddHook(hook Hook) {
 	std.Hooks.Add(hook)
 }
 
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+	return std.WithField(ErrorKey, err)
+}
+
 // WithField creates an entry from the standard logger and adds a field to
 // it. If you want multiple fields, use `WithFields`.
 //

+ 4 - 0
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go

@@ -1,5 +1,9 @@
 package logrus
 
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
 // The Formatter interface is used to implement a custom Formatter. It takes an
 // `Entry`. It exposes all the fields, including the default ones:
 //

+ 0 - 88
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go

@@ -1,88 +0,0 @@
-package logrus
-
-import (
-	"testing"
-	"time"
-)
-
-// smallFields is a small size data set for benchmarking
-var smallFields = Fields{
-	"foo":   "bar",
-	"baz":   "qux",
-	"one":   "two",
-	"three": "four",
-}
-
-// largeFields is a large size data set for benchmarking
-var largeFields = Fields{
-	"foo":       "bar",
-	"baz":       "qux",
-	"one":       "two",
-	"three":     "four",
-	"five":      "six",
-	"seven":     "eight",
-	"nine":      "ten",
-	"eleven":    "twelve",
-	"thirteen":  "fourteen",
-	"fifteen":   "sixteen",
-	"seventeen": "eighteen",
-	"nineteen":  "twenty",
-	"a":         "b",
-	"c":         "d",
-	"e":         "f",
-	"g":         "h",
-	"i":         "j",
-	"k":         "l",
-	"m":         "n",
-	"o":         "p",
-	"q":         "r",
-	"s":         "t",
-	"u":         "v",
-	"w":         "x",
-	"y":         "z",
-	"this":      "will",
-	"make":      "thirty",
-	"entries":   "yeah",
-}
-
-func BenchmarkSmallTextFormatter(b *testing.B) {
-	doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
-}
-
-func BenchmarkLargeTextFormatter(b *testing.B) {
-	doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
-}
-
-func BenchmarkSmallColoredTextFormatter(b *testing.B) {
-	doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
-}
-
-func BenchmarkLargeColoredTextFormatter(b *testing.B) {
-	doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
-}
-
-func BenchmarkSmallJSONFormatter(b *testing.B) {
-	doBenchmark(b, &JSONFormatter{}, smallFields)
-}
-
-func BenchmarkLargeJSONFormatter(b *testing.B) {
-	doBenchmark(b, &JSONFormatter{}, largeFields)
-}
-
-func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
-	entry := &Entry{
-		Time:    time.Time{},
-		Level:   InfoLevel,
-		Message: "message",
-		Data:    fields,
-	}
-	var d []byte
-	var err error
-	for i := 0; i < b.N; i++ {
-		d, err = formatter.Format(entry)
-		if err != nil {
-			b.Fatal(err)
-		}
-		b.SetBytes(int64(len(d)))
-	}
-}

+ 0 - 122
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go

@@ -1,122 +0,0 @@
-package logrus
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-type TestHook struct {
-	Fired bool
-}
-
-func (hook *TestHook) Fire(entry *Entry) error {
-	hook.Fired = true
-	return nil
-}
-
-func (hook *TestHook) Levels() []Level {
-	return []Level{
-		DebugLevel,
-		InfoLevel,
-		WarnLevel,
-		ErrorLevel,
-		FatalLevel,
-		PanicLevel,
-	}
-}
-
-func TestHookFires(t *testing.T) {
-	hook := new(TestHook)
-
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Hooks.Add(hook)
-		assert.Equal(t, hook.Fired, false)
-
-		log.Print("test")
-	}, func(fields Fields) {
-		assert.Equal(t, hook.Fired, true)
-	})
-}
-
-type ModifyHook struct {
-}
-
-func (hook *ModifyHook) Fire(entry *Entry) error {
-	entry.Data["wow"] = "whale"
-	return nil
-}
-
-func (hook *ModifyHook) Levels() []Level {
-	return []Level{
-		DebugLevel,
-		InfoLevel,
-		WarnLevel,
-		ErrorLevel,
-		FatalLevel,
-		PanicLevel,
-	}
-}
-
-func TestHookCanModifyEntry(t *testing.T) {
-	hook := new(ModifyHook)
-
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Hooks.Add(hook)
-		log.WithField("wow", "elephant").Print("test")
-	}, func(fields Fields) {
-		assert.Equal(t, fields["wow"], "whale")
-	})
-}
-
-func TestCanFireMultipleHooks(t *testing.T) {
-	hook1 := new(ModifyHook)
-	hook2 := new(TestHook)
-
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Hooks.Add(hook1)
-		log.Hooks.Add(hook2)
-
-		log.WithField("wow", "elephant").Print("test")
-	}, func(fields Fields) {
-		assert.Equal(t, fields["wow"], "whale")
-		assert.Equal(t, hook2.Fired, true)
-	})
-}
-
-type ErrorHook struct {
-	Fired bool
-}
-
-func (hook *ErrorHook) Fire(entry *Entry) error {
-	hook.Fired = true
-	return nil
-}
-
-func (hook *ErrorHook) Levels() []Level {
-	return []Level{
-		ErrorLevel,
-	}
-}
-
-func TestErrorHookShouldntFireOnInfo(t *testing.T) {
-	hook := new(ErrorHook)
-
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Hooks.Add(hook)
-		log.Info("test")
-	}, func(fields Fields) {
-		assert.Equal(t, hook.Fired, false)
-	})
-}
-
-func TestErrorHookShouldFireOnError(t *testing.T) {
-	hook := new(ErrorHook)
-
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Hooks.Add(hook)
-		log.Error("test")
-	}, func(fields Fields) {
-		assert.Equal(t, hook.Fired, true)
-	})
-}

+ 3 - 3
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go

@@ -11,11 +11,11 @@ type Hook interface {
 }
 
 // Internal type for storing the hooks on a logger instance.
-type levelHooks map[Level][]Hook
+type LevelHooks map[Level][]Hook
 
 // Add a hook to an instance of logger. This is called with
 // `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
-func (hooks levelHooks) Add(hook Hook) {
+func (hooks LevelHooks) Add(hook Hook) {
 	for _, level := range hook.Levels() {
 		hooks[level] = append(hooks[level], hook)
 	}
@@ -23,7 +23,7 @@ func (hooks levelHooks) Add(hook Hook) {
 
 // Fire all the hooks for the passed level. Used by `entry.log` to fire
 // appropriate hooks for a log entry.
-func (hooks levelHooks) Fire(level Level, entry *Entry) error {
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
 	for _, hook := range hooks[level] {
 		if err := hook.Fire(entry); err != nil {
 			return err
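The `levelHooks` to `LevelHooks` rename only exports the container type; the `Hook` interface itself is unchanged. As a reminder of how hooks plug in, a small hypothetical hook (names are illustrative) that counts error-level entries:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

// errorCounter is a hypothetical hook that counts entries at or above error
// severity. Fire and Levels are the two methods the Hook interface requires.
type errorCounter struct {
	count int
}

func (h *errorCounter) Fire(entry *log.Entry) error {
	h.count++
	return nil
}

func (h *errorCounter) Levels() []log.Level {
	return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
}

func main() {
	logger := log.New()
	hook := &errorCounter{}
	logger.Hooks.Add(hook) // Hooks is the exported LevelHooks map after this change
	logger.Error("something broke")
	logger.Infof("errors so far: %d", hook.count)
}
```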

+ 0 - 54
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go

@@ -1,54 +0,0 @@
-package logrus_airbrake
-
-import (
-	"github.com/Sirupsen/logrus"
-	"github.com/tobi/airbrake-go"
-)
-
-// AirbrakeHook to send exceptions to an exception-tracking service compatible
-// with the Airbrake API. You must set:
-// * airbrake.Endpoint
-// * airbrake.ApiKey
-// * airbrake.Environment (only sends exceptions when set to "production")
-//
-// Before using this hook, to send an error. Entries that trigger an Error,
-// Fatal or Panic should now include an "error" field to send to Airbrake.
-type AirbrakeHook struct{}
-
-func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
-	if entry.Data["error"] == nil {
-		entry.Logger.WithFields(logrus.Fields{
-			"source":   "airbrake",
-			"endpoint": airbrake.Endpoint,
-		}).Warn("Exceptions sent to Airbrake must have an 'error' key with the error")
-		return nil
-	}
-
-	err, ok := entry.Data["error"].(error)
-	if !ok {
-		entry.Logger.WithFields(logrus.Fields{
-			"source":   "airbrake",
-			"endpoint": airbrake.Endpoint,
-		}).Warn("Exceptions sent to Airbrake must have an `error` key of type `error`")
-		return nil
-	}
-
-	airErr := airbrake.Notify(err)
-	if airErr != nil {
-		entry.Logger.WithFields(logrus.Fields{
-			"source":   "airbrake",
-			"endpoint": airbrake.Endpoint,
-			"error":    airErr,
-		}).Warn("Failed to send error to Airbrake")
-	}
-
-	return nil
-}
-
-func (hook *AirbrakeHook) Levels() []logrus.Level {
-	return []logrus.Level{
-		logrus.ErrorLevel,
-		logrus.FatalLevel,
-		logrus.PanicLevel,
-	}
-}

+ 0 - 28
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md

@@ -1,28 +0,0 @@
-# Papertrail Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />
-
-[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts).
-
-In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible.
-
-## Usage
-
-You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`.
-
-For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs.
-
-```go
-import (
-  "log/syslog"
-  "github.com/Sirupsen/logrus"
-  "github.com/Sirupsen/logrus/hooks/papertrail"
-)
-
-func main() {
-  log       := logrus.New()
-  hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME)
-
-  if err == nil {
-    log.Hooks.Add(hook)
-  }
-}
-```

+ 0 - 55
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go

@@ -1,55 +0,0 @@
-package logrus_papertrail
-
-import (
-	"fmt"
-	"net"
-	"os"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-)
-
-const (
-	format = "Jan 2 15:04:05"
-)
-
-// PapertrailHook to send logs to a logging service compatible with the Papertrail API.
-type PapertrailHook struct {
-	Host    string
-	Port    int
-	AppName string
-	UDPConn net.Conn
-}
-
-// NewPapertrailHook creates a hook to be added to an instance of logger.
-func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) {
-	conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port))
-	return &PapertrailHook{host, port, appName, conn}, err
-}
-
-// Fire is called when a log event is fired.
-func (hook *PapertrailHook) Fire(entry *logrus.Entry) error {
-	date := time.Now().Format(format)
-	msg, _ := entry.String()
-	payload := fmt.Sprintf("<22> %s %s: %s", date, hook.AppName, msg)
-
-	bytesWritten, err := hook.UDPConn.Write([]byte(payload))
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err)
-		return err
-	}
-
-	return nil
-}
-
-// Levels returns the available logging levels.
-func (hook *PapertrailHook) Levels() []logrus.Level {
-	return []logrus.Level{
-		logrus.PanicLevel,
-		logrus.FatalLevel,
-		logrus.ErrorLevel,
-		logrus.WarnLevel,
-		logrus.InfoLevel,
-		logrus.DebugLevel,
-	}
-}

+ 0 - 26
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go

@@ -1,26 +0,0 @@
-package logrus_papertrail
-
-import (
-	"fmt"
-	"testing"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/stvp/go-udp-testing"
-)
-
-func TestWritingToUDP(t *testing.T) {
-	port := 16661
-	udp.SetAddr(fmt.Sprintf(":%d", port))
-
-	hook, err := NewPapertrailHook("localhost", port, "test")
-	if err != nil {
-		t.Errorf("Unable to connect to local UDP server.")
-	}
-
-	log := logrus.New()
-	log.Hooks.Add(hook)
-
-	udp.ShouldReceive(t, "foo", func() {
-		log.Info("foo")
-	})
-}

+ 0 - 61
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md

@@ -1,61 +0,0 @@
-# Sentry Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />
-
-[Sentry](https://getsentry.com) provides both self-hosted and hosted
-solutions for exception tracking.
-Both client and server are
-[open source](https://github.com/getsentry/sentry).
-
-## Usage
-
-Every sentry application defined on the server gets a different
-[DSN](https://www.getsentry.com/docs/). In the example below replace
-`YOUR_DSN` with the one created for your application.
-
-```go
-import (
-  "github.com/Sirupsen/logrus"
-  "github.com/Sirupsen/logrus/hooks/sentry"
-)
-
-func main() {
-  log       := logrus.New()
-  hook, err := logrus_sentry.NewSentryHook(YOUR_DSN, []logrus.Level{
-    logrus.PanicLevel,
-    logrus.FatalLevel,
-    logrus.ErrorLevel,
-  })
-
-  if err == nil {
-    log.Hooks.Add(hook)
-  }
-}
-```
-
-## Special fields
-
-Some logrus fields have a special meaning in this hook,
-these are server_name and logger.
-When logs are sent to sentry these fields are treated differently.
-- server_name (also known as hostname) is the name of the server which
-is logging the event (hostname.example.com)
-- logger is the part of the application which is logging the event.
-In go this usually means setting it to the name of the package.
-
-## Timeout
-
-`Timeout` is the time the sentry hook will wait for a response
-from the sentry server.
-
-If this time elapses with no response from
-the server an error will be returned.
-
-If `Timeout` is set to 0 the SentryHook will not wait for a reply
-and will assume a correct delivery.
-
-The SentryHook has a default timeout of `100 milliseconds` when created
-with a call to `NewSentryHook`. This can be changed by assigning a value to the `Timeout` field:
-
-```go
-hook, _ := logrus_sentry.NewSentryHook(...)
-hook.Timeout = 20*time.Seconds
-```

+ 0 - 100
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go

@@ -1,100 +0,0 @@
-package logrus_sentry
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/getsentry/raven-go"
-)
-
-var (
-	severityMap = map[logrus.Level]raven.Severity{
-		logrus.DebugLevel: raven.DEBUG,
-		logrus.InfoLevel:  raven.INFO,
-		logrus.WarnLevel:  raven.WARNING,
-		logrus.ErrorLevel: raven.ERROR,
-		logrus.FatalLevel: raven.FATAL,
-		logrus.PanicLevel: raven.FATAL,
-	}
-)
-
-func getAndDel(d logrus.Fields, key string) (string, bool) {
-	var (
-		ok  bool
-		v   interface{}
-		val string
-	)
-	if v, ok = d[key]; !ok {
-		return "", false
-	}
-
-	if val, ok = v.(string); !ok {
-		return "", false
-	}
-	delete(d, key)
-	return val, true
-}
-
-// SentryHook delivers logs to a sentry server.
-type SentryHook struct {
-	// Timeout sets the time to wait for a delivery error from the sentry server.
-	// If this is set to zero the server will not wait for any response and will
-	// consider the message correctly sent
-	Timeout time.Duration
-
-	client *raven.Client
-	levels []logrus.Level
-}
-
-// NewSentryHook creates a hook to be added to an instance of logger
-// and initializes the raven client.
-// This method sets the timeout to 100 milliseconds.
-func NewSentryHook(DSN string, levels []logrus.Level) (*SentryHook, error) {
-	client, err := raven.NewClient(DSN, nil)
-	if err != nil {
-		return nil, err
-	}
-	return &SentryHook{100 * time.Millisecond, client, levels}, nil
-}
-
-// Called when an event should be sent to sentry
-// Special fields that sentry uses to give more information to the server
-// are extracted from entry.Data (if they are found)
-// These fields are: logger and server_name
-func (hook *SentryHook) Fire(entry *logrus.Entry) error {
-	packet := &raven.Packet{
-		Message:   entry.Message,
-		Timestamp: raven.Timestamp(entry.Time),
-		Level:     severityMap[entry.Level],
-		Platform:  "go",
-	}
-
-	d := entry.Data
-
-	if logger, ok := getAndDel(d, "logger"); ok {
-		packet.Logger = logger
-	}
-	if serverName, ok := getAndDel(d, "server_name"); ok {
-		packet.ServerName = serverName
-	}
-	packet.Extra = map[string]interface{}(d)
-
-	_, errCh := hook.client.Capture(packet, nil)
-	timeout := hook.Timeout
-	if timeout != 0 {
-		timeoutCh := time.After(timeout)
-		select {
-		case err := <-errCh:
-			return err
-		case <-timeoutCh:
-			return fmt.Errorf("no response from sentry server in %s", timeout)
-		}
-	}
-	return nil
-}
-
-// Levels returns the available logging levels.
-func (hook *SentryHook) Levels() []logrus.Level {
-	return hook.levels
-}

+ 0 - 97
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go

@@ -1,97 +0,0 @@
-package logrus_sentry
-
-import (
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"net/http/httptest"
-	"strings"
-	"testing"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/getsentry/raven-go"
-)
-
-const (
-	message     = "error message"
-	server_name = "testserver.internal"
-	logger_name = "test.logger"
-)
-
-func getTestLogger() *logrus.Logger {
-	l := logrus.New()
-	l.Out = ioutil.Discard
-	return l
-}
-
-func WithTestDSN(t *testing.T, tf func(string, <-chan *raven.Packet)) {
-	pch := make(chan *raven.Packet, 1)
-	s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
-		defer req.Body.Close()
-		d := json.NewDecoder(req.Body)
-		p := &raven.Packet{}
-		err := d.Decode(p)
-		if err != nil {
-			t.Fatal(err.Error())
-		}
-
-		pch <- p
-	}))
-	defer s.Close()
-
-	fragments := strings.SplitN(s.URL, "://", 2)
-	dsn := fmt.Sprintf(
-		"%s://public:secret@%s/sentry/project-id",
-		fragments[0],
-		fragments[1],
-	)
-	tf(dsn, pch)
-}
-
-func TestSpecialFields(t *testing.T) {
-	WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
-		logger := getTestLogger()
-
-		hook, err := NewSentryHook(dsn, []logrus.Level{
-			logrus.ErrorLevel,
-		})
-
-		if err != nil {
-			t.Fatal(err.Error())
-		}
-		logger.Hooks.Add(hook)
-		logger.WithFields(logrus.Fields{
-			"server_name": server_name,
-			"logger":      logger_name,
-		}).Error(message)
-
-		packet := <-pch
-		if packet.Logger != logger_name {
-			t.Errorf("logger should have been %s, was %s", logger_name, packet.Logger)
-		}
-
-		if packet.ServerName != server_name {
-			t.Errorf("server_name should have been %s, was %s", server_name, packet.ServerName)
-		}
-	})
-}
-
-func TestSentryHandler(t *testing.T) {
-	WithTestDSN(t, func(dsn string, pch <-chan *raven.Packet) {
-		logger := getTestLogger()
-		hook, err := NewSentryHook(dsn, []logrus.Level{
-			logrus.ErrorLevel,
-		})
-		if err != nil {
-			t.Fatal(err.Error())
-		}
-		logger.Hooks.Add(hook)
-
-		logger.Error(message)
-		packet := <-pch
-		if packet.Message != message {
-			t.Errorf("message should have been %s, was %s", message, packet.Message)
-		}
-	})
-}

+ 0 - 20
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md

@@ -1,20 +0,0 @@
-# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>
-
-## Usage
-
-```go
-import (
-  "log/syslog"
-  "github.com/Sirupsen/logrus"
-  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
-)
-
-func main() {
-  log       := logrus.New()
-  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
-
-  if err == nil {
-    log.Hooks.Add(hook)
-  }
-}
-```

+ 0 - 59
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go

@@ -1,59 +0,0 @@
-package logrus_syslog
-
-import (
-	"fmt"
-	"github.com/Sirupsen/logrus"
-	"log/syslog"
-	"os"
-)
-
-// SyslogHook to send logs via syslog.
-type SyslogHook struct {
-	Writer        *syslog.Writer
-	SyslogNetwork string
-	SyslogRaddr   string
-}
-
-// Creates a hook to be added to an instance of logger. This is called with
-// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
-// `if err == nil { log.Hooks.Add(hook) }`
-func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
-	w, err := syslog.Dial(network, raddr, priority, tag)
-	return &SyslogHook{w, network, raddr}, err
-}
-
-func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
-	line, err := entry.String()
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
-		return err
-	}
-
-	switch entry.Level {
-	case logrus.PanicLevel:
-		return hook.Writer.Crit(line)
-	case logrus.FatalLevel:
-		return hook.Writer.Crit(line)
-	case logrus.ErrorLevel:
-		return hook.Writer.Err(line)
-	case logrus.WarnLevel:
-		return hook.Writer.Warning(line)
-	case logrus.InfoLevel:
-		return hook.Writer.Info(line)
-	case logrus.DebugLevel:
-		return hook.Writer.Debug(line)
-	default:
-		return nil
-	}
-}
-
-func (hook *SyslogHook) Levels() []logrus.Level {
-	return []logrus.Level{
-		logrus.PanicLevel,
-		logrus.FatalLevel,
-		logrus.ErrorLevel,
-		logrus.WarnLevel,
-		logrus.InfoLevel,
-		logrus.DebugLevel,
-	}
-}

+ 0 - 26
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go

@@ -1,26 +0,0 @@
-package logrus_syslog
-
-import (
-	"github.com/Sirupsen/logrus"
-	"log/syslog"
-	"testing"
-)
-
-func TestLocalhostAddAndPrint(t *testing.T) {
-	log := logrus.New()
-	hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
-
-	if err != nil {
-		t.Errorf("Unable to connect to local syslog.")
-	}
-
-	log.Hooks.Add(hook)
-
-	for _, level := range hook.Levels() {
-		if len(log.Hooks[level]) != 1 {
-			t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
-		}
-	}
-
-	log.Info("Congratulations!")
-}

+ 19 - 4
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go

@@ -3,18 +3,33 @@ package logrus
 import (
 	"encoding/json"
 	"fmt"
-	"time"
 )
 
-type JSONFormatter struct{}
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	TimestampFormat string
+}
 
 func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
 	data := make(Fields, len(entry.Data)+3)
 	for k, v := range entry.Data {
-		data[k] = v
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/Sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
 	}
 	prefixFieldClashes(data)
-	data["time"] = entry.Time.Format(time.RFC3339)
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+
+	data["time"] = entry.Time.Format(timestampFormat)
 	data["msg"] = entry.Message
 	data["level"] = entry.Level.String()
 

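A short, hedged sketch of what the two JSONFormatter changes mean for callers: a configurable timestamp layout, and `error` values in `Fields` now serialized via `Error()` instead of being dropped by `encoding/json`. The field names and message are illustrative.

```go
package main

import (
	"errors"
	"time"

	log "github.com/Sirupsen/logrus"
)

func main() {
	logger := log.New()
	// TimestampFormat is new; leaving it empty falls back to DefaultTimestampFormat.
	logger.Formatter = &log.JSONFormatter{TimestampFormat: time.RFC3339Nano}

	// The error field is emitted as {"err":"disk full",...} because the
	// formatter now calls Error() on error-typed values.
	logger.WithField("err", errors.New("disk full")).Warn("write failed")
}
```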
+ 78 - 27
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go

@@ -8,13 +8,13 @@ import (
 
 type Logger struct {
 	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
-	// file, or leave it default which is `os.Stdout`. You can also set this to
+	// file, or leave it default which is `os.Stderr`. You can also set this to
 	// something more adventorous, such as logging to Kafka.
 	Out io.Writer
 	// Hooks for the logger instance. These allow firing events based on logging
 	// levels and log entries. For example, to send errors to an error tracking
 	// service, log to StatsD or dump the core on fatal errors.
-	Hooks levelHooks
+	Hooks LevelHooks
 	// All log entries pass through the formatter before logged to Out. The
 	// included formatters are `TextFormatter` and `JSONFormatter` for which
 	// TextFormatter is the default. In development (when a TTY is attached) it
@@ -37,23 +37,23 @@ type Logger struct {
 //    var log = &Logger{
 //      Out: os.Stderr,
 //      Formatter: new(JSONFormatter),
-//      Hooks: make(levelHooks),
+//      Hooks: make(LevelHooks),
 //      Level: logrus.DebugLevel,
 //    }
 //
 // It's recommended to make this a global instance called `log`.
 func New() *Logger {
 	return &Logger{
-		Out:       os.Stdout,
+		Out:       os.Stderr,
 		Formatter: new(TextFormatter),
-		Hooks:     make(levelHooks),
+		Hooks:     make(LevelHooks),
 		Level:     InfoLevel,
 	}
 }
 
 // Adds a field to the log entry, note that you it doesn't log until you call
 // Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
-// Ff you want multiple fields, use `WithFields`.
+// If you want multiple fields, use `WithFields`.
 func (logger *Logger) WithField(key string, value interface{}) *Entry {
 	return NewEntry(logger).WithField(key, value)
 }
@@ -64,12 +64,22 @@ func (logger *Logger) WithFields(fields Fields) *Entry {
 	return NewEntry(logger).WithFields(fields)
 }
 
+// Add an error as single field to the log entry.  All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	return NewEntry(logger).WithError(err)
+}
+
 func (logger *Logger) Debugf(format string, args ...interface{}) {
-	NewEntry(logger).Debugf(format, args...)
+	if logger.Level >= DebugLevel {
+		NewEntry(logger).Debugf(format, args...)
+	}
 }
 
 func (logger *Logger) Infof(format string, args ...interface{}) {
-	NewEntry(logger).Infof(format, args...)
+	if logger.Level >= InfoLevel {
+		NewEntry(logger).Infof(format, args...)
+	}
 }
 
 func (logger *Logger) Printf(format string, args ...interface{}) {
@@ -77,31 +87,46 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
 }
 
 func (logger *Logger) Warnf(format string, args ...interface{}) {
-	NewEntry(logger).Warnf(format, args...)
+	if logger.Level >= WarnLevel {
+		NewEntry(logger).Warnf(format, args...)
+	}
 }
 
 func (logger *Logger) Warningf(format string, args ...interface{}) {
-	NewEntry(logger).Warnf(format, args...)
+	if logger.Level >= WarnLevel {
+		NewEntry(logger).Warnf(format, args...)
+	}
 }
 
 func (logger *Logger) Errorf(format string, args ...interface{}) {
-	NewEntry(logger).Errorf(format, args...)
+	if logger.Level >= ErrorLevel {
+		NewEntry(logger).Errorf(format, args...)
+	}
 }
 
 func (logger *Logger) Fatalf(format string, args ...interface{}) {
-	NewEntry(logger).Fatalf(format, args...)
+	if logger.Level >= FatalLevel {
+		NewEntry(logger).Fatalf(format, args...)
+	}
+	os.Exit(1)
 }
 
 func (logger *Logger) Panicf(format string, args ...interface{}) {
-	NewEntry(logger).Panicf(format, args...)
+	if logger.Level >= PanicLevel {
+		NewEntry(logger).Panicf(format, args...)
+	}
 }
 
 func (logger *Logger) Debug(args ...interface{}) {
-	NewEntry(logger).Debug(args...)
+	if logger.Level >= DebugLevel {
+		NewEntry(logger).Debug(args...)
+	}
 }
 
 func (logger *Logger) Info(args ...interface{}) {
-	NewEntry(logger).Info(args...)
+	if logger.Level >= InfoLevel {
+		NewEntry(logger).Info(args...)
+	}
 }
 
 func (logger *Logger) Print(args ...interface{}) {
@@ -109,31 +134,46 @@ func (logger *Logger) Print(args ...interface{}) {
 }
 
 func (logger *Logger) Warn(args ...interface{}) {
-	NewEntry(logger).Warn(args...)
+	if logger.Level >= WarnLevel {
+		NewEntry(logger).Warn(args...)
+	}
 }
 
 func (logger *Logger) Warning(args ...interface{}) {
-	NewEntry(logger).Warn(args...)
+	if logger.Level >= WarnLevel {
+		NewEntry(logger).Warn(args...)
+	}
 }
 
 func (logger *Logger) Error(args ...interface{}) {
-	NewEntry(logger).Error(args...)
+	if logger.Level >= ErrorLevel {
+		NewEntry(logger).Error(args...)
+	}
 }
 
 func (logger *Logger) Fatal(args ...interface{}) {
-	NewEntry(logger).Fatal(args...)
+	if logger.Level >= FatalLevel {
+		NewEntry(logger).Fatal(args...)
+	}
+	os.Exit(1)
 }
 
 func (logger *Logger) Panic(args ...interface{}) {
-	NewEntry(logger).Panic(args...)
+	if logger.Level >= PanicLevel {
+		NewEntry(logger).Panic(args...)
+	}
 }
 
 func (logger *Logger) Debugln(args ...interface{}) {
-	NewEntry(logger).Debugln(args...)
+	if logger.Level >= DebugLevel {
+		NewEntry(logger).Debugln(args...)
+	}
 }
 
 func (logger *Logger) Infoln(args ...interface{}) {
-	NewEntry(logger).Infoln(args...)
+	if logger.Level >= InfoLevel {
+		NewEntry(logger).Infoln(args...)
+	}
 }
 
 func (logger *Logger) Println(args ...interface{}) {
@@ -141,21 +181,32 @@ func (logger *Logger) Println(args ...interface{}) {
 }
 
 func (logger *Logger) Warnln(args ...interface{}) {
-	NewEntry(logger).Warnln(args...)
+	if logger.Level >= WarnLevel {
+		NewEntry(logger).Warnln(args...)
+	}
 }
 
 func (logger *Logger) Warningln(args ...interface{}) {
-	NewEntry(logger).Warnln(args...)
+	if logger.Level >= WarnLevel {
+		NewEntry(logger).Warnln(args...)
+	}
 }
 
 func (logger *Logger) Errorln(args ...interface{}) {
-	NewEntry(logger).Errorln(args...)
+	if logger.Level >= ErrorLevel {
+		NewEntry(logger).Errorln(args...)
+	}
 }
 
 func (logger *Logger) Fatalln(args ...interface{}) {
-	NewEntry(logger).Fatalln(args...)
+	if logger.Level >= FatalLevel {
+		NewEntry(logger).Fatalln(args...)
+	}
+	os.Exit(1)
 }
 
 func (logger *Logger) Panicln(args ...interface{}) {
-	NewEntry(logger).Panicln(args...)
+	if logger.Level >= PanicLevel {
+		NewEntry(logger).Panicln(args...)
+	}
 }
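The practical effect of the new level guards and the `os.Stderr` default, sketched under the assumption of the API exactly as vendored here (messages and values are illustrative): entries below `logger.Level` are now skipped before an `Entry` is even allocated.

```go
package main

import (
	"os"

	log "github.com/Sirupsen/logrus"
)

func main() {
	logger := log.New() // Out now defaults to os.Stderr rather than os.Stdout
	logger.Out = os.Stdout
	logger.Level = log.WarnLevel

	logger.Debug("dropped: below WarnLevel, no Entry is created")
	logger.WithError(os.ErrNotExist).Warn("printed: at or above WarnLevel")
}
```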

+ 51 - 2
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go

@@ -3,6 +3,7 @@ package logrus
 import (
 	"fmt"
 	"log"
+	"strings"
 )
 
 // Fields type, used to pass to `WithFields`.
@@ -33,7 +34,7 @@ func (level Level) String() string {
 
 // ParseLevel takes a string level and returns the Logrus log level constant.
 func ParseLevel(lvl string) (Level, error) {
-	switch lvl {
+	switch strings.ToLower(lvl) {
 	case "panic":
 		return PanicLevel, nil
 	case "fatal":
@@ -52,6 +53,16 @@ func ParseLevel(lvl string) (Level, error) {
 	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
 }
 
+// A constant exposing all logging levels
+var AllLevels = []Level{
+	PanicLevel,
+	FatalLevel,
+	ErrorLevel,
+	WarnLevel,
+	InfoLevel,
+	DebugLevel,
+}
+
 // These are the different logging levels. You can set the logging level to log
 // on your instance of logger, obtained with `logrus.New()`.
 const (
@@ -74,7 +85,11 @@ const (
 )
 
 // Won't compile if StdLogger can't be realized by a log.Logger
-var _ StdLogger = &log.Logger{}
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
 
 // StdLogger is what your logrus-enabled library should take, that way
 // it'll accept a stdlib logger and a logrus logger. There's no standard
@@ -92,3 +107,37 @@ type StdLogger interface {
 	Panicf(string, ...interface{})
 	Panicln(...interface{})
 }
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+}
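Two of the logrus.go additions in use, as a hedged sketch: `ParseLevel` now lower-cases its input, and `FieldLogger` lets one function accept either a `*Logger` or an `*Entry`. The helper name and field values below are illustrative.

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

// audit works against FieldLogger, so callers can pass the root logger or an
// Entry that already carries contextual fields.
func audit(l log.FieldLogger, user string) {
	l.WithField("user", user).Info("audit event")
}

func main() {
	// "INFO" parses now that ParseLevel lower-cases its argument.
	level, err := log.ParseLevel("INFO")
	if err != nil {
		log.Fatal(err)
	}

	logger := log.New()
	logger.Level = level

	audit(logger, "mrjana")
	audit(logger.WithField("request-id", 42), "mrjana")
}
```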

+ 0 - 283
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go

@@ -1,283 +0,0 @@
-package logrus
-
-import (
-	"bytes"
-	"encoding/json"
-	"strconv"
-	"strings"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
-	var buffer bytes.Buffer
-	var fields Fields
-
-	logger := New()
-	logger.Out = &buffer
-	logger.Formatter = new(JSONFormatter)
-
-	log(logger)
-
-	err := json.Unmarshal(buffer.Bytes(), &fields)
-	assert.Nil(t, err)
-
-	assertions(fields)
-}
-
-func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
-	var buffer bytes.Buffer
-
-	logger := New()
-	logger.Out = &buffer
-	logger.Formatter = &TextFormatter{
-		DisableColors: true,
-	}
-
-	log(logger)
-
-	fields := make(map[string]string)
-	for _, kv := range strings.Split(buffer.String(), " ") {
-		if !strings.Contains(kv, "=") {
-			continue
-		}
-		kvArr := strings.Split(kv, "=")
-		key := strings.TrimSpace(kvArr[0])
-		val := kvArr[1]
-		if kvArr[1][0] == '"' {
-			var err error
-			val, err = strconv.Unquote(val)
-			assert.NoError(t, err)
-		}
-		fields[key] = val
-	}
-	assertions(fields)
-}
-
-func TestPrint(t *testing.T) {
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Print("test")
-	}, func(fields Fields) {
-		assert.Equal(t, fields["msg"], "test")
-		assert.Equal(t, fields["level"], "info")
-	})
-}
-
-func TestInfo(t *testing.T) {
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Info("test")
-	}, func(fields Fields) {
-		assert.Equal(t, fields["msg"], "test")
-		assert.Equal(t, fields["level"], "info")
-	})
-}
-
-func TestWarn(t *testing.T) {
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Warn("test")
-	}, func(fields Fields) {
-		assert.Equal(t, fields["msg"], "test")
-		assert.Equal(t, fields["level"], "warning")
-	})
-}
-
-func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Infoln("test", "test")
-	}, func(fields Fields) {
-		assert.Equal(t, fields["msg"], "test test")
-	})
-}
-
-func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Infoln("test", 10)
-	}, func(fields Fields) {
-		assert.Equal(t, fields["msg"], "test 10")
-	})
-}
-
-func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Infoln(10, 10)
-	}, func(fields Fields) {
-		assert.Equal(t, fields["msg"], "10 10")
-	})
-}
-
-func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Infoln(10, 10)
-	}, func(fields Fields) {
-		assert.Equal(t, fields["msg"], "10 10")
-	})
-}
-
-func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Info("test", 10)
-	}, func(fields Fields) {
-		assert.Equal(t, fields["msg"], "test10")
-	})
-}
-
-func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.Info("test", "test")
-	}, func(fields Fields) {
-		assert.Equal(t, fields["msg"], "testtest")
-	})
-}
-
-func TestWithFieldsShouldAllowAssignments(t *testing.T) {
-	var buffer bytes.Buffer
-	var fields Fields
-
-	logger := New()
-	logger.Out = &buffer
-	logger.Formatter = new(JSONFormatter)
-
-	localLog := logger.WithFields(Fields{
-		"key1": "value1",
-	})
-
-	localLog.WithField("key2", "value2").Info("test")
-	err := json.Unmarshal(buffer.Bytes(), &fields)
-	assert.Nil(t, err)
-
-	assert.Equal(t, "value2", fields["key2"])
-	assert.Equal(t, "value1", fields["key1"])
-
-	buffer = bytes.Buffer{}
-	fields = Fields{}
-	localLog.Info("test")
-	err = json.Unmarshal(buffer.Bytes(), &fields)
-	assert.Nil(t, err)
-
-	_, ok := fields["key2"]
-	assert.Equal(t, false, ok)
-	assert.Equal(t, "value1", fields["key1"])
-}
-
-func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.WithField("msg", "hello").Info("test")
-	}, func(fields Fields) {
-		assert.Equal(t, fields["msg"], "test")
-	})
-}
-
-func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.WithField("msg", "hello").Info("test")
-	}, func(fields Fields) {
-		assert.Equal(t, fields["msg"], "test")
-		assert.Equal(t, fields["fields.msg"], "hello")
-	})
-}
-
-func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.WithField("time", "hello").Info("test")
-	}, func(fields Fields) {
-		assert.Equal(t, fields["fields.time"], "hello")
-	})
-}
-
-func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
-	LogAndAssertJSON(t, func(log *Logger) {
-		log.WithField("level", 1).Info("test")
-	}, func(fields Fields) {
-		assert.Equal(t, fields["level"], "info")
-		assert.Equal(t, fields["fields.level"], 1)
-	})
-}
-
-func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
-	LogAndAssertText(t, func(log *Logger) {
-		ll := log.WithField("herp", "derp")
-		ll.Info("hello")
-		ll.Info("bye")
-	}, func(fields map[string]string) {
-		for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
-			if _, ok := fields[fieldName]; ok {
-				t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
-			}
-		}
-	})
-}
-
-func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
-
-	var buffer bytes.Buffer
-	var fields Fields
-
-	logger := New()
-	logger.Out = &buffer
-	logger.Formatter = new(JSONFormatter)
-
-	llog := logger.WithField("context", "eating raw fish")
-
-	llog.Info("looks delicious")
-
-	err := json.Unmarshal(buffer.Bytes(), &fields)
-	assert.NoError(t, err, "should have decoded first message")
-	assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
-	assert.Equal(t, fields["msg"], "looks delicious")
-	assert.Equal(t, fields["context"], "eating raw fish")
-
-	buffer.Reset()
-
-	llog.Warn("omg it is!")
-
-	err = json.Unmarshal(buffer.Bytes(), &fields)
-	assert.NoError(t, err, "should have decoded second message")
-	assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
-	assert.Equal(t, fields["msg"], "omg it is!")
-	assert.Equal(t, fields["context"], "eating raw fish")
-	assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
-
-}
-
-func TestConvertLevelToString(t *testing.T) {
-	assert.Equal(t, "debug", DebugLevel.String())
-	assert.Equal(t, "info", InfoLevel.String())
-	assert.Equal(t, "warning", WarnLevel.String())
-	assert.Equal(t, "error", ErrorLevel.String())
-	assert.Equal(t, "fatal", FatalLevel.String())
-	assert.Equal(t, "panic", PanicLevel.String())
-}
-
-func TestParseLevel(t *testing.T) {
-	l, err := ParseLevel("panic")
-	assert.Nil(t, err)
-	assert.Equal(t, PanicLevel, l)
-
-	l, err = ParseLevel("fatal")
-	assert.Nil(t, err)
-	assert.Equal(t, FatalLevel, l)
-
-	l, err = ParseLevel("error")
-	assert.Nil(t, err)
-	assert.Equal(t, ErrorLevel, l)
-
-	l, err = ParseLevel("warn")
-	assert.Nil(t, err)
-	assert.Equal(t, WarnLevel, l)
-
-	l, err = ParseLevel("warning")
-	assert.Nil(t, err)
-	assert.Equal(t, WarnLevel, l)
-
-	l, err = ParseLevel("info")
-	assert.Nil(t, err)
-	assert.Equal(t, InfoLevel, l)
-
-	l, err = ParseLevel("debug")
-	assert.Nil(t, err)
-	assert.Equal(t, DebugLevel, l)
-
-	l, err = ParseLevel("invalid")
-	assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
-}

+ 1 - 0
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_openbsd.go → libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go

@@ -1,3 +1,4 @@
+// +build darwin freebsd openbsd netbsd dragonfly
 
 package logrus
 

+ 0 - 20
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go

@@ -1,20 +0,0 @@
-/*
-  Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
-*/
-package logrus
-
-import (
-	"syscall"
-)
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-type Termios struct {
-	Iflag  uint32
-	Oflag  uint32
-	Cflag  uint32
-	Lflag  uint32
-	Cc     [20]uint8
-	Ispeed uint32
-	Ospeed uint32
-}

+ 3 - 3
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go

@@ -3,7 +3,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build linux,!appengine darwin freebsd openbsd
+// +build linux darwin freebsd openbsd netbsd dragonfly
 
 package logrus
 
@@ -12,9 +12,9 @@ import (
 	"unsafe"
 )
 
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns true if stderr's file descriptor is a terminal.
 func IsTerminal() bool {
-	fd := syscall.Stdout
+	fd := syscall.Stderr
 	var termios Termios
 	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
 	return err == 0

+ 15 - 0
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_solaris.go

@@ -0,0 +1,15 @@
+// +build solaris
+
+package logrus
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal() bool {
+	_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+	return err == nil
+}

+ 2 - 2
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go

@@ -18,9 +18,9 @@ var (
 	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
 )
 
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns true if stderr's file descriptor is a terminal.
 func IsTerminal() bool {
-	fd := syscall.Stdout
+	fd := syscall.Stderr
 	var st uint32
 	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
 	return r != 0 && e == 0

+ 62 - 25
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go

@@ -3,7 +3,7 @@ package logrus
 import (
 	"bytes"
 	"fmt"
-	"regexp"
+	"runtime"
 	"sort"
 	"strings"
 	"time"
@@ -15,12 +15,12 @@ const (
 	green   = 32
 	yellow  = 33
 	blue    = 34
+	gray    = 37
 )
 
 var (
 	baseTimestamp time.Time
 	isTerminal    bool
-	noQuoteNeeded *regexp.Regexp
 )
 
 func init() {
@@ -34,35 +34,59 @@ func miniTS() int {
 
 type TextFormatter struct {
 	// Set to true to bypass checking for a TTY before outputting colors.
-	ForceColors   bool
+	ForceColors bool
+
+	// Force disabling colors.
 	DisableColors bool
-	// Set to true to disable timestamp logging (useful when the output
-	// is redirected to a logging system already adding a timestamp)
+
+	// Disable timestamp logging. useful when output is redirected to logging
+	// system that already adds timestamps.
 	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time passed since beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+	DisableSorting bool
 }
 
 func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
-
-	var keys []string
+	var keys []string = make([]string, 0, len(entry.Data))
 	for k := range entry.Data {
 		keys = append(keys, k)
 	}
-	sort.Strings(keys)
+
+	if !f.DisableSorting {
+		sort.Strings(keys)
+	}
 
 	b := &bytes.Buffer{}
 
 	prefixFieldClashes(entry.Data)
 
-	isColored := (f.ForceColors || isTerminal) && !f.DisableColors
+	isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+	isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
 
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
 	if isColored {
-		printColored(b, entry, keys)
+		f.printColored(b, entry, keys, timestampFormat)
 	} else {
 		if !f.DisableTimestamp {
-			f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
+			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
 		}
 		f.appendKeyValue(b, "level", entry.Level.String())
-		f.appendKeyValue(b, "msg", entry.Message)
+		if entry.Message != "" {
+			f.appendKeyValue(b, "msg", entry.Message)
+		}
 		for _, key := range keys {
 			f.appendKeyValue(b, key, entry.Data[key])
 		}
@@ -72,9 +96,11 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
 	return b.Bytes(), nil
 }
 
-func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
 	var levelColor int
 	switch entry.Level {
+	case DebugLevel:
+		levelColor = gray
 	case WarnLevel:
 		levelColor = yellow
 	case ErrorLevel, FatalLevel, PanicLevel:
@@ -85,10 +111,14 @@ func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
 
 	levelText := strings.ToUpper(entry.Level.String())[0:4]
 
-	fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+	if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+	} else {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+	}
 	for _, k := range keys {
 		v := entry.Data[k]
-		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
 	}
 }
 
@@ -96,7 +126,7 @@ func needsQuoting(text string) bool {
 	for _, ch := range text {
 		if !((ch >= 'a' && ch <= 'z') ||
 			(ch >= 'A' && ch <= 'Z') ||
-			(ch >= '0' && ch < '9') ||
+			(ch >= '0' && ch <= '9') ||
 			ch == '-' || ch == '.') {
 			return false
 		}
@@ -104,21 +134,28 @@ func needsQuoting(text string) bool {
 	return true
 }
 
-func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
-	switch value.(type) {
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+
+	b.WriteString(key)
+	b.WriteByte('=')
+
+	switch value := value.(type) {
 	case string:
-		if needsQuoting(value.(string)) {
-			fmt.Fprintf(b, "%v=%s ", key, value)
+		if needsQuoting(value) {
+			b.WriteString(value)
 		} else {
-			fmt.Fprintf(b, "%v=%q ", key, value)
+			fmt.Fprintf(b, "%q", value)
 		}
 	case error:
-		if needsQuoting(value.(error).Error()) {
-			fmt.Fprintf(b, "%v=%s ", key, value)
+		errmsg := value.Error()
+		if needsQuoting(errmsg) {
+			b.WriteString(errmsg)
 		} else {
-			fmt.Fprintf(b, "%v=%q ", key, value)
+			fmt.Fprintf(b, "%q", value)
 		}
 	default:
-		fmt.Fprintf(b, "%v=%v ", key, value)
+		fmt.Fprint(b, value)
 	}
+
+	b.WriteByte(' ')
 }
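A hedged sketch of the new TextFormatter knobs introduced above (`FullTimestamp`, `TimestampFormat`, `DisableSorting`); the field values and log message are illustrative.

```go
package main

import (
	"time"

	log "github.com/Sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.Formatter = &log.TextFormatter{
		// Print the wall-clock time instead of seconds since start when a TTY is attached.
		FullTimestamp:   true,
		TimestampFormat: time.RFC822,
		// Keep fields in insertion order instead of sorting keys.
		DisableSorting: true,
		DisableColors:  false,
	}

	logger.WithField("driver", "bridge").Info("network created")
}
```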

+ 0 - 33
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go

@@ -1,33 +0,0 @@
-package logrus
-
-import (
-	"bytes"
-	"errors"
-
-	"testing"
-)
-
-func TestQuoting(t *testing.T) {
-	tf := &TextFormatter{DisableColors: true}
-
-	checkQuoting := func(q bool, value interface{}) {
-		b, _ := tf.Format(WithField("test", value))
-		idx := bytes.Index(b, ([]byte)("test="))
-		cont := bytes.Contains(b[idx+5:], []byte{'"'})
-		if cont != q {
-			if q {
-				t.Errorf("quoting expected for: %#v", value)
-			} else {
-				t.Errorf("quoting not expected for: %#v", value)
-			}
-		}
-	}
-
-	checkQuoting(false, "abcd")
-	checkQuoting(false, "v1.0")
-	checkQuoting(true, "/foobar")
-	checkQuoting(true, "x y")
-	checkQuoting(true, "x,y")
-	checkQuoting(false, errors.New("invalid"))
-	checkQuoting(true, errors.New("invalid argument"))
-}

+ 1 - 1
libnetwork/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go

@@ -6,7 +6,7 @@ import (
 	"runtime"
 )
 
-func (logger *Logger) Writer() (*io.PipeWriter) {
+func (logger *Logger) Writer() *io.PipeWriter {
 	reader, writer := io.Pipe()
 
 	go logger.writerScanner(reader)
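One common pattern built on `Writer()` (assumed here, not shown in this diff) is routing a stdlib `*log.Logger` through logrus; a hedged sketch with an illustrative HTTP server:

```go
package main

import (
	stdlog "log"
	"net/http"

	log "github.com/Sirupsen/logrus"
)

func main() {
	logger := log.New()

	// Writer returns an *io.PipeWriter; everything written to it is scanned
	// line by line and re-emitted through the logrus logger.
	w := logger.Writer()
	defer w.Close()

	srv := &http.Server{
		Addr:     ":8080",
		ErrorLog: stdlog.New(w, "", 0), // route net/http's internal errors through logrus
	}
	logger.Fatal(srv.ListenAndServe())
}
```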

+ 22 - 0
libnetwork/Godeps/_workspace/src/github.com/armon/go-radix/.gitignore

@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe

+ 3 - 0
libnetwork/Godeps/_workspace/src/github.com/armon/go-radix/.travis.yml

@@ -0,0 +1,3 @@
+language: go
+go:
+  - tip

+ 20 - 0
libnetwork/Godeps/_workspace/src/github.com/armon/go-radix/LICENSE

@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Armon Dadgar
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 36 - 0
libnetwork/Godeps/_workspace/src/github.com/armon/go-radix/README.md

@@ -0,0 +1,36 @@
+go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix)
+=========
+
+Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
+The package only provides a single `Tree` implementation, optimized for sparse nodes.
+
+As a radix tree, it provides the following:
+ * O(k) operations. In many cases, this can be faster than a hash table since
+   the hash function is an O(k) operation, and hash tables have very poor cache locality.
+ * Minimum / Maximum value lookups
+ * Ordered iteration
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix).
+
+Example
+=======
+
+Below is a simple example of usage
+
+```go
+// Create a tree
+r := radix.New()
+r.Insert("foo", 1)
+r.Insert("bar", 2)
+r.Insert("foobar", 2)
+
+// Find the longest prefix match
+m, _, _ := r.LongestPrefix("foozip")
+if m != "foo" {
+    panic("should be foo")
+}
+```
+
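Beyond the README snippet, a hedged usage sketch of the prefix-oriented calls the vendored file below provides (`NewFromMap`, `WalkPrefix`, `Minimum`, `Maximum`); the keys are illustrative.

```go
package main

import (
	"fmt"

	"github.com/armon/go-radix"
)

func main() {
	// Keys sharing a prefix sit under the same subtree, so prefix walks are cheap.
	r := radix.NewFromMap(map[string]interface{}{
		"net/bridge/br0": 1,
		"net/bridge/br1": 2,
		"net/overlay/o1": 3,
	})

	// Visit only the bridge networks.
	r.WalkPrefix("net/bridge/", func(k string, v interface{}) bool {
		fmt.Println(k, v)
		return false // keep walking
	})

	minKey, _, _ := r.Minimum()
	maxKey, _, _ := r.Maximum()
	fmt.Println("first:", minKey, "last:", maxKey)
}
```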

+ 467 - 0
libnetwork/Godeps/_workspace/src/github.com/armon/go-radix/radix.go

@@ -0,0 +1,467 @@
+package radix
+
+import (
+	"sort"
+	"strings"
+)
+
+// WalkFn is used when walking the tree. Takes a
+// key and value, returning if iteration should
+// be terminated.
+type WalkFn func(s string, v interface{}) bool
+
+// leafNode is used to represent a value
+type leafNode struct {
+	key string
+	val interface{}
+}
+
+// edge is used to represent an edge node
+type edge struct {
+	label byte
+	node  *node
+}
+
+type node struct {
+	// leaf is used to store possible leaf
+	leaf *leafNode
+
+	// prefix is the common prefix we ignore
+	prefix string
+
+	// Edges should be stored in-order for iteration.
+	// We avoid a fully materialized slice to save memory,
+	// since in most cases we expect to be sparse
+	edges edges
+}
+
+func (n *node) isLeaf() bool {
+	return n.leaf != nil
+}
+
+func (n *node) addEdge(e edge) {
+	n.edges = append(n.edges, e)
+	n.edges.Sort()
+}
+
+func (n *node) replaceEdge(e edge) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= e.label
+	})
+	if idx < num && n.edges[idx].label == e.label {
+		n.edges[idx].node = e.node
+		return
+	}
+	panic("replacing missing edge")
+}
+
+func (n *node) getEdge(label byte) *node {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		return n.edges[idx].node
+	}
+	return nil
+}
+
+type edges []edge
+
+func (e edges) Len() int {
+	return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+	return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+	sort.Sort(e)
+}
+
+// Tree implements a radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over
+// a standard hash map is prefix-based lookups and
+// ordered iteration,
+type Tree struct {
+	root *node
+	size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+	return NewFromMap(nil)
+}
+
+// NewFromMap returns a new tree containing the keys
+// from an existing map
+func NewFromMap(m map[string]interface{}) *Tree {
+	t := &Tree{root: &node{}}
+	for k, v := range m {
+		t.Insert(k, v)
+	}
+	return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+	return t.size
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 string) int {
+	max := len(k1)
+	if l := len(k2); l < max {
+		max = l
+	}
+	var i int
+	for i = 0; i < max; i++ {
+		if k1[i] != k2[i] {
+			break
+		}
+	}
+	return i
+}
+
+// Insert is used to add a newentry or update
+// an existing entry. Returns if updated.
+func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) {
+	var parent *node
+	n := t.root
+	search := s
+	for {
+		// Handle key exhaution
+		if len(search) == 0 {
+			if n.isLeaf() {
+				old := n.leaf.val
+				n.leaf.val = v
+				return old, true
+			} else {
+				n.leaf = &leafNode{
+					key: s,
+					val: v,
+				}
+				t.size++
+				return nil, false
+			}
+		}
+
+		// Look for the edge
+		parent = n
+		n = n.getEdge(search[0])
+
+		// No edge, create one
+		if n == nil {
+			e := edge{
+				label: search[0],
+				node: &node{
+					leaf: &leafNode{
+						key: s,
+						val: v,
+					},
+					prefix: search,
+				},
+			}
+			parent.addEdge(e)
+			t.size++
+			return nil, false
+		}
+
+		// Determine longest prefix of the search key on match
+		commonPrefix := longestPrefix(search, n.prefix)
+		if commonPrefix == len(n.prefix) {
+			search = search[commonPrefix:]
+			continue
+		}
+
+		// Split the node
+		t.size++
+		child := &node{
+			prefix: search[:commonPrefix],
+		}
+		parent.replaceEdge(edge{
+			label: search[0],
+			node:  child,
+		})
+
+		// Restore the existing node
+		child.addEdge(edge{
+			label: n.prefix[commonPrefix],
+			node:  n,
+		})
+		n.prefix = n.prefix[commonPrefix:]
+
+		// Create a new leaf node
+		leaf := &leafNode{
+			key: s,
+			val: v,
+		}
+
+		// If the new key is a subset, add to to this node
+		search = search[commonPrefix:]
+		if len(search) == 0 {
+			child.leaf = leaf
+			return nil, false
+		}
+
+		// Create a new edge for the node
+		child.addEdge(edge{
+			label: search[0],
+			node: &node{
+				leaf:   leaf,
+				prefix: search,
+			},
+		})
+		return nil, false
+	}
+	return nil, false
+}
+
+// Delete is used to delete a key, returning the previous
+// value and if it was deleted
+func (t *Tree) Delete(s string) (interface{}, bool) {
+	n := t.root
+	search := s
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if !n.isLeaf() {
+				break
+			}
+			goto DELETE
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	return nil, false
+
+DELETE:
+	// Delete the leaf
+	leaf := n.leaf
+	n.leaf = nil
+	t.size--
+
+	// Check if we should merge this node
+	if len(n.edges) == 1 {
+		e := n.edges[0]
+		child := e.node
+		n.prefix = n.prefix + child.prefix
+		n.leaf = child.leaf
+		n.edges = child.edges
+	}
+	return leaf.val, true
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(s string) (interface{}, bool) {
+	n := t.root
+	search := s
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if n.isLeaf() {
+				return n.leaf.val, true
+			}
+			break
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	return nil, false
+}
+
+// LongestPrefix is like Get, but instead of an
+// exact match, it will return the longest prefix match.
+func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) {
+	var last *leafNode
+	n := t.root
+	search := s
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return "", nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (t *Tree) Minimum() (string, interface{}, bool) {
+	n := t.root
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return "", nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (t *Tree) Maximum() (string, interface{}, bool) {
+	n := t.root
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		} else {
+			break
+		}
+	}
+	return "", nil, false
+}
+
+// Walk is used to walk the tree
+func (t *Tree) Walk(fn WalkFn) {
+	recursiveWalk(t.root, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (t *Tree) WalkPrefix(prefix string, fn WalkFn) {
+	n := t.root
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if strings.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (t *Tree) WalkPath(path string, fn WalkFn) {
+	n := t.root
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively. Returns true if the walk should be aborted
+func recursiveWalk(n *node, fn WalkFn) bool {
+	// Visit the leaf values if any
+	if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+		return true
+	}
+
+	// Recurse on the children
+	for _, e := range n.edges {
+		if recursiveWalk(e.node, fn) {
+			return true
+		}
+	}
+	return false
+}
+
+// ToMap is used to walk the tree and convert it into a map
+func (t *Tree) ToMap() map[string]interface{} {
+	out := make(map[string]interface{}, t.size)
+	t.Walk(func(k string, v interface{}) bool {
+		out[k] = v
+		return false
+	})
+	return out
+}
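The Tree type above, together with Insert, Get, LongestPrefix and WalkPrefix, behaves like an ordered, prefix-aware string map. A minimal usage sketch follows; the import path and package name (radix) are assumptions for illustration, not something this diff establishes:

    package main

    import (
    	"fmt"

    	radix "github.com/armon/go-radix" // assumed import path for the vendored radix tree
    )

    func main() {
    	t := radix.New()
    	t.Insert("foo", 1)
    	t.Insert("foobar", 2)
    	t.Insert("bar", 3)

    	// Exact lookup.
    	if v, ok := t.Get("foo"); ok {
    		fmt.Println("foo =", v) // foo = 1
    	}

    	// Longest-prefix match: "foozip" falls back to the "foo" entry.
    	if k, v, ok := t.LongestPrefix("foozip"); ok {
    		fmt.Println(k, v) // foo 1
    	}

    	// Ordered walk of every key stored under a prefix.
    	t.WalkPrefix("foo", func(k string, v interface{}) bool {
    		fmt.Println(k, v) // foo 1, then foobar 2
    		return false      // returning true would stop the walk
    	})
    }

Because addEdge keeps edges sorted by label, Walk and WalkPrefix visit keys in lexicographic order, which is the main advantage this structure offers over a plain map.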

+ 191 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/docker/LICENSE

@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2013-2016 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 19 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/docker/NOTICE

@@ -0,0 +1,19 @@
+Docker
+Copyright 2012-2016 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+This product contains software (https://github.com/kr/pty) developed
+by Keith Rarick, licensed under the MIT License.
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.

+ 0 - 67
libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/envfile.go

@@ -1,67 +0,0 @@
-package opts
-
-import (
-	"bufio"
-	"fmt"
-	"os"
-	"strings"
-)
-
-// ParseEnvFile reads a file with environment variables enumerated by lines
-//
-// ``Environment variable names used by the utilities in the Shell and
-// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase
-// letters, digits, and the '_' (underscore) from the characters defined in
-// Portable Character Set and do not begin with a digit. *But*, other
-// characters may be permitted by an implementation; applications shall
-// tolerate the presence of such names.''
-// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html
-//
-// As of #16585, it's up to application inside docker to validate or not
-// environment variables, that's why we just strip leading whitespace and
-// nothing more.
-func ParseEnvFile(filename string) ([]string, error) {
-	fh, err := os.Open(filename)
-	if err != nil {
-		return []string{}, err
-	}
-	defer fh.Close()
-
-	lines := []string{}
-	scanner := bufio.NewScanner(fh)
-	for scanner.Scan() {
-		// trim the line from all leading whitespace first
-		line := strings.TrimLeft(scanner.Text(), whiteSpaces)
-		// line is not empty, and not starting with '#'
-		if len(line) > 0 && !strings.HasPrefix(line, "#") {
-			data := strings.SplitN(line, "=", 2)
-
-			// trim the front of a variable, but nothing else
-			variable := strings.TrimLeft(data[0], whiteSpaces)
-			if strings.ContainsAny(variable, whiteSpaces) {
-				return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)}
-			}
-
-			if len(data) > 1 {
-
-				// pass the value through, no trimming
-				lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1]))
-			} else {
-				// if only a pass-through variable is given, clean it up.
-				lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line)))
-			}
-		}
-	}
-	return lines, scanner.Err()
-}
-
-var whiteSpaces = " \t"
-
-// ErrBadEnvVariable typed error for bad environment variable
-type ErrBadEnvVariable struct {
-	msg string
-}
-
-func (e ErrBadEnvVariable) Error() string {
-	return fmt.Sprintf("poorly formatted environment: %s", e.msg)
-}

+ 0 - 142
libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/envfile_test.go

@@ -1,142 +0,0 @@
-package opts
-
-import (
-	"bufio"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"reflect"
-	"strings"
-	"testing"
-)
-
-func tmpFileWithContent(content string, t *testing.T) string {
-	tmpFile, err := ioutil.TempFile("", "envfile-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer tmpFile.Close()
-
-	tmpFile.WriteString(content)
-	return tmpFile.Name()
-}
-
-// Test ParseEnvFile for a file with a few well formatted lines
-func TestParseEnvFileGoodFile(t *testing.T) {
-	content := `foo=bar
-    baz=quux
-# comment
-
-_foobar=foobaz
-with.dots=working
-and_underscore=working too
-`
-	// Adding a newline + a line with pure whitespace.
-	// This is being done like this instead of the block above
-	// because it's common for editors to trim trailing whitespace
-	// from lines, which becomes annoying since that's the
-	// exact thing we need to test.
-	content += "\n    \t  "
-	tmpFile := tmpFileWithContent(content, t)
-	defer os.Remove(tmpFile)
-
-	lines, err := ParseEnvFile(tmpFile)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	expectedLines := []string{
-		"foo=bar",
-		"baz=quux",
-		"_foobar=foobaz",
-		"with.dots=working",
-		"and_underscore=working too",
-	}
-
-	if !reflect.DeepEqual(lines, expectedLines) {
-		t.Fatal("lines not equal to expected_lines")
-	}
-}
-
-// Test ParseEnvFile for an empty file
-func TestParseEnvFileEmptyFile(t *testing.T) {
-	tmpFile := tmpFileWithContent("", t)
-	defer os.Remove(tmpFile)
-
-	lines, err := ParseEnvFile(tmpFile)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if len(lines) != 0 {
-		t.Fatal("lines not empty; expected empty")
-	}
-}
-
-// Test ParseEnvFile for a non existent file
-func TestParseEnvFileNonExistentFile(t *testing.T) {
-	_, err := ParseEnvFile("foo_bar_baz")
-	if err == nil {
-		t.Fatal("ParseEnvFile succeeded; expected failure")
-	}
-	if _, ok := err.(*os.PathError); !ok {
-		t.Fatalf("Expected a PathError, got [%v]", err)
-	}
-}
-
-// Test ParseEnvFile for a badly formatted file
-func TestParseEnvFileBadlyFormattedFile(t *testing.T) {
-	content := `foo=bar
-    f   =quux
-`
-
-	tmpFile := tmpFileWithContent(content, t)
-	defer os.Remove(tmpFile)
-
-	_, err := ParseEnvFile(tmpFile)
-	if err == nil {
-		t.Fatalf("Expected a ErrBadEnvVariable, got nothing")
-	}
-	if _, ok := err.(ErrBadEnvVariable); !ok {
-		t.Fatalf("Expected a ErrBadEnvVariable, got [%v]", err)
-	}
-	expectedMessage := "poorly formatted environment: variable 'f   ' has white spaces"
-	if err.Error() != expectedMessage {
-		t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error())
-	}
-}
-
-// Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize
-func TestParseEnvFileLineTooLongFile(t *testing.T) {
-	content := strings.Repeat("a", bufio.MaxScanTokenSize+42)
-	content = fmt.Sprint("foo=", content)
-
-	tmpFile := tmpFileWithContent(content, t)
-	defer os.Remove(tmpFile)
-
-	_, err := ParseEnvFile(tmpFile)
-	if err == nil {
-		t.Fatal("ParseEnvFile succeeded; expected failure")
-	}
-}
-
-// ParseEnvFile with a random file, pass through
-func TestParseEnvFileRandomFile(t *testing.T) {
-	content := `first line
-another invalid line`
-	tmpFile := tmpFileWithContent(content, t)
-	defer os.Remove(tmpFile)
-
-	_, err := ParseEnvFile(tmpFile)
-
-	if err == nil {
-		t.Fatalf("Expected a ErrBadEnvVariable, got nothing")
-	}
-	if _, ok := err.(ErrBadEnvVariable); !ok {
-		t.Fatalf("Expected a ErrBadEnvvariable, got [%v]", err)
-	}
-	expectedMessage := "poorly formatted environment: variable 'first line' has white spaces"
-	if err.Error() != expectedMessage {
-		t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error())
-	}
-}

+ 38 - 36
libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/hosts.go

@@ -4,16 +4,12 @@ import (
 	"fmt"
 	"fmt"
 	"net"
 	"net"
 	"net/url"
 	"net/url"
-	"runtime"
 	"strconv"
 	"strconv"
 	"strings"
 	"strings"
 )
 )
 
 
 var (
 	// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
-	// TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
-	// is not supplied. A better longer term solution would be to use a named
-	// pipe as the default on the Windows daemon.
 	// These are the IANA registered port numbers for use with Docker
 	// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
 	DefaultHTTPPort = 2375 // Default HTTP Port
@@ -26,13 +22,19 @@ var (
 	DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
 	// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
 	DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
+	// DefaultNamedPipe defines the default named pipe used by docker on Windows
+	DefaultNamedPipe = `//./pipe/docker_engine`
 )
 
 // ValidateHost validates that the specified string is a valid host and returns it.
 func ValidateHost(val string) (string, error) {
-	_, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val)
-	if err != nil {
-		return val, err
+	host := strings.TrimSpace(val)
+	// The empty string means default and is not handled by parseDockerDaemonHost
+	if host != "" {
+		_, err := parseDockerDaemonHost(host)
+		if err != nil {
+			return val, err
+		}
 	}
 	// Note: unlike most flag validators, we don't return the mutated value here
 	//       we need to know what the user entered later (using ParseHost) to adjust for tls
@@ -40,39 +42,39 @@ func ValidateHost(val string) (string, error) {
 }
 
 // ParseHost and set defaults for a Daemon host string
-func ParseHost(defaultHost, val string) (string, error) {
-	host, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val)
-	if err != nil {
-		return val, err
+func ParseHost(defaultToTLS bool, val string) (string, error) {
+	host := strings.TrimSpace(val)
+	if host == "" {
+		if defaultToTLS {
+			host = DefaultTLSHost
+		} else {
+			host = DefaultHost
+		}
+	} else {
+		var err error
+		host, err = parseDockerDaemonHost(host)
+		if err != nil {
+			return val, err
+		}
 	}
 	return host, nil
 }
 
 // parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
-// Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr
-// defaultUnixAddr must be a absolute file path (no `unix://` prefix)
-// defaultTCPAddr must be the full `tcp://host:port` form
-func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) {
-	addr = strings.TrimSpace(addr)
-	if addr == "" {
-		if defaultAddr == defaultTLSHost {
-			return defaultTLSHost, nil
-		}
-		if runtime.GOOS != "windows" {
-			return fmt.Sprintf("unix://%s", defaultUnixAddr), nil
-		}
-		return defaultTCPAddr, nil
-	}
+// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
+func parseDockerDaemonHost(addr string) (string, error) {
 	addrParts := strings.Split(addr, "://")
-	if len(addrParts) == 1 {
+	if len(addrParts) == 1 && addrParts[0] != "" {
 		addrParts = []string{"tcp", addrParts[0]}
 		addrParts = []string{"tcp", addrParts[0]}
 	}
 	}
 
 
 	switch addrParts[0] {
 	switch addrParts[0] {
 	case "tcp":
 	case "tcp":
-		return parseTCPAddr(addrParts[1], defaultTCPAddr)
+		return parseTCPAddr(addrParts[1], DefaultTCPHost)
 	case "unix":
 	case "unix":
-		return parseUnixAddr(addrParts[1], defaultUnixAddr)
+		return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
+	case "npipe":
+		return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
 	case "fd":
 	case "fd":
 		return addr, nil
 		return addr, nil
 	default:
 	default:
@@ -80,19 +82,19 @@ func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defa
 	}
 }
 
-// parseUnixAddr parses and validates that the specified address is a valid UNIX
-// socket address. It returns a formatted UNIX socket address, either using the
-// address parsed from addr, or the contents of defaultAddr if addr is a blank
-// string.
-func parseUnixAddr(addr string, defaultAddr string) (string, error) {
-	addr = strings.TrimPrefix(addr, "unix://")
+// parseSimpleProtoAddr parses and validates that the specified address is a valid
+// socket address for simple protocols like unix and npipe. It returns a formatted
+// socket address, either using the address parsed from addr, or the contents of
+// defaultAddr if addr is a blank string.
+func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
+	addr = strings.TrimPrefix(addr, proto+"://")
 	if strings.Contains(addr, "://") {
-		return "", fmt.Errorf("Invalid proto, expected unix: %s", addr)
+		return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
 	}
 	if addr == "" {
 		addr = defaultAddr
 	}
-	return fmt.Sprintf("unix://%s", addr), nil
+	return fmt.Sprintf("%s://%s", proto, addr), nil
 }
 
 // parseTCPAddr parses and validates that the specified address is a valid TCP
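The reworked parsing above funnels every daemon address through parseDockerDaemonHost, with parseSimpleProtoAddr now handling both unix:// and the new npipe:// scheme, and ParseHost choosing a default when the input is blank. A rough caller-side sketch; the import path is assumed, and the concrete defaults (DefaultHost, DefaultUnixSocket) are taken from the hosts_test.go removed just below rather than from this hunk:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/opts" // assumed import path for this package
    )

    func main() {
    	// Blank input picks a default: DefaultTLSHost when defaultToTLS is true,
    	// otherwise the platform DefaultHost (npipe:////./pipe/docker_engine on Windows,
    	// unix:///var/run/docker.sock elsewhere, per the removed tests).
    	host, err := opts.ParseHost(false, "")
    	fmt.Println(host, err)

    	// A bare scheme is expanded with its default address.
    	host, err = opts.ParseHost(false, "unix://")
    	fmt.Println(host, err) // unix:///var/run/docker.sock, assuming DefaultUnixSocket keeps that value

    	// ValidateHost only checks the value; it deliberately returns the original
    	// (untrimmed) string so later TLS handling can still see what the user typed.
    	v, err := opts.ValidateHost(" tcp://:2375 ")
    	fmt.Println(v, err)
    }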

+ 0 - 164
libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_test.go

@@ -1,164 +0,0 @@
-package opts
-
-import (
-	"runtime"
-	"testing"
-)
-
-func TestParseHost(t *testing.T) {
-	invalid := map[string]string{
-		"anything":              "Invalid bind address format: anything",
-		"something with spaces": "Invalid bind address format: something with spaces",
-		"://":                "Invalid bind address format: ://",
-		"unknown://":         "Invalid bind address format: unknown://",
-		"tcp://:port":        "Invalid bind address format: :port",
-		"tcp://invalid":      "Invalid bind address format: invalid",
-		"tcp://invalid:port": "Invalid bind address format: invalid:port",
-	}
-	const defaultHTTPHost = "tcp://127.0.0.1:2375"
-	var defaultHOST = "unix:///var/run/docker.sock"
-
-	if runtime.GOOS == "windows" {
-		defaultHOST = defaultHTTPHost
-	}
-	valid := map[string]string{
-		"":                         defaultHOST,
-		"fd://":                    "fd://",
-		"fd://something":           "fd://something",
-		"tcp://host:":              "tcp://host:2375",
-		"tcp://":                   "tcp://localhost:2375",
-		"tcp://:2375":              "tcp://localhost:2375", // default ip address
-		"tcp://:2376":              "tcp://localhost:2376", // default ip address
-		"tcp://0.0.0.0:8080":       "tcp://0.0.0.0:8080",
-		"tcp://192.168.0.0:12000":  "tcp://192.168.0.0:12000",
-		"tcp://192.168:8080":       "tcp://192.168:8080",
-		"tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P
-		"tcp://docker.com:2375":    "tcp://docker.com:2375",
-		"unix://":                  "unix:///var/run/docker.sock", // default unix:// value
-		"unix://path/to/socket":    "unix://path/to/socket",
-	}
-
-	for value, errorMessage := range invalid {
-		if _, err := ParseHost(defaultHTTPHost, value); err == nil || err.Error() != errorMessage {
-			t.Fatalf("Expected an error for %v with [%v], got [%v]", value, errorMessage, err)
-		}
-	}
-	for value, expected := range valid {
-		if actual, err := ParseHost(defaultHTTPHost, value); err != nil || actual != expected {
-			t.Fatalf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err)
-		}
-	}
-}
-
-func TestParseDockerDaemonHost(t *testing.T) {
-	var (
-		defaultHTTPHost  = "tcp://localhost:2375"
-		defaultHTTPSHost = "tcp://localhost:2376"
-		defaultUnix      = "/var/run/docker.sock"
-		defaultHOST      = "unix:///var/run/docker.sock"
-	)
-	if runtime.GOOS == "windows" {
-		defaultHOST = defaultHTTPHost
-	}
-	invalids := map[string]string{
-		"0.0.0.0":                       "Invalid bind address format: 0.0.0.0",
-		"tcp:a.b.c.d":                   "Invalid bind address format: tcp:a.b.c.d",
-		"tcp:a.b.c.d/path":              "Invalid bind address format: tcp:a.b.c.d/path",
-		"udp://127.0.0.1":               "Invalid bind address format: udp://127.0.0.1",
-		"udp://127.0.0.1:2375":          "Invalid bind address format: udp://127.0.0.1:2375",
-		"tcp://unix:///run/docker.sock": "Invalid bind address format: unix",
-		"tcp":  "Invalid bind address format: tcp",
-		"unix": "Invalid bind address format: unix",
-		"fd":   "Invalid bind address format: fd",
-	}
-	valids := map[string]string{
-		"0.0.0.1:":                    "tcp://0.0.0.1:2375",
-		"0.0.0.1:5555":                "tcp://0.0.0.1:5555",
-		"0.0.0.1:5555/path":           "tcp://0.0.0.1:5555/path",
-		"[::1]:":                      "tcp://[::1]:2375",
-		"[::1]:5555/path":             "tcp://[::1]:5555/path",
-		"[0:0:0:0:0:0:0:1]:":          "tcp://[0:0:0:0:0:0:0:1]:2375",
-		"[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path",
-		":6666":                   "tcp://localhost:6666",
-		":6666/path":              "tcp://localhost:6666/path",
-		"":                        defaultHOST,
-		" ":                       defaultHOST,
-		"  ":                      defaultHOST,
-		"tcp://":                  defaultHTTPHost,
-		"tcp://:7777":             "tcp://localhost:7777",
-		"tcp://:7777/path":        "tcp://localhost:7777/path",
-		" tcp://:7777/path ":      "tcp://localhost:7777/path",
-		"unix:///run/docker.sock": "unix:///run/docker.sock",
-		"unix://":                 "unix:///var/run/docker.sock",
-		"fd://":                   "fd://",
-		"fd://something":          "fd://something",
-		"localhost:":              "tcp://localhost:2375",
-		"localhost:5555":          "tcp://localhost:5555",
-		"localhost:5555/path":     "tcp://localhost:5555/path",
-	}
-	for invalidAddr, expectedError := range invalids {
-		if addr, err := parseDockerDaemonHost(defaultHTTPHost, defaultHTTPSHost, defaultUnix, "", invalidAddr); err == nil || err.Error() != expectedError {
-			t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr)
-		}
-	}
-	for validAddr, expectedAddr := range valids {
-		if addr, err := parseDockerDaemonHost(defaultHTTPHost, defaultHTTPSHost, defaultUnix, "", validAddr); err != nil || addr != expectedAddr {
-			t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr)
-		}
-	}
-}
-
-func TestParseTCP(t *testing.T) {
-	var (
-		defaultHTTPHost = "tcp://127.0.0.1:2376"
-	)
-	invalids := map[string]string{
-		"0.0.0.0":              "Invalid bind address format: 0.0.0.0",
-		"tcp:a.b.c.d":          "Invalid bind address format: tcp:a.b.c.d",
-		"tcp:a.b.c.d/path":     "Invalid bind address format: tcp:a.b.c.d/path",
-		"udp://127.0.0.1":      "Invalid proto, expected tcp: udp://127.0.0.1",
-		"udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375",
-	}
-	valids := map[string]string{
-		"":                            defaultHTTPHost,
-		"tcp://":                      defaultHTTPHost,
-		"0.0.0.1:":                    "tcp://0.0.0.1:2376",
-		"0.0.0.1:5555":                "tcp://0.0.0.1:5555",
-		"0.0.0.1:5555/path":           "tcp://0.0.0.1:5555/path",
-		":6666":                       "tcp://127.0.0.1:6666",
-		":6666/path":                  "tcp://127.0.0.1:6666/path",
-		"tcp://:7777":                 "tcp://127.0.0.1:7777",
-		"tcp://:7777/path":            "tcp://127.0.0.1:7777/path",
-		"[::1]:":                      "tcp://[::1]:2376",
-		"[::1]:5555":                  "tcp://[::1]:5555",
-		"[::1]:5555/path":             "tcp://[::1]:5555/path",
-		"[0:0:0:0:0:0:0:1]:":          "tcp://[0:0:0:0:0:0:0:1]:2376",
-		"[0:0:0:0:0:0:0:1]:5555":      "tcp://[0:0:0:0:0:0:0:1]:5555",
-		"[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path",
-		"localhost:":                  "tcp://localhost:2376",
-		"localhost:5555":              "tcp://localhost:5555",
-		"localhost:5555/path":         "tcp://localhost:5555/path",
-	}
-	for invalidAddr, expectedError := range invalids {
-		if addr, err := parseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError {
-			t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr)
-		}
-	}
-	for validAddr, expectedAddr := range valids {
-		if addr, err := parseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr {
-			t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr)
-		}
-	}
-}
-
-func TestParseInvalidUnixAddrInvalid(t *testing.T) {
-	if _, err := parseUnixAddr("tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" {
-		t.Fatalf("Expected an error, got %v", err)
-	}
-	if _, err := parseUnixAddr("unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" {
-		t.Fatalf("Expected an error, got %v", err)
-	}
-	if v, err := parseUnixAddr("", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" {
-		t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock")
-	}
-}

+ 1 - 1
libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/hosts_windows.go

@@ -3,4 +3,4 @@
 package opts
 
 // DefaultHost constant defines the default host string used by docker on Windows
-var DefaultHost = DefaultTCPHost
+var DefaultHost = "npipe://" + DefaultNamedPipe

+ 0 - 54
libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/ip_test.go

@@ -1,54 +0,0 @@
-package opts
-
-import (
-	"net"
-	"testing"
-)
-
-func TestIpOptString(t *testing.T) {
-	addresses := []string{"", "0.0.0.0"}
-	var ip net.IP
-
-	for _, address := range addresses {
-		stringAddress := NewIPOpt(&ip, address).String()
-		if stringAddress != address {
-			t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress)
-		}
-	}
-}
-
-func TestNewIpOptInvalidDefaultVal(t *testing.T) {
-	ip := net.IPv4(127, 0, 0, 1)
-	defaultVal := "Not an ip"
-
-	ipOpt := NewIPOpt(&ip, defaultVal)
-
-	expected := "127.0.0.1"
-	if ipOpt.String() != expected {
-		t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String())
-	}
-}
-
-func TestNewIpOptValidDefaultVal(t *testing.T) {
-	ip := net.IPv4(127, 0, 0, 1)
-	defaultVal := "192.168.1.1"
-
-	ipOpt := NewIPOpt(&ip, defaultVal)
-
-	expected := "192.168.1.1"
-	if ipOpt.String() != expected {
-		t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String())
-	}
-}
-
-func TestIpOptSetInvalidVal(t *testing.T) {
-	ip := net.IPv4(127, 0, 0, 1)
-	ipOpt := &IPOpt{IP: &ip}
-
-	invalidIP := "invalid ip"
-	expectedError := "invalid ip is not an ip address"
-	err := ipOpt.Set(invalidIP)
-	if err == nil || err.Error() != expectedError {
-		t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error())
-	}
-}

+ 49 - 138
libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go

@@ -3,13 +3,8 @@ package opts
 import (
 	"fmt"
 	"net"
-	"os"
 	"regexp"
 	"regexp"
-	"strconv"
 	"strings"
 	"strings"
-
-	"github.com/docker/docker/api/types/blkiodev"
-	"github.com/docker/go-units"
 )
 
 var (
@@ -105,6 +100,35 @@ func (opts *ListOpts) Len() int {
 	return len((*opts.values))
 }
 
+// NamedOption is an interface that list and map options
+// with names implement.
+type NamedOption interface {
+	Name() string
+}
+
+// NamedListOpts is a ListOpts with a configuration name.
+// This struct is useful to keep a reference to the assigned
+// field name in the internal configuration struct.
+type NamedListOpts struct {
+	name string
+	ListOpts
+}
+
+var _ NamedOption = &NamedListOpts{}
+
+// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
+func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
+	return &NamedListOpts{
+		name:     name,
+		ListOpts: *NewListOptsRef(values, validator),
+	}
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *NamedListOpts) Name() string {
+	return o.name
+}
+
 //MapOpts holds a map of values and a validation function.
 type MapOpts struct {
 	values    map[string]string
@@ -150,114 +174,34 @@ func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
 	}
 }
 
-// ValidatorFctType defines a validator function that returns a validated string and/or an error.
-type ValidatorFctType func(val string) (string, error)
-
-// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error.
-type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error)
-
-// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error.
-type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error)
-
-// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
-type ValidatorFctListType func(val string) ([]string, error)
-
-// ValidateAttach validates that the specified string is a valid attach option.
-func ValidateAttach(val string) (string, error) {
-	s := strings.ToLower(val)
-	for _, str := range []string{"stdin", "stdout", "stderr"} {
-		if s == str {
-			return s, nil
-		}
-	}
-	return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
+// NamedMapOpts is a MapOpts struct with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedMapOpts struct {
+	name string
+	MapOpts
 }
 
-// ValidateWeightDevice validates that the specified string has a valid device-weight format.
-func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) {
-	split := strings.SplitN(val, ":", 2)
-	if len(split) != 2 {
-		return nil, fmt.Errorf("bad format: %s", val)
-	}
-	if !strings.HasPrefix(split[0], "/dev/") {
-		return nil, fmt.Errorf("bad format for device path: %s", val)
-	}
-	weight, err := strconv.ParseUint(split[1], 10, 0)
-	if err != nil {
-		return nil, fmt.Errorf("invalid weight for device: %s", val)
-	}
-	if weight > 0 && (weight < 10 || weight > 1000) {
-		return nil, fmt.Errorf("invalid weight for device: %s", val)
-	}
-
-	return &blkiodev.WeightDevice{
-		Path:   split[0],
-		Weight: uint16(weight),
-	}, nil
-}
+var _ NamedOption = &NamedMapOpts{}
 
-// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format.
-func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
-	split := strings.SplitN(val, ":", 2)
-	if len(split) != 2 {
-		return nil, fmt.Errorf("bad format: %s", val)
-	}
-	if !strings.HasPrefix(split[0], "/dev/") {
-		return nil, fmt.Errorf("bad format for device path: %s", val)
+// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
+	return &NamedMapOpts{
+		name:    name,
+		MapOpts: *NewMapOpts(values, validator),
 	}
-	rate, err := units.RAMInBytes(split[1])
-	if err != nil {
-		return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
-	}
-	if rate < 0 {
-		return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
-	}
-
-	return &blkiodev.ThrottleDevice{
-		Path: split[0],
-		Rate: uint64(rate),
-	}, nil
 }
 
-// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format.
-func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
-	split := strings.SplitN(val, ":", 2)
-	if len(split) != 2 {
-		return nil, fmt.Errorf("bad format: %s", val)
-	}
-	if !strings.HasPrefix(split[0], "/dev/") {
-		return nil, fmt.Errorf("bad format for device path: %s", val)
-	}
-	rate, err := strconv.ParseUint(split[1], 10, 64)
-	if err != nil {
-		return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
-	}
-	if rate < 0 {
-		return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
-	}
-
-	return &blkiodev.ThrottleDevice{
-		Path: split[0],
-		Rate: uint64(rate),
-	}, nil
+// Name returns the name of the NamedMapOpts in the configuration.
+func (o *NamedMapOpts) Name() string {
+	return o.name
 }
 
-// ValidateEnv validates an environment variable and returns it.
-// If no value is specified, it returns the current value using os.Getenv.
-//
-// As on ParseEnvFile and related to #16585, environment variable names
-// are not validate what so ever, it's up to application inside docker
-// to validate them or not.
-func ValidateEnv(val string) (string, error) {
-	arr := strings.Split(val, "=")
-	if len(arr) > 1 {
-		return val, nil
-	}
-	if !doesEnvExist(val) {
-		return val, nil
-	}
-	return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
-}
+// ValidatorFctType defines a validator function that returns a validated string and/or an error.
+type ValidatorFctType func(val string) (string, error)
+
+// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
+type ValidatorFctListType func(val string) ([]string, error)
 
 // ValidateIPAddress validates an Ip address.
 func ValidateIPAddress(val string) (string, error) {
@@ -268,15 +212,6 @@ func ValidateIPAddress(val string) (string, error) {
 	return "", fmt.Errorf("%s is not an ip address", val)
 	return "", fmt.Errorf("%s is not an ip address", val)
 }
 }
 
 
-// ValidateMACAddress validates a MAC address.
-func ValidateMACAddress(val string) (string, error) {
-	_, err := net.ParseMAC(strings.TrimSpace(val))
-	if err != nil {
-		return "", err
-	}
-	return val, nil
-}
-
 // ValidateDNSSearch validates domain for resolvconf search configuration.
 // ValidateDNSSearch validates domain for resolvconf search configuration.
 // A zero length domain is represented by a dot (.).
 func ValidateDNSSearch(val string) (string, error) {
 	return "", fmt.Errorf("%s is not a valid domain", val)
 	return "", fmt.Errorf("%s is not a valid domain", val)
 }
 }
 
 
-// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
-// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6).
-func ValidateExtraHost(val string) (string, error) {
-	// allow for IPv6 addresses in extra hosts by only splitting on first ":"
-	arr := strings.SplitN(val, ":", 2)
-	if len(arr) != 2 || len(arr[0]) == 0 {
-		return "", fmt.Errorf("bad format for add-host: %q", val)
-	}
-	if _, err := ValidateIPAddress(arr[1]); err != nil {
-		return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
-	}
-	return val, nil
-}
-
 // ValidateLabel validates that the specified string is a valid label, and returns it.
 // Labels are in the form on key=value.
 func ValidateLabel(val string) (string, error) {
@@ -319,13 +240,3 @@ func ValidateLabel(val string) (string, error) {
 	}
 	return val, nil
 }
-
-func doesEnvExist(name string) bool {
-	for _, entry := range os.Environ() {
-		parts := strings.SplitN(entry, "=", 2)
-		if parts[0] == name {
-			return true
-		}
-	}
-	return false
-}
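The NamedListOpts and NamedMapOpts wrappers added above exist so a flag value can remember which configuration field it feeds (via Name()), while delegating the actual behaviour to the embedded ListOpts or MapOpts. A small illustrative sketch, assuming the package import path, assuming ListOpts keeps its usual flag.Value-style Set/String methods (they are exercised by the opts_test.go removed just below), and using the flag name "label" purely as an example:

    package main

    import (
    	"flag"
    	"fmt"

    	"github.com/docker/docker/opts" // assumed import path for this package
    )

    func main() {
    	var labels []string
    	// The wrapper records that these values belong to the "labels" config field.
    	labelOpt := opts.NewNamedListOptsRef("labels", &labels, opts.ValidateLabel)

    	fs := flag.NewFlagSet("example", flag.ContinueOnError)
    	fs.Var(labelOpt, "label", "set metadata (key=value)")
    	_ = fs.Parse([]string{"-label", "env=prod", "-label", "team=networking"})

    	fmt.Println(labelOpt.Name()) // labels
    	fmt.Println(labels)          // [env=prod team=networking]
    }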

+ 0 - 301
libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/opts_test.go

@@ -1,301 +0,0 @@
-package opts
-
-import (
-	"fmt"
-	"os"
-	"strings"
-	"testing"
-)
-
-func TestValidateIPAddress(t *testing.T) {
-	if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" {
-		t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err)
-	}
-
-	if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" {
-		t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err)
-	}
-
-	if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" {
-		t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err)
-	}
-
-	if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" {
-		t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err)
-	}
-
-	if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" {
-		t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err)
-	}
-
-}
-
-func TestMapOpts(t *testing.T) {
-	tmpMap := make(map[string]string)
-	o := NewMapOpts(tmpMap, logOptsValidator)
-	o.Set("max-size=1")
-	if o.String() != "map[max-size:1]" {
-		t.Errorf("%s != [map[max-size:1]", o.String())
-	}
-
-	o.Set("max-file=2")
-	if len(tmpMap) != 2 {
-		t.Errorf("map length %d != 2", len(tmpMap))
-	}
-
-	if tmpMap["max-file"] != "2" {
-		t.Errorf("max-file = %s != 2", tmpMap["max-file"])
-	}
-
-	if tmpMap["max-size"] != "1" {
-		t.Errorf("max-size = %s != 1", tmpMap["max-size"])
-	}
-	if o.Set("dummy-val=3") == nil {
-		t.Errorf("validator is not being called")
-	}
-}
-
-func TestValidateMACAddress(t *testing.T) {
-	if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil {
-		t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err)
-	}
-
-	if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil {
-		t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC")
-	}
-
-	if _, err := ValidateMACAddress(`random invalid string`); err == nil {
-		t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC")
-	}
-}
-
-func TestListOptsWithoutValidator(t *testing.T) {
-	o := NewListOpts(nil)
-	o.Set("foo")
-	if o.String() != "[foo]" {
-		t.Errorf("%s != [foo]", o.String())
-	}
-	o.Set("bar")
-	if o.Len() != 2 {
-		t.Errorf("%d != 2", o.Len())
-	}
-	o.Set("bar")
-	if o.Len() != 3 {
-		t.Errorf("%d != 3", o.Len())
-	}
-	if !o.Get("bar") {
-		t.Error("o.Get(\"bar\") == false")
-	}
-	if o.Get("baz") {
-		t.Error("o.Get(\"baz\") == true")
-	}
-	o.Delete("foo")
-	if o.String() != "[bar bar]" {
-		t.Errorf("%s != [bar bar]", o.String())
-	}
-	listOpts := o.GetAll()
-	if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" {
-		t.Errorf("Expected [[bar bar]], got [%v]", listOpts)
-	}
-	mapListOpts := o.GetMap()
-	if len(mapListOpts) != 1 {
-		t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts)
-	}
-
-}
-
-func TestListOptsWithValidator(t *testing.T) {
-	// Re-using logOptsvalidator (used by MapOpts)
-	o := NewListOpts(logOptsValidator)
-	o.Set("foo")
-	if o.String() != "[]" {
-		t.Errorf("%s != []", o.String())
-	}
-	o.Set("foo=bar")
-	if o.String() != "[]" {
-		t.Errorf("%s != []", o.String())
-	}
-	o.Set("max-file=2")
-	if o.Len() != 1 {
-		t.Errorf("%d != 1", o.Len())
-	}
-	if !o.Get("max-file=2") {
-		t.Error("o.Get(\"max-file=2\") == false")
-	}
-	if o.Get("baz") {
-		t.Error("o.Get(\"baz\") == true")
-	}
-	o.Delete("max-file=2")
-	if o.String() != "[]" {
-		t.Errorf("%s != []", o.String())
-	}
-}
-
-func TestValidateDNSSearch(t *testing.T) {
-	valid := []string{
-		`.`,
-		`a`,
-		`a.`,
-		`1.foo`,
-		`17.foo`,
-		`foo.bar`,
-		`foo.bar.baz`,
-		`foo.bar.`,
-		`foo.bar.baz`,
-		`foo1.bar2`,
-		`foo1.bar2.baz`,
-		`1foo.2bar.`,
-		`1foo.2bar.baz`,
-		`foo-1.bar-2`,
-		`foo-1.bar-2.baz`,
-		`foo-1.bar-2.`,
-		`foo-1.bar-2.baz`,
-		`1-foo.2-bar`,
-		`1-foo.2-bar.baz`,
-		`1-foo.2-bar.`,
-		`1-foo.2-bar.baz`,
-	}
-
-	invalid := []string{
-		``,
-		` `,
-		`  `,
-		`17`,
-		`17.`,
-		`.17`,
-		`17-.`,
-		`17-.foo`,
-		`.foo`,
-		`foo-.bar`,
-		`-foo.bar`,
-		`foo.bar-`,
-		`foo.bar-.baz`,
-		`foo.-bar`,
-		`foo.-bar.baz`,
-		`foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`,
-	}
-
-	for _, domain := range valid {
-		if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" {
-			t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err)
-		}
-	}
-
-	for _, domain := range invalid {
-		if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" {
-			t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err)
-		}
-	}
-}
-
-func TestValidateExtraHosts(t *testing.T) {
-	valid := []string{
-		`myhost:192.168.0.1`,
-		`thathost:10.0.2.1`,
-		`anipv6host:2003:ab34:e::1`,
-		`ipv6local:::1`,
-	}
-
-	invalid := map[string]string{
-		`myhost:192.notanipaddress.1`:  `invalid IP`,
-		`thathost-nosemicolon10.0.0.1`: `bad format`,
-		`anipv6host:::::1`:             `invalid IP`,
-		`ipv6local:::0::`:              `invalid IP`,
-	}
-
-	for _, extrahost := range valid {
-		if _, err := ValidateExtraHost(extrahost); err != nil {
-			t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err)
-		}
-	}
-
-	for extraHost, expectedError := range invalid {
-		if _, err := ValidateExtraHost(extraHost); err == nil {
-			t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost)
-		} else {
-			if !strings.Contains(err.Error(), expectedError) {
-				t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError)
-			}
-		}
-	}
-}
-
-func TestValidateAttach(t *testing.T) {
-	valid := []string{
-		"stdin",
-		"stdout",
-		"stderr",
-		"STDIN",
-		"STDOUT",
-		"STDERR",
-	}
-	if _, err := ValidateAttach("invalid"); err == nil {
-		t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing")
-	}
-
-	for _, attach := range valid {
-		value, err := ValidateAttach(attach)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if value != strings.ToLower(attach) {
-			t.Fatalf("Expected [%v], got [%v]", attach, value)
-		}
-	}
-}
-
-func TestValidateEnv(t *testing.T) {
-	valids := map[string]string{
-		"a":                   "a",
-		"something":           "something",
-		"_=a":                 "_=a",
-		"env1=value1":         "env1=value1",
-		"_env1=value1":        "_env1=value1",
-		"env2=value2=value3":  "env2=value2=value3",
-		"env3=abc!qwe":        "env3=abc!qwe",
-		"env_4=value 4":       "env_4=value 4",
-		"PATH":                fmt.Sprintf("PATH=%v", os.Getenv("PATH")),
-		"PATH=something":      "PATH=something",
-		"asd!qwe":             "asd!qwe",
-		"1asd":                "1asd",
-		"123":                 "123",
-		"some space":          "some space",
-		"  some space before": "  some space before",
-		"some space after  ":  "some space after  ",
-	}
-	for value, expected := range valids {
-		actual, err := ValidateEnv(value)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if actual != expected {
-			t.Fatalf("Expected [%v], got [%v]", expected, actual)
-		}
-	}
-}
-
-func TestValidateLabel(t *testing.T) {
-	if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" {
-		t.Fatalf("Expected an error [bad attribute format: label], go %v", err)
-	}
-	if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" {
-		t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err)
-	}
-	// Validate it's working with more than one =
-	if actual, err := ValidateLabel("key1=value1=value2"); err != nil {
-		t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err)
-	}
-	// Validate it's working with one more
-	if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil {
-		t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err)
-	}
-}
-
-func logOptsValidator(val string) (string, error) {
-	allowedKeys := map[string]string{"max-size": "1", "max-file": "2"}
-	vals := strings.Split(val, "=")
-	if allowedKeys[vals[0]] != "" {
-		return val, nil
-	}
-	return "", fmt.Errorf("invalid key %s", vals[0])
-}

+ 0 - 56
libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/throttledevice.go

@@ -1,56 +0,0 @@
-package opts
-
-import (
-	"fmt"
-
-	"github.com/docker/docker/api/types/blkiodev"
-)
-
-// ThrottledeviceOpt defines a map of ThrottleDevices
-type ThrottledeviceOpt struct {
-	values    []*blkiodev.ThrottleDevice
-	validator ValidatorThrottleFctType
-}
-
-// NewThrottledeviceOpt creates a new ThrottledeviceOpt
-func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt {
-	values := []*blkiodev.ThrottleDevice{}
-	return ThrottledeviceOpt{
-		values:    values,
-		validator: validator,
-	}
-}
-
-// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt
-func (opt *ThrottledeviceOpt) Set(val string) error {
-	var value *blkiodev.ThrottleDevice
-	if opt.validator != nil {
-		v, err := opt.validator(val)
-		if err != nil {
-			return err
-		}
-		value = v
-	}
-	(opt.values) = append((opt.values), value)
-	return nil
-}
-
-// String returns ThrottledeviceOpt values as a string.
-func (opt *ThrottledeviceOpt) String() string {
-	var out []string
-	for _, v := range opt.values {
-		out = append(out, v.String())
-	}
-
-	return fmt.Sprintf("%v", out)
-}
-
-// GetList returns a slice of pointers to ThrottleDevices.
-func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice {
-	var throttledevice []*blkiodev.ThrottleDevice
-	for _, v := range opt.values {
-		throttledevice = append(throttledevice, v)
-	}
-
-	return throttledevice
-}

+ 0 - 52
libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/ulimit.go

@@ -1,52 +0,0 @@
-package opts
-
-import (
-	"fmt"
-
-	"github.com/docker/docker/pkg/ulimit"
-)
-
-// UlimitOpt defines a map of Ulimits
-type UlimitOpt struct {
-	values *map[string]*ulimit.Ulimit
-}
-
-// NewUlimitOpt creates a new UlimitOpt
-func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt {
-	if ref == nil {
-		ref = &map[string]*ulimit.Ulimit{}
-	}
-	return &UlimitOpt{ref}
-}
-
-// Set validates a Ulimit and sets its name as a key in UlimitOpt
-func (o *UlimitOpt) Set(val string) error {
-	l, err := ulimit.Parse(val)
-	if err != nil {
-		return err
-	}
-
-	(*o.values)[l.Name] = l
-
-	return nil
-}
-
-// String returns Ulimit values as a string.
-func (o *UlimitOpt) String() string {
-	var out []string
-	for _, v := range *o.values {
-		out = append(out, v.String())
-	}
-
-	return fmt.Sprintf("%v", out)
-}
-
-// GetList returns a slice of pointers to Ulimits.
-func (o *UlimitOpt) GetList() []*ulimit.Ulimit {
-	var ulimits []*ulimit.Ulimit
-	for _, v := range *o.values {
-		ulimits = append(ulimits, v)
-	}
-
-	return ulimits
-}

+ 0 - 42
libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/ulimit_test.go

@@ -1,42 +0,0 @@
-package opts
-
-import (
-	"testing"
-
-	"github.com/docker/docker/pkg/ulimit"
-)
-
-func TestUlimitOpt(t *testing.T) {
-	ulimitMap := map[string]*ulimit.Ulimit{
-		"nofile": {"nofile", 1024, 512},
-	}
-
-	ulimitOpt := NewUlimitOpt(&ulimitMap)
-
-	expected := "[nofile=512:1024]"
-	if ulimitOpt.String() != expected {
-		t.Fatalf("Expected %v, got %v", expected, ulimitOpt)
-	}
-
-	// Valid ulimit append to opts
-	if err := ulimitOpt.Set("core=1024:1024"); err != nil {
-		t.Fatal(err)
-	}
-
-	// Invalid ulimit type returns an error and do not append to opts
-	if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil {
-		t.Fatalf("Expected error on invalid ulimit type")
-	}
-	expected = "[nofile=512:1024 core=1024:1024]"
-	expected2 := "[core=1024:1024 nofile=512:1024]"
-	result := ulimitOpt.String()
-	if result != expected && result != expected2 {
-		t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt)
-	}
-
-	// And test GetList
-	ulimits := ulimitOpt.GetList()
-	if len(ulimits) != 2 {
-		t.Fatalf("Expected a ulimit list of 2, got %v", ulimits)
-	}
-}

+ 0 - 56
libnetwork/Godeps/_workspace/src/github.com/docker/docker/opts/weightdevice.go

@@ -1,56 +0,0 @@
-package opts
-
-import (
-	"fmt"
-
-	"github.com/docker/docker/api/types/blkiodev"
-)
-
-// WeightdeviceOpt defines a map of WeightDevices
-type WeightdeviceOpt struct {
-	values    []*blkiodev.WeightDevice
-	validator ValidatorWeightFctType
-}
-
-// NewWeightdeviceOpt creates a new WeightdeviceOpt
-func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt {
-	values := []*blkiodev.WeightDevice{}
-	return WeightdeviceOpt{
-		values:    values,
-		validator: validator,
-	}
-}
-
-// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt
-func (opt *WeightdeviceOpt) Set(val string) error {
-	var value *blkiodev.WeightDevice
-	if opt.validator != nil {
-		v, err := opt.validator(val)
-		if err != nil {
-			return err
-		}
-		value = v
-	}
-	(opt.values) = append((opt.values), value)
-	return nil
-}
-
-// String returns WeightdeviceOpt values as a string.
-func (opt *WeightdeviceOpt) String() string {
-	var out []string
-	for _, v := range opt.values {
-		out = append(out, v.String())
-	}
-
-	return fmt.Sprintf("%v", out)
-}
-
-// GetList returns a slice of pointers to WeightDevices.
-func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice {
-	var weightdevice []*blkiodev.WeightDevice
-	for _, v := range opt.values {
-		weightdevice = append(weightdevice, v)
-	}
-
-	return weightdevice
-}

+ 3 - 7
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/backends.go

@@ -12,12 +12,8 @@ import (
 var (
 	// Backends is a global map of discovery backends indexed by their
 	// associated scheme.
-	backends map[string]Backend
-)
-
-func init() {
 	backends = make(map[string]Backend)
-}
+)
 
 // Register makes a discovery backend available by the provided scheme.
 // If Register is called twice with the same scheme an error is returned.
@@ -42,7 +38,7 @@ func parse(rawurl string) (string, string) {
 
 // ParseAdvertise parses the --cluster-advertise daemon config which accepts
 // <ip-address>:<port> or <interface-name>:<port>
-func ParseAdvertise(store, advertise string) (string, error) {
+func ParseAdvertise(advertise string) (string, error) {
 	var (
 		iface *net.Interface
 		addrs []net.Addr
@@ -93,7 +89,7 @@ func ParseAdvertise(store, advertise string) (string, error) {
 		return "", fmt.Errorf("couldnt find a valid ip-address in interface %s", advertise)
 	}
 
-	addr = fmt.Sprintf("%s:%s", addr, port)
+	addr = net.JoinHostPort(addr, port)
 	return addr, nil
 }
 

+ 0 - 131
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/discovery_test.go

@@ -1,131 +0,0 @@
-package discovery
-
-import (
-	"testing"
-
-	"github.com/go-check/check"
-)
-
-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) }
-
-type DiscoverySuite struct{}
-
-var _ = check.Suite(&DiscoverySuite{})
-
-func (s *DiscoverySuite) TestNewEntry(c *check.C) {
-	entry, err := NewEntry("127.0.0.1:2375")
-	c.Assert(err, check.IsNil)
-	c.Assert(entry.Equals(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true)
-	c.Assert(entry.String(), check.Equals, "127.0.0.1:2375")
-
-	_, err = NewEntry("127.0.0.1")
-	c.Assert(err, check.NotNil)
-}
-
-func (s *DiscoverySuite) TestParse(c *check.C) {
-	scheme, uri := parse("127.0.0.1:2375")
-	c.Assert(scheme, check.Equals, "nodes")
-	c.Assert(uri, check.Equals, "127.0.0.1:2375")
-
-	scheme, uri = parse("localhost:2375")
-	c.Assert(scheme, check.Equals, "nodes")
-	c.Assert(uri, check.Equals, "localhost:2375")
-
-	scheme, uri = parse("scheme://127.0.0.1:2375")
-	c.Assert(scheme, check.Equals, "scheme")
-	c.Assert(uri, check.Equals, "127.0.0.1:2375")
-
-	scheme, uri = parse("scheme://localhost:2375")
-	c.Assert(scheme, check.Equals, "scheme")
-	c.Assert(uri, check.Equals, "localhost:2375")
-
-	scheme, uri = parse("")
-	c.Assert(scheme, check.Equals, "nodes")
-	c.Assert(uri, check.Equals, "")
-}
-
-func (s *DiscoverySuite) TestCreateEntries(c *check.C) {
-	entries, err := CreateEntries(nil)
-	c.Assert(entries, check.DeepEquals, Entries{})
-	c.Assert(err, check.IsNil)
-
-	entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""})
-	c.Assert(err, check.IsNil)
-	expected := Entries{
-		&Entry{Host: "127.0.0.1", Port: "2375"},
-		&Entry{Host: "127.0.0.2", Port: "2375"},
-	}
-	c.Assert(entries.Equals(expected), check.Equals, true)
-
-	_, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"})
-	c.Assert(err, check.NotNil)
-}
-
-func (s *DiscoverySuite) TestContainsEntry(c *check.C) {
-	entries, err := CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""})
-	c.Assert(err, check.IsNil)
-	c.Assert(entries.Contains(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true)
-	c.Assert(entries.Contains(&Entry{Host: "127.0.0.3", Port: "2375"}), check.Equals, false)
-}
-
-func (s *DiscoverySuite) TestEntriesEquality(c *check.C) {
-	entries := Entries{
-		&Entry{Host: "127.0.0.1", Port: "2375"},
-		&Entry{Host: "127.0.0.2", Port: "2375"},
-	}
-
-	// Same
-	c.Assert(entries.Equals(Entries{
-		&Entry{Host: "127.0.0.1", Port: "2375"},
-		&Entry{Host: "127.0.0.2", Port: "2375"},
-	}), check.
-		Equals, true)
-
-	// Different size
-	c.Assert(entries.Equals(Entries{
-		&Entry{Host: "127.0.0.1", Port: "2375"},
-		&Entry{Host: "127.0.0.2", Port: "2375"},
-		&Entry{Host: "127.0.0.3", Port: "2375"},
-	}), check.
-		Equals, false)
-
-	// Different content
-	c.Assert(entries.Equals(Entries{
-		&Entry{Host: "127.0.0.1", Port: "2375"},
-		&Entry{Host: "127.0.0.42", Port: "2375"},
-	}), check.
-		Equals, false)
-
-}
-
-func (s *DiscoverySuite) TestEntriesDiff(c *check.C) {
-	entry1 := &Entry{Host: "1.1.1.1", Port: "1111"}
-	entry2 := &Entry{Host: "2.2.2.2", Port: "2222"}
-	entry3 := &Entry{Host: "3.3.3.3", Port: "3333"}
-	entries := Entries{entry1, entry2}
-
-	// No diff
-	added, removed := entries.Diff(Entries{entry2, entry1})
-	c.Assert(added, check.HasLen, 0)
-	c.Assert(removed, check.HasLen, 0)
-
-	// Add
-	added, removed = entries.Diff(Entries{entry2, entry3, entry1})
-	c.Assert(added, check.HasLen, 1)
-	c.Assert(added.Contains(entry3), check.Equals, true)
-	c.Assert(removed, check.HasLen, 0)
-
-	// Remove
-	added, removed = entries.Diff(Entries{entry2})
-	c.Assert(added, check.HasLen, 0)
-	c.Assert(removed, check.HasLen, 1)
-	c.Assert(removed.Contains(entry1), check.Equals, true)
-
-	// Add and remove
-	added, removed = entries.Diff(Entries{entry1, entry3})
-	c.Assert(added, check.HasLen, 1)
-	c.Assert(added.Contains(entry3), check.Equals, true)
-	c.Assert(removed, check.HasLen, 1)
-	c.Assert(removed.Contains(entry2), check.Equals, true)
-}

+ 2 - 5
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/entry.go

@@ -1,9 +1,6 @@
 package discovery
 
-import (
-	"fmt"
-	"net"
-)
+import "net"
 
 // NewEntry creates a new entry.
 func NewEntry(url string) (*Entry, error) {
@@ -27,7 +24,7 @@ func (e *Entry) Equals(cmp *Entry) bool {
 
 // String returns the string form of an entry.
 func (e *Entry) String() string {
-	return fmt.Sprintf("%s:%s", e.Host, e.Port)
+	return net.JoinHostPort(e.Host, e.Port)
 }
 
 // Entries is a list of *Entry with some helpers.

+ 0 - 109
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/file/file.go

@@ -1,109 +0,0 @@
-package file
-
-import (
-	"fmt"
-	"io/ioutil"
-	"strings"
-	"time"
-
-	"github.com/docker/docker/pkg/discovery"
-)
-
-// Discovery is exported
-type Discovery struct {
-	heartbeat time.Duration
-	path      string
-}
-
-func init() {
-	Init()
-}
-
-// Init is exported
-func Init() {
-	discovery.Register("file", &Discovery{})
-}
-
-// Initialize is exported
-func (s *Discovery) Initialize(path string, heartbeat time.Duration, ttl time.Duration, _ map[string]string) error {
-	s.path = path
-	s.heartbeat = heartbeat
-	return nil
-}
-
-func parseFileContent(content []byte) []string {
-	var result []string
-	for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") {
-		line = strings.TrimSpace(line)
-		// Ignoring line starts with #
-		if strings.HasPrefix(line, "#") {
-			continue
-		}
-		// Inlined # comment also ignored.
-		if strings.Contains(line, "#") {
-			line = line[0:strings.Index(line, "#")]
-			// Trim additional spaces caused by above stripping.
-			line = strings.TrimSpace(line)
-		}
-		for _, ip := range discovery.Generate(line) {
-			result = append(result, ip)
-		}
-	}
-	return result
-}
-
-func (s *Discovery) fetch() (discovery.Entries, error) {
-	fileContent, err := ioutil.ReadFile(s.path)
-	if err != nil {
-		return nil, fmt.Errorf("failed to read '%s': %v", s.path, err)
-	}
-	return discovery.CreateEntries(parseFileContent(fileContent))
-}
-
-// Watch is exported
-func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
-	ch := make(chan discovery.Entries)
-	errCh := make(chan error)
-	ticker := time.NewTicker(s.heartbeat)
-
-	go func() {
-		defer close(errCh)
-		defer close(ch)
-
-		// Send the initial entries if available.
-		currentEntries, err := s.fetch()
-		if err != nil {
-			errCh <- err
-		} else {
-			ch <- currentEntries
-		}
-
-		// Periodically send updates.
-		for {
-			select {
-			case <-ticker.C:
-				newEntries, err := s.fetch()
-				if err != nil {
-					errCh <- err
-					continue
-				}
-
-				// Check if the file has really changed.
-				if !newEntries.Equals(currentEntries) {
-					ch <- newEntries
-				}
-				currentEntries = newEntries
-			case <-stopCh:
-				ticker.Stop()
-				return
-			}
-		}
-	}()
-
-	return ch, errCh
-}
-
-// Register is exported
-func (s *Discovery) Register(addr string) error {
-	return discovery.ErrNotImplemented
-}

+ 0 - 114
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/file/file_test.go

@@ -1,114 +0,0 @@
-package file
-
-import (
-	"io/ioutil"
-	"os"
-	"testing"
-
-	"github.com/docker/docker/pkg/discovery"
-
-	"github.com/go-check/check"
-)
-
-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) }
-
-type DiscoverySuite struct{}
-
-var _ = check.Suite(&DiscoverySuite{})
-
-func (s *DiscoverySuite) TestInitialize(c *check.C) {
-	d := &Discovery{}
-	d.Initialize("/path/to/file", 1000, 0, nil)
-	c.Assert(d.path, check.Equals, "/path/to/file")
-}
-
-func (s *DiscoverySuite) TestNew(c *check.C) {
-	d, err := discovery.New("file:///path/to/file", 0, 0, nil)
-	c.Assert(err, check.IsNil)
-	c.Assert(d.(*Discovery).path, check.Equals, "/path/to/file")
-}
-
-func (s *DiscoverySuite) TestContent(c *check.C) {
-	data := `
-1.1.1.[1:2]:1111
-2.2.2.[2:4]:2222
-`
-	ips := parseFileContent([]byte(data))
-	c.Assert(ips, check.HasLen, 5)
-	c.Assert(ips[0], check.Equals, "1.1.1.1:1111")
-	c.Assert(ips[1], check.Equals, "1.1.1.2:1111")
-	c.Assert(ips[2], check.Equals, "2.2.2.2:2222")
-	c.Assert(ips[3], check.Equals, "2.2.2.3:2222")
-	c.Assert(ips[4], check.Equals, "2.2.2.4:2222")
-}
-
-func (s *DiscoverySuite) TestRegister(c *check.C) {
-	discovery := &Discovery{path: "/path/to/file"}
-	c.Assert(discovery.Register("0.0.0.0"), check.NotNil)
-}
-
-func (s *DiscoverySuite) TestParsingContentsWithComments(c *check.C) {
-	data := `
-### test ###
-1.1.1.1:1111 # inline comment
-# 2.2.2.2:2222
-      ### empty line with comment
-    3.3.3.3:3333
-### test ###
-`
-	ips := parseFileContent([]byte(data))
-	c.Assert(ips, check.HasLen, 2)
-	c.Assert("1.1.1.1:1111", check.Equals, ips[0])
-	c.Assert("3.3.3.3:3333", check.Equals, ips[1])
-}
-
-func (s *DiscoverySuite) TestWatch(c *check.C) {
-	data := `
-1.1.1.1:1111
-2.2.2.2:2222
-`
-	expected := discovery.Entries{
-		&discovery.Entry{Host: "1.1.1.1", Port: "1111"},
-		&discovery.Entry{Host: "2.2.2.2", Port: "2222"},
-	}
-
-	// Create a temporary file and remove it.
-	tmp, err := ioutil.TempFile(os.TempDir(), "discovery-file-test")
-	c.Assert(err, check.IsNil)
-	c.Assert(tmp.Close(), check.IsNil)
-	c.Assert(os.Remove(tmp.Name()), check.IsNil)
-
-	// Set up file discovery.
-	d := &Discovery{}
-	d.Initialize(tmp.Name(), 1000, 0, nil)
-	stopCh := make(chan struct{})
-	ch, errCh := d.Watch(stopCh)
-
-	// Make sure it fires errors since the file doesn't exist.
-	c.Assert(<-errCh, check.NotNil)
-	// We have to drain the error channel otherwise Watch will get stuck.
-	go func() {
-		for range errCh {
-		}
-	}()
-
-	// Write the file and make sure we get the expected value back.
-	c.Assert(ioutil.WriteFile(tmp.Name(), []byte(data), 0600), check.IsNil)
-	c.Assert(<-ch, check.DeepEquals, expected)
-
-	// Add a new entry and look it up.
-	expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"})
-	f, err := os.OpenFile(tmp.Name(), os.O_APPEND|os.O_WRONLY, 0600)
-	c.Assert(err, check.IsNil)
-	c.Assert(f, check.NotNil)
-	_, err = f.WriteString("\n3.3.3.3:3333\n")
-	c.Assert(err, check.IsNil)
-	f.Close()
-	c.Assert(<-ch, check.DeepEquals, expected)
-
-	// Stop and make sure it closes all channels.
-	close(stopCh)
-	c.Assert(<-ch, check.IsNil)
-	c.Assert(<-errCh, check.IsNil)
-}

+ 0 - 53
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/generator_test.go

@@ -1,53 +0,0 @@
-package discovery
-
-import (
-	"github.com/go-check/check"
-)
-
-func (s *DiscoverySuite) TestGeneratorNotGenerate(c *check.C) {
-	ips := Generate("127.0.0.1")
-	c.Assert(len(ips), check.Equals, 1)
-	c.Assert(ips[0], check.Equals, "127.0.0.1")
-}
-
-func (s *DiscoverySuite) TestGeneratorWithPortNotGenerate(c *check.C) {
-	ips := Generate("127.0.0.1:8080")
-	c.Assert(len(ips), check.Equals, 1)
-	c.Assert(ips[0], check.Equals, "127.0.0.1:8080")
-}
-
-func (s *DiscoverySuite) TestGeneratorMatchFailedNotGenerate(c *check.C) {
-	ips := Generate("127.0.0.[1]")
-	c.Assert(len(ips), check.Equals, 1)
-	c.Assert(ips[0], check.Equals, "127.0.0.[1]")
-}
-
-func (s *DiscoverySuite) TestGeneratorWithPort(c *check.C) {
-	ips := Generate("127.0.0.[1:11]:2375")
-	c.Assert(len(ips), check.Equals, 11)
-	c.Assert(ips[0], check.Equals, "127.0.0.1:2375")
-	c.Assert(ips[1], check.Equals, "127.0.0.2:2375")
-	c.Assert(ips[2], check.Equals, "127.0.0.3:2375")
-	c.Assert(ips[3], check.Equals, "127.0.0.4:2375")
-	c.Assert(ips[4], check.Equals, "127.0.0.5:2375")
-	c.Assert(ips[5], check.Equals, "127.0.0.6:2375")
-	c.Assert(ips[6], check.Equals, "127.0.0.7:2375")
-	c.Assert(ips[7], check.Equals, "127.0.0.8:2375")
-	c.Assert(ips[8], check.Equals, "127.0.0.9:2375")
-	c.Assert(ips[9], check.Equals, "127.0.0.10:2375")
-	c.Assert(ips[10], check.Equals, "127.0.0.11:2375")
-}
-
-func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeStart(c *check.C) {
-	malformedInput := "127.0.0.[x:11]:2375"
-	ips := Generate(malformedInput)
-	c.Assert(len(ips), check.Equals, 1)
-	c.Assert(ips[0], check.Equals, malformedInput)
-}
-
-func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeEnd(c *check.C) {
-	malformedInput := "127.0.0.[1:x]:2375"
-	ips := Generate(malformedInput)
-	c.Assert(len(ips), check.Equals, 1)
-	c.Assert(ips[0], check.Equals, malformedInput)
-}

+ 21 - 3
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/kv/kv.go

@@ -8,7 +8,7 @@ import (
 
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/discovery"
-	"github.com/docker/docker/pkg/tlsconfig"
+	"github.com/docker/go-connections/tlsconfig"
 	"github.com/docker/libkv"
 	"github.com/docker/libkv/store"
 	"github.com/docker/libkv/store/consul"
@@ -17,7 +17,7 @@ import (
 )
 
 const (
-	discoveryPath = "docker/nodes"
+	defaultDiscoveryPath = "docker/nodes"
 )
 
 // Discovery is exported
@@ -62,7 +62,14 @@ func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Du
 
 	s.heartbeat = heartbeat
 	s.ttl = ttl
-	s.path = path.Join(s.prefix, discoveryPath)
+
+	// Use a custom path if specified in discovery options
+	dpath := defaultDiscoveryPath
+	if clusterOpts["kv.path"] != "" {
+		dpath = clusterOpts["kv.path"]
+	}
+
+	s.path = path.Join(s.prefix, dpath)
 
 	var config *store.Config
 	if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" {
@@ -138,6 +145,17 @@ func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-c
 		// Forever: Create a store watch, watch until we get an error and then try again.
 		// Will only stop if we receive a stopCh request.
 		for {
+			// Create the path to watch if it does not exist yet
+			exists, err := s.store.Exists(s.path)
+			if err != nil {
+				errCh <- err
+			}
+			if !exists {
+				if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil {
+					errCh <- err
+				}
+			}
+
 			// Set up a watch.
 			watchCh, err := s.store.WatchTree(s.path, stopCh)
 			if err != nil {

+ 0 - 324
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/kv/kv_test.go

@@ -1,324 +0,0 @@
-package kv
-
-import (
-	"errors"
-	"io/ioutil"
-	"os"
-	"path"
-	"testing"
-	"time"
-
-	"github.com/docker/docker/pkg/discovery"
-	"github.com/docker/libkv"
-	"github.com/docker/libkv/store"
-
-	"github.com/go-check/check"
-)
-
-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) }
-
-type DiscoverySuite struct{}
-
-var _ = check.Suite(&DiscoverySuite{})
-
-func (ds *DiscoverySuite) TestInitialize(c *check.C) {
-	storeMock := &FakeStore{
-		Endpoints: []string{"127.0.0.1"},
-	}
-	d := &Discovery{backend: store.CONSUL}
-	d.Initialize("127.0.0.1", 0, 0, nil)
-	d.store = storeMock
-
-	s := d.store.(*FakeStore)
-	c.Assert(s.Endpoints, check.HasLen, 1)
-	c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1")
-	c.Assert(d.path, check.Equals, discoveryPath)
-
-	storeMock = &FakeStore{
-		Endpoints: []string{"127.0.0.1:1234"},
-	}
-	d = &Discovery{backend: store.CONSUL}
-	d.Initialize("127.0.0.1:1234/path", 0, 0, nil)
-	d.store = storeMock
-
-	s = d.store.(*FakeStore)
-	c.Assert(s.Endpoints, check.HasLen, 1)
-	c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234")
-	c.Assert(d.path, check.Equals, "path/"+discoveryPath)
-
-	storeMock = &FakeStore{
-		Endpoints: []string{"127.0.0.1:1234", "127.0.0.2:1234", "127.0.0.3:1234"},
-	}
-	d = &Discovery{backend: store.CONSUL}
-	d.Initialize("127.0.0.1:1234,127.0.0.2:1234,127.0.0.3:1234/path", 0, 0, nil)
-	d.store = storeMock
-
-	s = d.store.(*FakeStore)
-	c.Assert(s.Endpoints, check.HasLen, 3)
-	c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234")
-	c.Assert(s.Endpoints[1], check.Equals, "127.0.0.2:1234")
-	c.Assert(s.Endpoints[2], check.Equals, "127.0.0.3:1234")
-
-	c.Assert(d.path, check.Equals, "path/"+discoveryPath)
-}
-
-// Extremely limited mock store so we can test initialization
-type Mock struct {
-	// Endpoints passed to InitializeMock
-	Endpoints []string
-
-	// Options passed to InitializeMock
-	Options *store.Config
-}
-
-func NewMock(endpoints []string, options *store.Config) (store.Store, error) {
-	s := &Mock{}
-	s.Endpoints = endpoints
-	s.Options = options
-	return s, nil
-}
-func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error {
-	return errors.New("Put not supported")
-}
-func (s *Mock) Get(key string) (*store.KVPair, error) {
-	return nil, errors.New("Get not supported")
-}
-func (s *Mock) Delete(key string) error {
-	return errors.New("Delete not supported")
-}
-
-// Exists mock
-func (s *Mock) Exists(key string) (bool, error) {
-	return false, errors.New("Exists not supported")
-}
-
-// Watch mock
-func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
-	return nil, errors.New("Watch not supported")
-}
-
-// WatchTree mock
-func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
-	return nil, errors.New("WatchTree not supported")
-}
-
-// NewLock mock
-func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
-	return nil, errors.New("NewLock not supported")
-}
-
-// List mock
-func (s *Mock) List(prefix string) ([]*store.KVPair, error) {
-	return nil, errors.New("List not supported")
-}
-
-// DeleteTree mock
-func (s *Mock) DeleteTree(prefix string) error {
-	return errors.New("DeleteTree not supported")
-}
-
-// AtomicPut mock
-func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) {
-	return false, nil, errors.New("AtomicPut not supported")
-}
-
-// AtomicDelete mock
-func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
-	return false, errors.New("AtomicDelete not supported")
-}
-
-// Close mock
-func (s *Mock) Close() {
-	return
-}
-
-func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) {
-	cert := `-----BEGIN CERTIFICATE-----
-MIIDCDCCAfKgAwIBAgIICifG7YeiQOEwCwYJKoZIhvcNAQELMBIxEDAOBgNVBAMT
-B1Rlc3QgQ0EwHhcNMTUxMDAxMjMwMDAwWhcNMjAwOTI5MjMwMDAwWjASMRAwDgYD
-VQQDEwdUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1wRC
-O+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4+zE9h80aC4hz+6caRpds
-+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhRSoSi3nY+B7F2E8cuz14q
-V2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZrpXUyXxAvzXfpFXo1RhSb
-UywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUerVYrCPq8vqfn//01qz55
-Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHojxOpXTBepUCIJLbtNnWFT
-V44t9gh5IqIWtoBReQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/
-BAgwBgEB/wIBAjAdBgNVHQ4EFgQUZKUI8IIjIww7X/6hvwggQK4bD24wHwYDVR0j
-BBgwFoAUZKUI8IIjIww7X/6hvwggQK4bD24wCwYJKoZIhvcNAQELA4IBAQDES2cz
-7sCQfDCxCIWH7X8kpi/JWExzUyQEJ0rBzN1m3/x8ySRxtXyGekimBqQwQdFqlwMI
-xzAQKkh3ue8tNSzRbwqMSyH14N1KrSxYS9e9szJHfUasoTpQGPmDmGIoRJuq1h6M
-ej5x1SCJ7GWCR6xEXKUIE9OftXm9TdFzWa7Ja3OHz/mXteii8VXDuZ5ACq6EE5bY
-8sP4gcICfJ5fTrpTlk9FIqEWWQrCGa5wk95PGEj+GJpNogjXQ97wVoo/Y3p1brEn
-t5zjN9PAq4H1fuCMdNNA+p1DHNwd+ELTxcMAnb2ajwHvV6lKPXutrTFc4umJToBX
-FpTxDmJHEV4bzUzh
------END CERTIFICATE-----
-`
-	key := `-----BEGIN RSA PRIVATE KEY-----
-MIIEpQIBAAKCAQEA1wRCO+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4
-+zE9h80aC4hz+6caRpds+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhR
-SoSi3nY+B7F2E8cuz14qV2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZr
-pXUyXxAvzXfpFXo1RhSbUywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUe
-rVYrCPq8vqfn//01qz55Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHoj
-xOpXTBepUCIJLbtNnWFTV44t9gh5IqIWtoBReQIDAQABAoIBAHSWipORGp/uKFXj
-i/mut776x8ofsAxhnLBARQr93ID+i49W8H7EJGkOfaDjTICYC1dbpGrri61qk8sx
-qX7p3v/5NzKwOIfEpirgwVIqSNYe/ncbxnhxkx6tXtUtFKmEx40JskvSpSYAhmmO
-1XSx0E/PWaEN/nLgX/f1eWJIlxlQkk3QeqL+FGbCXI48DEtlJ9+MzMu4pAwZTpj5
-5qtXo5JJ0jRGfJVPAOznRsYqv864AhMdMIWguzk6EGnbaCWwPcfcn+h9a5LMdony
-MDHfBS7bb5tkF3+AfnVY3IBMVx7YlsD9eAyajlgiKu4zLbwTRHjXgShy+4Oussz0
-ugNGnkECgYEA/hi+McrZC8C4gg6XqK8+9joD8tnyDZDz88BQB7CZqABUSwvjDqlP
-L8hcwo/lzvjBNYGkqaFPUICGWKjeCtd8pPS2DCVXxDQX4aHF1vUur0uYNncJiV3N
-XQz4Iemsa6wnKf6M67b5vMXICw7dw0HZCdIHD1hnhdtDz0uVpeevLZ8CgYEA2KCT
-Y43lorjrbCgMqtlefkr3GJA9dey+hTzCiWEOOqn9RqGoEGUday0sKhiLofOgmN2B
-LEukpKIey8s+Q/cb6lReajDVPDsMweX8i7hz3Wa4Ugp4Xa5BpHqu8qIAE2JUZ7bU
-t88aQAYE58pUF+/Lq1QzAQdrjjzQBx6SrBxieecCgYEAvukoPZEC8mmiN1VvbTX+
-QFHmlZha3QaDxChB+QUe7bMRojEUL/fVnzkTOLuVFqSfxevaI/km9n0ac5KtAchV
-xjp2bTnBb5EUQFqjopYktWA+xO07JRJtMfSEmjZPbbay1kKC7rdTfBm961EIHaRj
-xZUf6M+rOE8964oGrdgdLlECgYEA046GQmx6fh7/82FtdZDRQp9tj3SWQUtSiQZc
-qhO59Lq8mjUXz+MgBuJXxkiwXRpzlbaFB0Bca1fUoYw8o915SrDYf/Zu2OKGQ/qa
-V81sgiVmDuEgycR7YOlbX6OsVUHrUlpwhY3hgfMe6UtkMvhBvHF/WhroBEIJm1pV
-PXZ/CbMCgYEApNWVktFBjOaYfY6SNn4iSts1jgsQbbpglg3kT7PLKjCAhI6lNsbk
-dyT7ut01PL6RaW4SeQWtrJIVQaM6vF3pprMKqlc5XihOGAmVqH7rQx9rtQB5TicL
-BFrwkQE4HQtQBV60hYQUzzlSk44VFDz+jxIEtacRHaomDRh2FtOTz+I=
------END RSA PRIVATE KEY-----
-`
-	certFile, err := ioutil.TempFile("", "cert")
-	c.Assert(err, check.IsNil)
-	defer os.Remove(certFile.Name())
-	certFile.Write([]byte(cert))
-	certFile.Close()
-	keyFile, err := ioutil.TempFile("", "key")
-	c.Assert(err, check.IsNil)
-	defer os.Remove(keyFile.Name())
-	keyFile.Write([]byte(key))
-	keyFile.Close()
-
-	libkv.AddStore("mock", NewMock)
-	d := &Discovery{backend: "mock"}
-	err = d.Initialize("127.0.0.3:1234", 0, 0, map[string]string{
-		"kv.cacertfile": certFile.Name(),
-		"kv.certfile":   certFile.Name(),
-		"kv.keyfile":    keyFile.Name(),
-	})
-	c.Assert(err, check.IsNil)
-	s := d.store.(*Mock)
-	c.Assert(s.Options.TLS, check.NotNil)
-	c.Assert(s.Options.TLS.RootCAs, check.NotNil)
-	c.Assert(s.Options.TLS.Certificates, check.HasLen, 1)
-}
-
-func (ds *DiscoverySuite) TestWatch(c *check.C) {
-	mockCh := make(chan []*store.KVPair)
-
-	storeMock := &FakeStore{
-		Endpoints:  []string{"127.0.0.1:1234"},
-		mockKVChan: mockCh,
-	}
-
-	d := &Discovery{backend: store.CONSUL}
-	d.Initialize("127.0.0.1:1234/path", 0, 0, nil)
-	d.store = storeMock
-
-	expected := discovery.Entries{
-		&discovery.Entry{Host: "1.1.1.1", Port: "1111"},
-		&discovery.Entry{Host: "2.2.2.2", Port: "2222"},
-	}
-	kvs := []*store.KVPair{
-		{Key: path.Join("path", discoveryPath, "1.1.1.1"), Value: []byte("1.1.1.1:1111")},
-		{Key: path.Join("path", discoveryPath, "2.2.2.2"), Value: []byte("2.2.2.2:2222")},
-	}
-
-	stopCh := make(chan struct{})
-	ch, errCh := d.Watch(stopCh)
-
-	// It should fire an error since the first WatchTree call failed.
-	c.Assert(<-errCh, check.ErrorMatches, "test error")
-	// We have to drain the error channel otherwise Watch will get stuck.
-	go func() {
-		for range errCh {
-		}
-	}()
-
-	// Push the entries into the store channel and make sure discovery emits.
-	mockCh <- kvs
-	c.Assert(<-ch, check.DeepEquals, expected)
-
-	// Add a new entry.
-	expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"})
-	kvs = append(kvs, &store.KVPair{Key: path.Join("path", discoveryPath, "3.3.3.3"), Value: []byte("3.3.3.3:3333")})
-	mockCh <- kvs
-	c.Assert(<-ch, check.DeepEquals, expected)
-
-	close(mockCh)
-	// Give it enough time to call WatchTree.
-	time.Sleep(3)
-
-	// Stop and make sure it closes all channels.
-	close(stopCh)
-	c.Assert(<-ch, check.IsNil)
-	c.Assert(<-errCh, check.IsNil)
-}
-
-// FakeStore implements store.Store methods. It mocks all store
-// function in a simple, naive way.
-type FakeStore struct {
-	Endpoints  []string
-	Options    *store.Config
-	mockKVChan <-chan []*store.KVPair
-
-	watchTreeCallCount int
-}
-
-func (s *FakeStore) Put(key string, value []byte, options *store.WriteOptions) error {
-	return nil
-}
-
-func (s *FakeStore) Get(key string) (*store.KVPair, error) {
-	return nil, nil
-}
-
-func (s *FakeStore) Delete(key string) error {
-	return nil
-}
-
-func (s *FakeStore) Exists(key string) (bool, error) {
-	return true, nil
-}
-
-func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
-	return nil, nil
-}
-
-// WatchTree will fail the first time, and return the mockKVchan afterwards.
-// This is the behavior we need for testing.. If we need 'moar', should update this.
-func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
-	if s.watchTreeCallCount == 0 {
-		s.watchTreeCallCount = 1
-		return nil, errors.New("test error")
-	}
-	// First calls error
-	return s.mockKVChan, nil
-}
-
-func (s *FakeStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
-	return nil, nil
-}
-
-func (s *FakeStore) List(directory string) ([]*store.KVPair, error) {
-	return []*store.KVPair{}, nil
-}
-
-func (s *FakeStore) DeleteTree(directory string) error {
-	return nil
-}
-
-func (s *FakeStore) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
-	return true, nil, nil
-}
-
-func (s *FakeStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
-	return true, nil
-}
-
-func (s *FakeStore) Close() {
-}

+ 0 - 54
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/nodes/nodes.go

@@ -1,54 +0,0 @@
-package nodes
-
-import (
-	"fmt"
-	"strings"
-	"time"
-
-	"github.com/docker/docker/pkg/discovery"
-)
-
-// Discovery is exported
-type Discovery struct {
-	entries discovery.Entries
-}
-
-func init() {
-	Init()
-}
-
-// Init is exported
-func Init() {
-	discovery.Register("nodes", &Discovery{})
-}
-
-// Initialize is exported
-func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error {
-	for _, input := range strings.Split(uris, ",") {
-		for _, ip := range discovery.Generate(input) {
-			entry, err := discovery.NewEntry(ip)
-			if err != nil {
-				return fmt.Errorf("%s, please check you are using the correct discovery (missing token:// ?)", err.Error())
-			}
-			s.entries = append(s.entries, entry)
-		}
-	}
-
-	return nil
-}
-
-// Watch is exported
-func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
-	ch := make(chan discovery.Entries)
-	go func() {
-		defer close(ch)
-		ch <- s.entries
-		<-stopCh
-	}()
-	return ch, nil
-}
-
-// Register is exported
-func (s *Discovery) Register(addr string) error {
-	return discovery.ErrNotImplemented
-}

+ 0 - 51
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go

@@ -1,51 +0,0 @@
-package nodes
-
-import (
-	"testing"
-
-	"github.com/docker/docker/pkg/discovery"
-
-	"github.com/go-check/check"
-)
-
-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) }
-
-type DiscoverySuite struct{}
-
-var _ = check.Suite(&DiscoverySuite{})
-
-func (s *DiscoverySuite) TestInitialize(c *check.C) {
-	d := &Discovery{}
-	d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil)
-	c.Assert(len(d.entries), check.Equals, 2)
-	c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111")
-	c.Assert(d.entries[1].String(), check.Equals, "2.2.2.2:2222")
-}
-
-func (s *DiscoverySuite) TestInitializeWithPattern(c *check.C) {
-	d := &Discovery{}
-	d.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0, 0, nil)
-	c.Assert(len(d.entries), check.Equals, 5)
-	c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111")
-	c.Assert(d.entries[1].String(), check.Equals, "1.1.1.2:1111")
-	c.Assert(d.entries[2].String(), check.Equals, "2.2.2.2:2222")
-	c.Assert(d.entries[3].String(), check.Equals, "2.2.2.3:2222")
-	c.Assert(d.entries[4].String(), check.Equals, "2.2.2.4:2222")
-}
-
-func (s *DiscoverySuite) TestWatch(c *check.C) {
-	d := &Discovery{}
-	d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil)
-	expected := discovery.Entries{
-		&discovery.Entry{Host: "1.1.1.1", Port: "1111"},
-		&discovery.Entry{Host: "2.2.2.2", Port: "2222"},
-	}
-	ch, _ := d.Watch(nil)
-	c.Assert(expected.Equals(<-ch), check.Equals, true)
-}
-
-func (s *DiscoverySuite) TestRegister(c *check.C) {
-	d := &Discovery{}
-	c.Assert(d.Register("0.0.0.0"), check.NotNil)
-}

+ 0 - 24
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/homedir/homedir_test.go

@@ -1,24 +0,0 @@
-package homedir
-
-import (
-	"path/filepath"
-	"testing"
-)
-
-func TestGet(t *testing.T) {
-	home := Get()
-	if home == "" {
-		t.Fatal("returned home directory is empty")
-	}
-
-	if !filepath.IsAbs(home) {
-		t.Fatalf("returned path is not absolute: %s", home)
-	}
-}
-
-func TestGetShortcutString(t *testing.T) {
-	shortcut := GetShortcutString()
-	if shortcut == "" {
-		t.Fatal("returned shortcut string is empty")
-	}
-}

+ 0 - 158
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/bytespipe_test.go

@@ -1,158 +0,0 @@
-package ioutils
-
-import (
-	"crypto/sha1"
-	"encoding/hex"
-	"math/rand"
-	"testing"
-	"time"
-)
-
-func TestBytesPipeRead(t *testing.T) {
-	buf := NewBytesPipe(nil)
-	buf.Write([]byte("12"))
-	buf.Write([]byte("34"))
-	buf.Write([]byte("56"))
-	buf.Write([]byte("78"))
-	buf.Write([]byte("90"))
-	rd := make([]byte, 4)
-	n, err := buf.Read(rd)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if n != 4 {
-		t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4)
-	}
-	if string(rd) != "1234" {
-		t.Fatalf("Read %s, but must be %s", rd, "1234")
-	}
-	n, err = buf.Read(rd)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if n != 4 {
-		t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4)
-	}
-	if string(rd) != "5678" {
-		t.Fatalf("Read %s, but must be %s", rd, "5679")
-	}
-	n, err = buf.Read(rd)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if n != 2 {
-		t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2)
-	}
-	if string(rd[:n]) != "90" {
-		t.Fatalf("Read %s, but must be %s", rd, "90")
-	}
-}
-
-func TestBytesPipeWrite(t *testing.T) {
-	buf := NewBytesPipe(nil)
-	buf.Write([]byte("12"))
-	buf.Write([]byte("34"))
-	buf.Write([]byte("56"))
-	buf.Write([]byte("78"))
-	buf.Write([]byte("90"))
-	if string(buf.buf[0]) != "1234567890" {
-		t.Fatalf("Buffer %s, must be %s", buf.buf, "1234567890")
-	}
-}
-
-// Write and read in different speeds/chunk sizes and check valid data is read.
-func TestBytesPipeWriteRandomChunks(t *testing.T) {
-	cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{
-		{100, 10, 1},
-		{1000, 10, 5},
-		{1000, 100, 0},
-		{1000, 5, 6},
-		{10000, 50, 25},
-	}
-
-	testMessage := []byte("this is a random string for testing")
-	// random slice sizes to read and write
-	writeChunks := []int{25, 35, 15, 20}
-	readChunks := []int{5, 45, 20, 25}
-
-	for _, c := range cases {
-		// first pass: write directly to hash
-		hash := sha1.New()
-		for i := 0; i < c.iterations*c.writesPerLoop; i++ {
-			if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil {
-				t.Fatal(err)
-			}
-		}
-		expected := hex.EncodeToString(hash.Sum(nil))
-
-		// write/read through buffer
-		buf := NewBytesPipe(nil)
-		hash.Reset()
-
-		done := make(chan struct{})
-
-		go func() {
-			// random delay before read starts
-			<-time.After(time.Duration(rand.Intn(10)) * time.Millisecond)
-			for i := 0; ; i++ {
-				p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)])
-				n, _ := buf.Read(p)
-				if n == 0 {
-					break
-				}
-				hash.Write(p[:n])
-			}
-
-			close(done)
-		}()
-
-		for i := 0; i < c.iterations; i++ {
-			for w := 0; w < c.writesPerLoop; w++ {
-				buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]])
-			}
-		}
-		buf.Close()
-		<-done
-
-		actual := hex.EncodeToString(hash.Sum(nil))
-
-		if expected != actual {
-			t.Fatalf("BytesPipe returned invalid data. Expected checksum %v, got %v", expected, actual)
-		}
-
-	}
-}
-
-func BenchmarkBytesPipeWrite(b *testing.B) {
-	for i := 0; i < b.N; i++ {
-		readBuf := make([]byte, 1024)
-		buf := NewBytesPipe(nil)
-		go func() {
-			var err error
-			for err == nil {
-				_, err = buf.Read(readBuf)
-			}
-		}()
-		for j := 0; j < 1000; j++ {
-			buf.Write([]byte("pretty short line, because why not?"))
-		}
-		buf.Close()
-	}
-}
-
-func BenchmarkBytesPipeRead(b *testing.B) {
-	rd := make([]byte, 512)
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		buf := NewBytesPipe(nil)
-		for j := 0; j < 500; j++ {
-			buf.Write(make([]byte, 1024))
-		}
-		b.StartTimer()
-		for j := 0; j < 1000; j++ {
-			if n, _ := buf.Read(rd); n != 512 {
-				b.Fatalf("Wrong number of bytes: %d", n)
-			}
-		}
-	}
-}

+ 0 - 17
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/fmt_test.go

@@ -1,17 +0,0 @@
-package ioutils
-
-import "testing"
-
-func TestFprintfIfNotEmpty(t *testing.T) {
-	wc := NewWriteCounter(&NopWriter{})
-	n, _ := FprintfIfNotEmpty(wc, "foo%s", "")
-
-	if wc.Count != 0 || n != 0 {
-		t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n)
-	}
-
-	n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar")
-	if wc.Count != 6 || n != 6 {
-		t.Errorf("Wrong count: %v vs. %v vs. 6", wc.Count, n)
-	}
-}

+ 0 - 149
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/multireader_test.go

@@ -1,149 +0,0 @@
-package ioutils
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"strings"
-	"testing"
-)
-
-func TestMultiReadSeekerReadAll(t *testing.T) {
-	str := "hello world"
-	s1 := strings.NewReader(str + " 1")
-	s2 := strings.NewReader(str + " 2")
-	s3 := strings.NewReader(str + " 3")
-	mr := MultiReadSeeker(s1, s2, s3)
-
-	expectedSize := int64(s1.Len() + s2.Len() + s3.Len())
-
-	b, err := ioutil.ReadAll(mr)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	expected := "hello world 1hello world 2hello world 3"
-	if string(b) != expected {
-		t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected)
-	}
-
-	size, err := mr.Seek(0, os.SEEK_END)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if size != expectedSize {
-		t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize)
-	}
-
-	// Reset the position and read again
-	pos, err := mr.Seek(0, os.SEEK_SET)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if pos != 0 {
-		t.Fatalf("expected position to be set to 0, got %d", pos)
-	}
-
-	b, err = ioutil.ReadAll(mr)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if string(b) != expected {
-		t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected)
-	}
-}
-
-func TestMultiReadSeekerReadEach(t *testing.T) {
-	str := "hello world"
-	s1 := strings.NewReader(str + " 1")
-	s2 := strings.NewReader(str + " 2")
-	s3 := strings.NewReader(str + " 3")
-	mr := MultiReadSeeker(s1, s2, s3)
-
-	var totalBytes int64
-	for i, s := range []*strings.Reader{s1, s2, s3} {
-		sLen := int64(s.Len())
-		buf := make([]byte, s.Len())
-		expected := []byte(fmt.Sprintf("%s %d", str, i+1))
-
-		if _, err := mr.Read(buf); err != nil && err != io.EOF {
-			t.Fatal(err)
-		}
-
-		if !bytes.Equal(buf, expected) {
-			t.Fatalf("expected %q to be %q", string(buf), string(expected))
-		}
-
-		pos, err := mr.Seek(0, os.SEEK_CUR)
-		if err != nil {
-			t.Fatalf("iteration: %d, error: %v", i+1, err)
-		}
-
-		// check that the total bytes read is the current position of the seeker
-		totalBytes += sLen
-		if pos != totalBytes {
-			t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1)
-		}
-
-		// This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well
-		newPos, err := mr.Seek(pos, os.SEEK_SET)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if newPos != pos {
-			t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos)
-		}
-	}
-}
-
-func TestMultiReadSeekerReadSpanningChunks(t *testing.T) {
-	str := "hello world"
-	s1 := strings.NewReader(str + " 1")
-	s2 := strings.NewReader(str + " 2")
-	s3 := strings.NewReader(str + " 3")
-	mr := MultiReadSeeker(s1, s2, s3)
-
-	buf := make([]byte, s1.Len()+3)
-	_, err := mr.Read(buf)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string
-	expected := "hello world 1hel"
-	if string(buf) != expected {
-		t.Fatalf("expected %s to be %s", string(buf), expected)
-	}
-}
-
-func TestMultiReadSeekerNegativeSeek(t *testing.T) {
-	str := "hello world"
-	s1 := strings.NewReader(str + " 1")
-	s2 := strings.NewReader(str + " 2")
-	s3 := strings.NewReader(str + " 3")
-	mr := MultiReadSeeker(s1, s2, s3)
-
-	s1Len := s1.Len()
-	s2Len := s2.Len()
-	s3Len := s3.Len()
-
-	s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if s != int64(s1Len+s2Len) {
-		t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len())
-	}
-
-	buf := make([]byte, s3Len)
-	if _, err := mr.Read(buf); err != nil && err != io.EOF {
-		t.Fatal(err)
-	}
-	expected := fmt.Sprintf("%s %d", str, 3)
-	if string(buf) != fmt.Sprintf("%s %d", str, 3) {
-		t.Fatalf("expected %q to be %q", string(buf), expected)
-	}
-}

+ 0 - 94
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers_test.go

@@ -1,94 +0,0 @@
-package ioutils
-
-import (
-	"fmt"
-	"io/ioutil"
-	"strings"
-	"testing"
-	"time"
-
-	"golang.org/x/net/context"
-)
-
-// Implement io.Reader
-type errorReader struct{}
-
-func (r *errorReader) Read(p []byte) (int, error) {
-	return 0, fmt.Errorf("Error reader always fail.")
-}
-
-func TestReadCloserWrapperClose(t *testing.T) {
-	reader := strings.NewReader("A string reader")
-	wrapper := NewReadCloserWrapper(reader, func() error {
-		return fmt.Errorf("This will be called when closing")
-	})
-	err := wrapper.Close()
-	if err == nil || !strings.Contains(err.Error(), "This will be called when closing") {
-		t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.")
-	}
-}
-
-func TestReaderErrWrapperReadOnError(t *testing.T) {
-	called := false
-	reader := &errorReader{}
-	wrapper := NewReaderErrWrapper(reader, func() {
-		called = true
-	})
-	_, err := wrapper.Read([]byte{})
-	if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") {
-		t.Fatalf("readErrWrapper should returned an error")
-	}
-	if !called {
-		t.Fatalf("readErrWrapper should have call the anonymous function on failure")
-	}
-}
-
-func TestReaderErrWrapperRead(t *testing.T) {
-	reader := strings.NewReader("a string reader.")
-	wrapper := NewReaderErrWrapper(reader, func() {
-		t.Fatalf("readErrWrapper should not have called the anonymous function")
-	})
-	// Read 20 byte (should be ok with the string above)
-	num, err := wrapper.Read(make([]byte, 20))
-	if err != nil {
-		t.Fatal(err)
-	}
-	if num != 16 {
-		t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num)
-	}
-}
-
-func TestHashData(t *testing.T) {
-	reader := strings.NewReader("hash-me")
-	actual, err := HashData(reader)
-	if err != nil {
-		t.Fatal(err)
-	}
-	expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa"
-	if actual != expected {
-		t.Fatalf("Expecting %s, got %s", expected, actual)
-	}
-}
-
-type perpetualReader struct{}
-
-func (p *perpetualReader) Read(buf []byte) (n int, err error) {
-	for i := 0; i != len(buf); i++ {
-		buf[i] = 'a'
-	}
-	return len(buf), nil
-}
-
-func TestCancelReadCloser(t *testing.T) {
-	ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
-	cancelReadCloser := NewCancelReadCloser(ctx, ioutil.NopCloser(&perpetualReader{}))
-	for {
-		var buf [128]byte
-		_, err := cancelReadCloser.Read(buf[:])
-		if err == context.DeadlineExceeded {
-			break
-		} else if err != nil {
-			t.Fatalf("got unexpected error: %v", err)
-		}
-	}
-}

+ 41 - 41
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writeflusher.go

@@ -1,9 +1,7 @@
 package ioutils
 
 import (
-	"errors"
 	"io"
-	"net/http"
 	"sync"
 )
 
@@ -11,45 +9,43 @@ import (
 // is a flush. In addition, the Close method can be called to intercept
 // Read/Write calls if the targets lifecycle has already ended.
 type WriteFlusher struct {
-	mu      sync.Mutex
-	w       io.Writer
-	flusher http.Flusher
-	flushed bool
-	closed  error
+	w           io.Writer
+	flusher     flusher
+	flushed     chan struct{}
+	flushedOnce sync.Once
+	closed      chan struct{}
+	closeLock   sync.Mutex
+}
 
-	// TODO(stevvooe): Use channel for closed instead, remove mutex. Using a
-	// channel will allow one to properly order the operations.
+type flusher interface {
+	Flush()
 }
 
-var errWriteFlusherClosed = errors.New("writeflusher: closed")
+var errWriteFlusherClosed = io.EOF
 
 func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
-	wf.mu.Lock()
-	defer wf.mu.Unlock()
-	if wf.closed != nil {
-		return 0, wf.closed
+	select {
+	case <-wf.closed:
+		return 0, errWriteFlusherClosed
+	default:
 	}
 
 	n, err = wf.w.Write(b)
-	wf.flush() // every write is a flush.
+	wf.Flush() // every write is a flush.
 	return n, err
 }
 
 // Flush the stream immediately.
 func (wf *WriteFlusher) Flush() {
-	wf.mu.Lock()
-	defer wf.mu.Unlock()
-
-	wf.flush()
-}
-
-// flush the stream immediately without taking a lock. Used internally.
-func (wf *WriteFlusher) flush() {
-	if wf.closed != nil {
+	select {
+	case <-wf.closed:
 		return
+	default:
 	}
 
-	wf.flushed = true
+	wf.flushedOnce.Do(func() {
+		close(wf.flushed)
+	})
 	wf.flusher.Flush()
 }
 
@@ -59,34 +55,38 @@ func (wf *WriteFlusher) Flushed() bool {
 	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
 	// be used to detect whether or a response code has been issued or not.
 	// Another hook should be used instead.
-	wf.mu.Lock()
-	defer wf.mu.Unlock()
-
-	return wf.flushed
+	var flushed bool
+	select {
+	case <-wf.flushed:
+		flushed = true
+	default:
+	}
+	return flushed
 }
 
 // Close closes the write flusher, disallowing any further writes to the
 // target. After the flusher is closed, all calls to write or flush will
 // result in an error.
 func (wf *WriteFlusher) Close() error {
-	wf.mu.Lock()
-	defer wf.mu.Unlock()
-
-	if wf.closed != nil {
-		return wf.closed
+	wf.closeLock.Lock()
+	defer wf.closeLock.Unlock()
+
+	select {
+	case <-wf.closed:
+		return errWriteFlusherClosed
+	default:
+		close(wf.closed)
 	}
-
-	wf.closed = errWriteFlusherClosed
 	return nil
 }
 
 // NewWriteFlusher returns a new WriteFlusher.
 func NewWriteFlusher(w io.Writer) *WriteFlusher {
-	var flusher http.Flusher
-	if f, ok := w.(http.Flusher); ok {
-		flusher = f
+	var fl flusher
+	if f, ok := w.(flusher); ok {
+		fl = f
 	} else {
-		flusher = &NopFlusher{}
+		fl = &NopFlusher{}
 	}
-	return &WriteFlusher{w: w, flusher: flusher}
+	return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
 }

+ 0 - 65
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go

@@ -1,65 +0,0 @@
-package ioutils
-
-import (
-	"bytes"
-	"strings"
-	"testing"
-)
-
-func TestWriteCloserWrapperClose(t *testing.T) {
-	called := false
-	writer := bytes.NewBuffer([]byte{})
-	wrapper := NewWriteCloserWrapper(writer, func() error {
-		called = true
-		return nil
-	})
-	if err := wrapper.Close(); err != nil {
-		t.Fatal(err)
-	}
-	if !called {
-		t.Fatalf("writeCloserWrapper should have call the anonymous function.")
-	}
-}
-
-func TestNopWriteCloser(t *testing.T) {
-	writer := bytes.NewBuffer([]byte{})
-	wrapper := NopWriteCloser(writer)
-	if err := wrapper.Close(); err != nil {
-		t.Fatal("NopWriteCloser always return nil on Close.")
-	}
-
-}
-
-func TestNopWriter(t *testing.T) {
-	nw := &NopWriter{}
-	l, err := nw.Write([]byte{'c'})
-	if err != nil {
-		t.Fatal(err)
-	}
-	if l != 1 {
-		t.Fatalf("Expected 1 got %d", l)
-	}
-}
-
-func TestWriteCounter(t *testing.T) {
-	dummy1 := "This is a dummy string."
-	dummy2 := "This is another dummy string."
-	totalLength := int64(len(dummy1) + len(dummy2))
-
-	reader1 := strings.NewReader(dummy1)
-	reader2 := strings.NewReader(dummy2)
-
-	var buffer bytes.Buffer
-	wc := NewWriteCounter(&buffer)
-
-	reader1.WriteTo(wc)
-	reader2.WriteTo(wc)
-
-	if wc.Count != totalLength {
-		t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength)
-	}
-
-	if buffer.String() != dummy1+dummy2 {
-		t.Error("Wrong message written")
-	}
-}

+ 1 - 1
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE

@@ -1,4 +1,4 @@
-Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved.
+Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are

+ 0 - 36
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/example/example.go

@@ -1,36 +0,0 @@
-package main
-
-import (
-	"fmt"
-
-	flag "github.com/docker/docker/pkg/mflag"
-)
-
-var (
-	i        int
-	str      string
-	b, b2, h bool
-)
-
-func init() {
-	flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp")
-	flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool")
-	flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool")
-	flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool")
-	flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer")
-	flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage
-	flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help")
-	flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3")
-	flag.Parse()
-}
-func main() {
-	if h {
-		flag.PrintDefaults()
-	} else {
-		fmt.Printf("s/#hidden/-string: %s\n", str)
-		fmt.Printf("b: %t\n", b)
-		fmt.Printf("-bool: %t\n", b2)
-		fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String())
-		fmt.Printf("ARGS: %v\n", flag.Args())
-	}
-}

+ 22 - 6
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go

@@ -1,4 +1,4 @@
-// Copyright 2014-2015 The Docker & Go Authors. All rights reserved.
+// Copyright 2014-2016 The Docker & Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 // license that can be found in the LICENSE file.
 
 
@@ -571,10 +571,7 @@ func (fs *FlagSet) PrintDefaults() {
 				format := "  -%s=%s"
 				format := "  -%s=%s"
 				fmt.Fprintf(writer, format, strings.Join(names, ", -"), val)
 				fmt.Fprintf(writer, format, strings.Join(names, ", -"), val)
 			}
 			}
-			for i, line := range strings.Split(flag.Usage, "\n") {
-				if i != 0 {
-					line = "  " + line
-				}
+			for _, line := range strings.Split(flag.Usage, "\n") {
 				fmt.Fprintln(writer, "\t", line)
 				fmt.Fprintln(writer, "\t", line)
 			}
 			}
 		}
 		}
@@ -1166,7 +1163,7 @@ func (fs *FlagSet) ReportError(str string, withHelp bool) {
 			str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'"
 			str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'"
 		}
 		}
 	}
 	}
-	fmt.Fprintf(fs.Out(), "docker: %s.\n", str)
+	fmt.Fprintf(fs.Out(), "%s: %s.\n", os.Args[0], str)
 }
 }
 
 
 // Parsed reports whether fs.Parse has been called.
 // Parsed reports whether fs.Parse has been called.
@@ -1226,11 +1223,27 @@ func (v mergeVal) IsBoolFlag() bool {
 	return false
 	return false
 }
 }
 
 
+// Name returns the name of a mergeVal.
+// If the original value had a name, return the original name,
+// otherwise, return the key asinged to this mergeVal.
+func (v mergeVal) Name() string {
+	type namedValue interface {
+		Name() string
+	}
+	if nVal, ok := v.Value.(namedValue); ok {
+		return nVal.Name()
+	}
+	return v.key
+}
+
 // Merge is an helper function that merges n FlagSets into a single dest FlagSet
 // In case of name collision between the flagsets it will apply
 // the destination FlagSet's errorHandling behavior.
 func Merge(dest *FlagSet, flagsets ...*FlagSet) error {
 	for _, fset := range flagsets {
+		if fset.formal == nil {
+			continue
+		}
 		for k, f := range fset.formal {
 			if _, ok := dest.formal[k]; ok {
 				var err error
@@ -1252,6 +1265,9 @@ func Merge(dest *FlagSet, flagsets ...*FlagSet) error {
 			}
 			newF := *f
 			newF.Value = mergeVal{f.Value, k, fset}
+			if dest.formal == nil {
+				dest.formal = make(map[string]*Flag)
+			}
 			dest.formal[k] = &newF
 		}
 	}
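
The two nil checks added to Merge above are what let an empty FlagSet take part in a merge: a source set whose formal map was never populated is now skipped, and a destination set gets its formal map allocated before the first write (which would otherwise panic on a nil-map assignment). A minimal sketch of that path, using only mflag calls that appear elsewhere in this diff (NewFlagSet, Bool, Merge, Parse); this is an illustration of the intended behavior, not code from the commit:

package main

import (
	"fmt"

	flag "github.com/docker/docker/pkg/mflag"
)

func main() {
	// dest has never registered a flag, so its formal map starts out nil;
	// the change above allocates it lazily inside Merge instead of
	// panicking on the nil-map write.
	dest := flag.NewFlagSet("dest", flag.ContinueOnError)

	// empty contributes nothing; the new guard makes Merge skip it.
	empty := flag.NewFlagSet("empty", flag.ContinueOnError)

	src := flag.NewFlagSet("src", flag.ContinueOnError)
	debug := src.Bool([]string{"d", "-debug"}, false, "enable debug output")

	if err := flag.Merge(dest, empty, src); err != nil {
		fmt.Println("merge failed:", err)
		return
	}

	// Because mergeVal embeds the original Value, setting the flag through
	// dest should still write through to the pointer returned by src.Bool.
	if err := dest.Parse([]string{"-debug"}); err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Printf("debug=%v\n", *debug)
}
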

+ 0 - 516
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag_test.go

@@ -1,516 +0,0 @@
-// Copyright 2014-2015 The Docker & Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mflag
-
-import (
-	"bytes"
-	"fmt"
-	"os"
-	"sort"
-	"strings"
-	"testing"
-	"time"
-)
-
-// ResetForTesting clears all flag state and sets the usage function as directed.
-// After calling ResetForTesting, parse errors in flag handling will not
-// exit the program.
-func ResetForTesting(usage func()) {
-	CommandLine = NewFlagSet(os.Args[0], ContinueOnError)
-	Usage = usage
-}
-func boolString(s string) string {
-	if s == "0" {
-		return "false"
-	}
-	return "true"
-}
-
-func TestEverything(t *testing.T) {
-	ResetForTesting(nil)
-	Bool([]string{"test_bool"}, false, "bool value")
-	Int([]string{"test_int"}, 0, "int value")
-	Int64([]string{"test_int64"}, 0, "int64 value")
-	Uint([]string{"test_uint"}, 0, "uint value")
-	Uint64([]string{"test_uint64"}, 0, "uint64 value")
-	String([]string{"test_string"}, "0", "string value")
-	Float64([]string{"test_float64"}, 0, "float64 value")
-	Duration([]string{"test_duration"}, 0, "time.Duration value")
-
-	m := make(map[string]*Flag)
-	desired := "0"
-	visitor := func(f *Flag) {
-		for _, name := range f.Names {
-			if len(name) > 5 && name[0:5] == "test_" {
-				m[name] = f
-				ok := false
-				switch {
-				case f.Value.String() == desired:
-					ok = true
-				case name == "test_bool" && f.Value.String() == boolString(desired):
-					ok = true
-				case name == "test_duration" && f.Value.String() == desired+"s":
-					ok = true
-				}
-				if !ok {
-					t.Error("Visit: bad value", f.Value.String(), "for", name)
-				}
-			}
-		}
-	}
-	VisitAll(visitor)
-	if len(m) != 8 {
-		t.Error("VisitAll misses some flags")
-		for k, v := range m {
-			t.Log(k, *v)
-		}
-	}
-	m = make(map[string]*Flag)
-	Visit(visitor)
-	if len(m) != 0 {
-		t.Errorf("Visit sees unset flags")
-		for k, v := range m {
-			t.Log(k, *v)
-		}
-	}
-	// Now set all flags
-	Set("test_bool", "true")
-	Set("test_int", "1")
-	Set("test_int64", "1")
-	Set("test_uint", "1")
-	Set("test_uint64", "1")
-	Set("test_string", "1")
-	Set("test_float64", "1")
-	Set("test_duration", "1s")
-	desired = "1"
-	Visit(visitor)
-	if len(m) != 8 {
-		t.Error("Visit fails after set")
-		for k, v := range m {
-			t.Log(k, *v)
-		}
-	}
-	// Now test they're visited in sort order.
-	var flagNames []string
-	Visit(func(f *Flag) {
-		for _, name := range f.Names {
-			flagNames = append(flagNames, name)
-		}
-	})
-	if !sort.StringsAreSorted(flagNames) {
-		t.Errorf("flag names not sorted: %v", flagNames)
-	}
-}
-
-func TestGet(t *testing.T) {
-	ResetForTesting(nil)
-	Bool([]string{"test_bool"}, true, "bool value")
-	Int([]string{"test_int"}, 1, "int value")
-	Int64([]string{"test_int64"}, 2, "int64 value")
-	Uint([]string{"test_uint"}, 3, "uint value")
-	Uint64([]string{"test_uint64"}, 4, "uint64 value")
-	String([]string{"test_string"}, "5", "string value")
-	Float64([]string{"test_float64"}, 6, "float64 value")
-	Duration([]string{"test_duration"}, 7, "time.Duration value")
-
-	visitor := func(f *Flag) {
-		for _, name := range f.Names {
-			if len(name) > 5 && name[0:5] == "test_" {
-				g, ok := f.Value.(Getter)
-				if !ok {
-					t.Errorf("Visit: value does not satisfy Getter: %T", f.Value)
-					return
-				}
-				switch name {
-				case "test_bool":
-					ok = g.Get() == true
-				case "test_int":
-					ok = g.Get() == int(1)
-				case "test_int64":
-					ok = g.Get() == int64(2)
-				case "test_uint":
-					ok = g.Get() == uint(3)
-				case "test_uint64":
-					ok = g.Get() == uint64(4)
-				case "test_string":
-					ok = g.Get() == "5"
-				case "test_float64":
-					ok = g.Get() == float64(6)
-				case "test_duration":
-					ok = g.Get() == time.Duration(7)
-				}
-				if !ok {
-					t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name)
-				}
-			}
-		}
-	}
-	VisitAll(visitor)
-}
-
-func testParse(f *FlagSet, t *testing.T) {
-	if f.Parsed() {
-		t.Error("f.Parse() = true before Parse")
-	}
-	boolFlag := f.Bool([]string{"bool"}, false, "bool value")
-	bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value")
-	f.Bool([]string{"bool3"}, false, "bool3 value")
-	bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value")
-	intFlag := f.Int([]string{"-int"}, 0, "int value")
-	int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value")
-	uintFlag := f.Uint([]string{"uint"}, 0, "uint value")
-	uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value")
-	stringFlag := f.String([]string{"string"}, "0", "string value")
-	f.String([]string{"string2"}, "0", "string2 value")
-	singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value")
-	doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value")
-	mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value")
-	mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value")
-	nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value")
-	nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value")
-	float64Flag := f.Float64([]string{"float64"}, 0, "float64 value")
-	durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value")
-	extra := "one-extra-argument"
-	args := []string{
-		"-bool",
-		"-bool2=true",
-		"-bool4=false",
-		"--int", "22",
-		"--int64", "0x23",
-		"-uint", "24",
-		"--uint64", "25",
-		"-string", "hello",
-		"-squote='single'",
-		`-dquote="double"`,
-		`-mquote='mixed"`,
-		`-mquote2="mixed2'`,
-		`-nquote="'single nested'"`,
-		`-nquote2='"double nested"'`,
-		"-float64", "2718e28",
-		"-duration", "2m",
-		extra,
-	}
-	if err := f.Parse(args); err != nil {
-		t.Fatal(err)
-	}
-	if !f.Parsed() {
-		t.Error("f.Parse() = false after Parse")
-	}
-	if *boolFlag != true {
-		t.Error("bool flag should be true, is ", *boolFlag)
-	}
-	if *bool2Flag != true {
-		t.Error("bool2 flag should be true, is ", *bool2Flag)
-	}
-	if !f.IsSet("bool2") {
-		t.Error("bool2 should be marked as set")
-	}
-	if f.IsSet("bool3") {
-		t.Error("bool3 should not be marked as set")
-	}
-	if !f.IsSet("bool4") {
-		t.Error("bool4 should be marked as set")
-	}
-	if *bool4Flag != false {
-		t.Error("bool4 flag should be false, is ", *bool4Flag)
-	}
-	if *intFlag != 22 {
-		t.Error("int flag should be 22, is ", *intFlag)
-	}
-	if *int64Flag != 0x23 {
-		t.Error("int64 flag should be 0x23, is ", *int64Flag)
-	}
-	if *uintFlag != 24 {
-		t.Error("uint flag should be 24, is ", *uintFlag)
-	}
-	if *uint64Flag != 25 {
-		t.Error("uint64 flag should be 25, is ", *uint64Flag)
-	}
-	if *stringFlag != "hello" {
-		t.Error("string flag should be `hello`, is ", *stringFlag)
-	}
-	if !f.IsSet("string") {
-		t.Error("string flag should be marked as set")
-	}
-	if f.IsSet("string2") {
-		t.Error("string2 flag should not be marked as set")
-	}
-	if *singleQuoteFlag != "single" {
-		t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag)
-	}
-	if *doubleQuoteFlag != "double" {
-		t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag)
-	}
-	if *mixedQuoteFlag != `'mixed"` {
-		t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag)
-	}
-	if *mixed2QuoteFlag != `"mixed2'` {
-		t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag)
-	}
-	if *nestedQuoteFlag != "'single nested'" {
-		t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag)
-	}
-	if *nested2QuoteFlag != `"double nested"` {
-		t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag)
-	}
-	if *float64Flag != 2718e28 {
-		t.Error("float64 flag should be 2718e28, is ", *float64Flag)
-	}
-	if *durationFlag != 2*time.Minute {
-		t.Error("duration flag should be 2m, is ", *durationFlag)
-	}
-	if len(f.Args()) != 1 {
-		t.Error("expected one argument, got", len(f.Args()))
-	} else if f.Args()[0] != extra {
-		t.Errorf("expected argument %q got %q", extra, f.Args()[0])
-	}
-}
-
-func testPanic(f *FlagSet, t *testing.T) {
-	f.Int([]string{"-int"}, 0, "int value")
-	if f.Parsed() {
-		t.Error("f.Parse() = true before Parse")
-	}
-	args := []string{
-		"-int", "21",
-	}
-	f.Parse(args)
-}
-
-func TestParsePanic(t *testing.T) {
-	ResetForTesting(func() {})
-	testPanic(CommandLine, t)
-}
-
-func TestParse(t *testing.T) {
-	ResetForTesting(func() { t.Error("bad parse") })
-	testParse(CommandLine, t)
-}
-
-func TestFlagSetParse(t *testing.T) {
-	testParse(NewFlagSet("test", ContinueOnError), t)
-}
-
-// Declare a user-defined flag type.
-type flagVar []string
-
-func (f *flagVar) String() string {
-	return fmt.Sprint([]string(*f))
-}
-
-func (f *flagVar) Set(value string) error {
-	*f = append(*f, value)
-	return nil
-}
-
-func TestUserDefined(t *testing.T) {
-	var flags FlagSet
-	flags.Init("test", ContinueOnError)
-	var v flagVar
-	flags.Var(&v, []string{"v"}, "usage")
-	if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil {
-		t.Error(err)
-	}
-	if len(v) != 3 {
-		t.Fatal("expected 3 args; got ", len(v))
-	}
-	expect := "[1 2 3]"
-	if v.String() != expect {
-		t.Errorf("expected value %q got %q", expect, v.String())
-	}
-}
-
-// Declare a user-defined boolean flag type.
-type boolFlagVar struct {
-	count int
-}
-
-func (b *boolFlagVar) String() string {
-	return fmt.Sprintf("%d", b.count)
-}
-
-func (b *boolFlagVar) Set(value string) error {
-	if value == "true" {
-		b.count++
-	}
-	return nil
-}
-
-func (b *boolFlagVar) IsBoolFlag() bool {
-	return b.count < 4
-}
-
-func TestUserDefinedBool(t *testing.T) {
-	var flags FlagSet
-	flags.Init("test", ContinueOnError)
-	var b boolFlagVar
-	var err error
-	flags.Var(&b, []string{"b"}, "usage")
-	if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil {
-		if b.count < 4 {
-			t.Error(err)
-		}
-	}
-
-	if b.count != 4 {
-		t.Errorf("want: %d; got: %d", 4, b.count)
-	}
-
-	if err == nil {
-		t.Error("expected error; got none")
-	}
-}
-
-func TestSetOutput(t *testing.T) {
-	var flags FlagSet
-	var buf bytes.Buffer
-	flags.SetOutput(&buf)
-	flags.Init("test", ContinueOnError)
-	flags.Parse([]string{"-unknown"})
-	if out := buf.String(); !strings.Contains(out, "-unknown") {
-		t.Logf("expected output mentioning unknown; got %q", out)
-	}
-}
-
-// This tests that one can reset the flags. This still works but not well, and is
-// superseded by FlagSet.
-func TestChangingArgs(t *testing.T) {
-	ResetForTesting(func() { t.Fatal("bad parse") })
-	oldArgs := os.Args
-	defer func() { os.Args = oldArgs }()
-	os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"}
-	before := Bool([]string{"before"}, false, "")
-	if err := CommandLine.Parse(os.Args[1:]); err != nil {
-		t.Fatal(err)
-	}
-	cmd := Arg(0)
-	os.Args = Args()
-	after := Bool([]string{"after"}, false, "")
-	Parse()
-	args := Args()
-
-	if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" {
-		t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args)
-	}
-}
-
-// Test that -help invokes the usage message and returns ErrHelp.
-func TestHelp(t *testing.T) {
-	var helpCalled = false
-	fs := NewFlagSet("help test", ContinueOnError)
-	fs.Usage = func() { helpCalled = true }
-	var flag bool
-	fs.BoolVar(&flag, []string{"flag"}, false, "regular flag")
-	// Regular flag invocation should work
-	err := fs.Parse([]string{"-flag=true"})
-	if err != nil {
-		t.Fatal("expected no error; got ", err)
-	}
-	if !flag {
-		t.Error("flag was not set by -flag")
-	}
-	if helpCalled {
-		t.Error("help called for regular flag")
-		helpCalled = false // reset for next test
-	}
-	// Help flag should work as expected.
-	err = fs.Parse([]string{"-help"})
-	if err == nil {
-		t.Fatal("error expected")
-	}
-	if err != ErrHelp {
-		t.Fatal("expected ErrHelp; got ", err)
-	}
-	if !helpCalled {
-		t.Fatal("help was not called")
-	}
-	// If we define a help flag, that should override.
-	var help bool
-	fs.BoolVar(&help, []string{"help"}, false, "help flag")
-	helpCalled = false
-	err = fs.Parse([]string{"-help"})
-	if err != nil {
-		t.Fatal("expected no error for defined -help; got ", err)
-	}
-	if helpCalled {
-		t.Fatal("help was called; should not have been for defined help flag")
-	}
-}
-
-// Test the flag count functions.
-func TestFlagCounts(t *testing.T) {
-	fs := NewFlagSet("help test", ContinueOnError)
-	var flag bool
-	fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag")
-	fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag")
-	fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag")
-	fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag")
-	fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag")
-	fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag")
-
-	if fs.FlagCount() != 6 {
-		t.Fatal("FlagCount wrong. ", fs.FlagCount())
-	}
-	if fs.FlagCountUndeprecated() != 4 {
-		t.Fatal("FlagCountUndeprecated wrong. ", fs.FlagCountUndeprecated())
-	}
-	if fs.NFlag() != 0 {
-		t.Fatal("NFlag wrong. ", fs.NFlag())
-	}
-	err := fs.Parse([]string{"-fd", "-g", "-flag4"})
-	if err != nil {
-		t.Fatal("expected no error for defined -help; got ", err)
-	}
-	if fs.NFlag() != 4 {
-		t.Fatal("NFlag wrong. ", fs.NFlag())
-	}
-}
-
-// Show up bug in sortFlags
-func TestSortFlags(t *testing.T) {
-	fs := NewFlagSet("help TestSortFlags", ContinueOnError)
-
-	var err error
-
-	var b bool
-	fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage")
-
-	err = fs.Parse([]string{"--banana=true"})
-	if err != nil {
-		t.Fatal("expected no error; got ", err)
-	}
-
-	count := 0
-
-	fs.VisitAll(func(flag *Flag) {
-		count++
-		if flag == nil {
-			t.Fatal("VisitAll should not return a nil flag")
-		}
-	})
-	flagcount := fs.FlagCount()
-	if flagcount != count {
-		t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count)
-	}
-	// Make sure its idempotent
-	if flagcount != fs.FlagCount() {
-		t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount())
-	}
-
-	count = 0
-	fs.Visit(func(flag *Flag) {
-		count++
-		if flag == nil {
-			t.Fatal("Visit should not return a nil flag")
-		}
-	})
-	nflag := fs.NFlag()
-	if nflag != count {
-		t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, count)
-	}
-	if nflag != fs.NFlag() {
-		t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag())
-	}
-}

+ 0 - 137
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount_test.go

@@ -1,137 +0,0 @@
-package mount
-
-import (
-	"os"
-	"path"
-	"testing"
-)
-
-func TestMountOptionsParsing(t *testing.T) {
-	options := "noatime,ro,size=10k"
-
-	flag, data := parseOptions(options)
-
-	if data != "size=10k" {
-		t.Fatalf("Expected size=10 got %s", data)
-	}
-
-	expectedFlag := NOATIME | RDONLY
-
-	if flag != expectedFlag {
-		t.Fatalf("Expected %d got %d", expectedFlag, flag)
-	}
-}
-
-func TestMounted(t *testing.T) {
-	tmp := path.Join(os.TempDir(), "mount-tests")
-	if err := os.MkdirAll(tmp, 0777); err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmp)
-
-	var (
-		sourceDir  = path.Join(tmp, "source")
-		targetDir  = path.Join(tmp, "target")
-		sourcePath = path.Join(sourceDir, "file.txt")
-		targetPath = path.Join(targetDir, "file.txt")
-	)
-
-	os.Mkdir(sourceDir, 0777)
-	os.Mkdir(targetDir, 0777)
-
-	f, err := os.Create(sourcePath)
-	if err != nil {
-		t.Fatal(err)
-	}
-	f.WriteString("hello")
-	f.Close()
-
-	f, err = os.Create(targetPath)
-	if err != nil {
-		t.Fatal(err)
-	}
-	f.Close()
-
-	if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(targetDir); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	mounted, err := Mounted(targetDir)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if !mounted {
-		t.Fatalf("Expected %s to be mounted", targetDir)
-	}
-	if _, err := os.Stat(targetDir); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestMountReadonly(t *testing.T) {
-	tmp := path.Join(os.TempDir(), "mount-tests")
-	if err := os.MkdirAll(tmp, 0777); err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmp)
-
-	var (
-		sourceDir  = path.Join(tmp, "source")
-		targetDir  = path.Join(tmp, "target")
-		sourcePath = path.Join(sourceDir, "file.txt")
-		targetPath = path.Join(targetDir, "file.txt")
-	)
-
-	os.Mkdir(sourceDir, 0777)
-	os.Mkdir(targetDir, 0777)
-
-	f, err := os.Create(sourcePath)
-	if err != nil {
-		t.Fatal(err)
-	}
-	f.WriteString("hello")
-	f.Close()
-
-	f, err = os.Create(targetPath)
-	if err != nil {
-		t.Fatal(err)
-	}
-	f.Close()
-
-	if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(targetDir); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	f, err = os.OpenFile(targetPath, os.O_RDWR, 0777)
-	if err == nil {
-		t.Fatal("Should not be able to open a ro file as rw")
-	}
-}
-
-func TestGetMounts(t *testing.T) {
-	mounts, err := GetMounts()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	root := false
-	for _, entry := range mounts {
-		if entry.Mountpoint == "/" {
-			root = true
-		}
-	}
-
-	if !root {
-		t.Fatal("/ should be mounted at least")
-	}
-}

+ 0 - 477
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go

@@ -1,477 +0,0 @@
-// +build linux
-
-package mount
-
-import (
-	"bytes"
-	"testing"
-)
-
-const (
-	fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
-    16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel
-    17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755
-    18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw
-    19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw
-    20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel
-    21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000
-    22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755
-    23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755
-    24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
-    25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw
-    26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children
-    27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children
-    28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children
-    29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children
-    30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children
-    31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children
-    32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children
-    33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children
-    34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children
-    35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered
-    36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct
-    37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel
-    38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel
-    39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel
-    40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw
-    41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw
-    42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw
-    43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw
-    45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered
-    46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered
-    47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered
-    48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered
-    121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000
-    124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw
-    165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered
-    167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered
-    171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered
-    175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered
-    179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered
-    183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered
-    187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered
-    191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered
-    195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered
-    199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered
-    203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered
-    207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered
-    211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered
-    215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered
-    219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered
-    223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered
-    227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered
-    231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered
-    235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered
-    239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered
-    243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered
-    247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered
-    31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1`
-
-	ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
-16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
-17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755
-18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
-19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755
-20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered
-21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755
-22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw
-23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw
-24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw
-25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k
-26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children
-27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw
-28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu
-29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755
-30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw
-31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct
-32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory
-33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices
-34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer
-35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio
-36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event
-37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb
-38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd
-39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525
-40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525
-41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525
-42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525
-43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525
-44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525
-45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525
-46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525
-47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525
-48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525
-49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525
-50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525
-51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525
-52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525
-53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525
-54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525
-55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525
-56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525
-57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525
-58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525
-59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525
-60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525
-61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525
-62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525
-63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525
-64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525
-65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525
-66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525
-67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525
-68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525
-69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525
-70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525
-71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525
-72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525
-73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525
-74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525
-75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525
-76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525
-77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525
-78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525
-79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525
-80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525
-81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525
-82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525
-83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525
-84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525
-85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525
-86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525
-87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525
-88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525
-89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525
-90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525
-91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525
-92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525
-93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525
-94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525
-95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525
-96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525
-97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525
-98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525
-99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525
-100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525
-101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525
-102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525
-103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525
-104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525
-105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525
-106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525
-107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525
-108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525
-109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525
-110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525
-111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525
-112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525
-113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525
-114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525
-115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525
-116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525
-117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525
-118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525
-119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525
-120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525
-121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525
-122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525
-123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525
-124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525
-125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525
-126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525
-127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525
-128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525
-129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525
-130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525
-131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525
-132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525
-133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525
-134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525
-135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525
-136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525
-137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525
-138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525
-139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525
-140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525
-141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525
-142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525
-143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525
-144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525`
-
-	gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
-16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
-17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755
-18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755
-19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw
-20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
-21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw
-22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
-23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw
-24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755
-25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc
-26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children
-27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children
-28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children
-29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children
-30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children
-31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children
-32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children
-33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro
-34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota
-35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw
-36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw
-42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw
-43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw
-44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000
-68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c
-85 68 8:6 /var/lib/docker/init/dockerinit-0.7.2-dev//deleted /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerinit rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
-86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
-87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
-88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
-89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
-38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c
-39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c
-40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c
-41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c
-45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c
-46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c
-47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c
-48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c
-49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c
-50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c
-51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c
-52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c
-53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c
-54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c
-55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c
-56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c
-57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c
-59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c
-60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c
-61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c
-62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c
-63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c
-64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c
-65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c
-66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c
-70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c
-71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c
-72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c
-73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c
-76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c
-77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c
-78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c
-79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c
-80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c
-81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c
-82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c
-83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c
-84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c
-94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c
-95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c
-96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c
-97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c
-98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c
-102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c
-103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c
-104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c
-105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c
-106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c
-107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c
-108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c
-109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c
-110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c
-111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c
-112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c
-113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c
-114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c
-117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c
-118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c
-119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c
-120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c
-121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c
-122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c
-123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c
-126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c
-127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c
-128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c
-130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c
-131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c
-132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c
-133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c
-134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c
-135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c
-136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c
-137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c
-138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c
-139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c
-140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c
-141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c
-142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c
-143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c
-144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c
-147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c
-150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c
-151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c
-152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c
-153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c
-154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c
-155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c
-156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c
-157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c
-158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c
-159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c
-160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c
-162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c
-163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c
-164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c
-165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c
-166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c
-167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c
-168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c
-169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c
-170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c
-171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c
-172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c
-173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c
-174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c
-184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c
-187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c
-188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c
-189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c
-190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c
-191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c
-192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c
-193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c
-194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c
-195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c
-196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c
-197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c
-198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c
-199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c
-200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c
-201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c
-202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c
-203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c
-204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c
-205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c
-206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c
-207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c
-208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c
-209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c
-210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c
-211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c
-212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c
-213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c
-214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c
-215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c
-216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c
-217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c
-218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c
-219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c
-220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c
-221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c
-222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c
-223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c
-224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c
-225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c
-226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c
-227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c
-228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c
-229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c
-230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c
-231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c
-232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c
-233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c
-234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c
-235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c
-237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c
-238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c
-239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c
-240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c
-241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c
-242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c
-243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c
-244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c
-245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c
-246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c
-247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c
-249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c
-250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c
-251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c
-252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c
-253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c
-254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c
-255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c
-256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c
-257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c
-259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c
-260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c
-261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c
-262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c
-263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c
-264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c
-58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c
-67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c
-265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c
-270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c
-273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c
-278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c
-281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c
-286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c
-289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c
-99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096`
-)
-
-func TestParseFedoraMountinfo(t *testing.T) {
-	r := bytes.NewBuffer([]byte(fedoraMountinfo))
-	_, err := parseInfoFile(r)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestParseUbuntuMountinfo(t *testing.T) {
-	r := bytes.NewBuffer([]byte(ubuntuMountInfo))
-	_, err := parseInfoFile(r)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestParseGentooMountinfo(t *testing.T) {
-	r := bytes.NewBuffer([]byte(gentooMountinfo))
-	_, err := parseInfoFile(r)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestParseFedoraMountinfoFields(t *testing.T) {
-	r := bytes.NewBuffer([]byte(fedoraMountinfo))
-	infos, err := parseInfoFile(r)
-	if err != nil {
-		t.Fatal(err)
-	}
-	expectedLength := 58
-	if len(infos) != expectedLength {
-		t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos))
-	}
-	mi := Info{
-		ID:         15,
-		Parent:     35,
-		Major:      0,
-		Minor:      3,
-		Root:       "/",
-		Mountpoint: "/proc",
-		Opts:       "rw,nosuid,nodev,noexec,relatime",
-		Optional:   "shared:5",
-		Fstype:     "proc",
-		Source:     "proc",
-		VfsOpts:    "rw",
-	}
-
-	if *infos[0] != mi {
-		t.Fatalf("expected %#v, got %#v", mi, infos[0])
-	}
-}
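
The dropped fixture test above documents how one mountinfo line maps onto the Info fields (ID, Parent, Major/Minor, Root, Mountpoint, Opts, Optional, Fstype, Source, VfsOpts). A minimal, self-contained sketch of that mapping — not the vendored parseInfoFile, just an illustration of the mountinfo(5) field layout, assuming a well-formed line with a single "-" separator:

```go
package main

import (
	"fmt"
	"strings"
)

// info mirrors the fields asserted by the deleted TestParseFedoraMountinfoFields.
type info struct {
	id, parent, majorMinor, root, mountpoint, opts, optional string
	fstype, source, vfsOpts                                   string
}

// parseLine splits one mountinfo(5) record: the fields before the lone "-"
// describe the mount plus optional tags, the fields after it are
// "fstype source vfsopts". The sketch assumes the separator is present.
func parseLine(line string) info {
	parts := strings.SplitN(line, " - ", 2)
	pre, post := strings.Fields(parts[0]), strings.Fields(parts[1])
	return info{
		id:         pre[0],
		parent:     pre[1],
		majorMinor: pre[2],
		root:       pre[3],
		mountpoint: pre[4],
		opts:       pre[5],
		optional:   strings.Join(pre[6:], " "),
		fstype:     post[0],
		source:     post[1],
		vfsOpts:    strings.Join(post[2:], " "),
	}
}

func main() {
	// The /proc entry the deleted test checks: ID 15, parent 35, 0:3, /proc, shared:5.
	line := "15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw"
	fmt.Printf("%+v\n", parseLine(line))
}
```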

+ 1 - 1
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go

@@ -1,4 +1,4 @@
-// +build !linux,!freebsd freebsd,!cgo
+// +build !windows,!linux,!freebsd freebsd,!cgo
 
 package mount
 

+ 6 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_windows.go

@@ -0,0 +1,6 @@
+package mount
+
+func parseMountTable() ([]*Info, error) {
+	// Do NOT return an error!
+	return nil, nil
+}
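
Note that the Windows stub above returns an empty table and a nil error, so callers that consult the mount table see no entries rather than a failure. A sketch of the effect on a Mounted-style caller — the helper below is illustrative, not the vendored implementation:

```go
package main

import "fmt"

// Info is a trimmed stand-in for mount.Info; only the mountpoint matters here.
type Info struct {
	Mountpoint string
}

// parseMountTable mimics the Windows stub above: no entries, no error.
func parseMountTable() ([]*Info, error) {
	return nil, nil
}

// mounted reports whether mountpoint appears in the parsed table, so on the
// stubbed table it always returns (false, nil) instead of an error.
func mounted(mountpoint string) (bool, error) {
	entries, err := parseMountTable()
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.Mountpoint == mountpoint {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := mounted(`C:\data`)
	fmt.Println(ok, err) // false <nil>
}
```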

+ 1 - 2
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go

@@ -61,8 +61,7 @@ func ensureMountedAs(mountPoint, options string) error {
 			return err
 		}
 	}
-	mounted, err = Mounted(mountPoint)
-	if err != nil {
+	if _, err = Mounted(mountPoint); err != nil {
 		return err
 	}
 

+ 0 - 331
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go

@@ -1,331 +0,0 @@
-// +build linux
-
-package mount
-
-import (
-	"os"
-	"path"
-	"syscall"
-	"testing"
-)
-
-// nothing is propagated in or out
-func TestSubtreePrivate(t *testing.T) {
-	tmp := path.Join(os.TempDir(), "mount-tests")
-	if err := os.MkdirAll(tmp, 0777); err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmp)
-
-	var (
-		sourceDir   = path.Join(tmp, "source")
-		targetDir   = path.Join(tmp, "target")
-		outside1Dir = path.Join(tmp, "outside1")
-		outside2Dir = path.Join(tmp, "outside2")
-
-		outside1Path      = path.Join(outside1Dir, "file.txt")
-		outside2Path      = path.Join(outside2Dir, "file.txt")
-		outside1CheckPath = path.Join(targetDir, "a", "file.txt")
-		outside2CheckPath = path.Join(sourceDir, "b", "file.txt")
-	)
-	if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.Mkdir(targetDir, 0777); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.Mkdir(outside1Dir, 0777); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.Mkdir(outside2Dir, 0777); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := createFile(outside1Path); err != nil {
-		t.Fatal(err)
-	}
-	if err := createFile(outside2Path); err != nil {
-		t.Fatal(err)
-	}
-
-	// mount the shared directory to a target
-	if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(targetDir); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// next, make the target private
-	if err := MakePrivate(targetDir); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(targetDir); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// mount in an outside path to a mounted path inside the _source_
-	if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(path.Join(sourceDir, "a")); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// check that this file _does_not_ show in the _target_
-	if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) {
-		t.Fatal(err)
-	} else if err == nil {
-		t.Fatalf("%q should not be visible, but is", outside1CheckPath)
-	}
-
-	// next mount outside2Dir into the _target_
-	if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(path.Join(targetDir, "b")); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// check that this file _does_not_ show in the _source_
-	if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) {
-		t.Fatal(err)
-	} else if err == nil {
-		t.Fatalf("%q should not be visible, but is", outside2CheckPath)
-	}
-}
-
-// Testing that when a target is a shared mount,
-// then child mounts propagate to the source
-func TestSubtreeShared(t *testing.T) {
-	tmp := path.Join(os.TempDir(), "mount-tests")
-	if err := os.MkdirAll(tmp, 0777); err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmp)
-
-	var (
-		sourceDir  = path.Join(tmp, "source")
-		targetDir  = path.Join(tmp, "target")
-		outsideDir = path.Join(tmp, "outside")
-
-		outsidePath     = path.Join(outsideDir, "file.txt")
-		sourceCheckPath = path.Join(sourceDir, "a", "file.txt")
-	)
-
-	if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.Mkdir(targetDir, 0777); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.Mkdir(outsideDir, 0777); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := createFile(outsidePath); err != nil {
-		t.Fatal(err)
-	}
-
-	// mount the source as shared
-	if err := MakeShared(sourceDir); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(sourceDir); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// mount the shared directory to a target
-	if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(targetDir); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// mount in an outside path to a mounted path inside the target
-	if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(path.Join(targetDir, "a")); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// NOW, check that the file from the outside directory is available in the source directory
-	if _, err := os.Stat(sourceCheckPath); err != nil {
-		t.Fatal(err)
-	}
-}
-
-// testing that mounts to a shared source show up in the slave target,
-// and that mounts into a slave target do _not_ show up in the shared source
-func TestSubtreeSharedSlave(t *testing.T) {
-	tmp := path.Join(os.TempDir(), "mount-tests")
-	if err := os.MkdirAll(tmp, 0777); err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmp)
-
-	var (
-		sourceDir   = path.Join(tmp, "source")
-		targetDir   = path.Join(tmp, "target")
-		outside1Dir = path.Join(tmp, "outside1")
-		outside2Dir = path.Join(tmp, "outside2")
-
-		outside1Path      = path.Join(outside1Dir, "file.txt")
-		outside2Path      = path.Join(outside2Dir, "file.txt")
-		outside1CheckPath = path.Join(targetDir, "a", "file.txt")
-		outside2CheckPath = path.Join(sourceDir, "b", "file.txt")
-	)
-	if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.Mkdir(targetDir, 0777); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.Mkdir(outside1Dir, 0777); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.Mkdir(outside2Dir, 0777); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := createFile(outside1Path); err != nil {
-		t.Fatal(err)
-	}
-	if err := createFile(outside2Path); err != nil {
-		t.Fatal(err)
-	}
-
-	// mount the source as shared
-	if err := MakeShared(sourceDir); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(sourceDir); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// mount the shared directory to a target
-	if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(targetDir); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// next, make the target slave
-	if err := MakeSlave(targetDir); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(targetDir); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// mount in an outside path to a mounted path inside the _source_
-	if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(path.Join(sourceDir, "a")); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// check that this file _does_ show in the _target_
-	if _, err := os.Stat(outside1CheckPath); err != nil {
-		t.Fatal(err)
-	}
-
-	// next mount outside2Dir into the _target_
-	if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(path.Join(targetDir, "b")); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// check that this file _does_not_ show in the _source_
-	if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) {
-		t.Fatal(err)
-	} else if err == nil {
-		t.Fatalf("%q should not be visible, but is", outside2CheckPath)
-	}
-}
-
-func TestSubtreeUnbindable(t *testing.T) {
-	tmp := path.Join(os.TempDir(), "mount-tests")
-	if err := os.MkdirAll(tmp, 0777); err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmp)
-
-	var (
-		sourceDir = path.Join(tmp, "source")
-		targetDir = path.Join(tmp, "target")
-	)
-	if err := os.MkdirAll(sourceDir, 0777); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.MkdirAll(targetDir, 0777); err != nil {
-		t.Fatal(err)
-	}
-
-	// next, make the source unbindable
-	if err := MakeUnbindable(sourceDir); err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		if err := Unmount(sourceDir); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	// then attempt to mount it to target. It should fail
-	if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL {
-		t.Fatal(err)
-	} else if err == nil {
-		t.Fatalf("%q should not have been bindable", sourceDir)
-	}
-	defer func() {
-		if err := Unmount(targetDir); err != nil {
-			t.Fatal(err)
-		}
-	}()
-}
-
-func createFile(path string) error {
-	f, err := os.Create(path)
-	if err != nil {
-		return err
-	}
-	f.WriteString("hello world!")
-	return f.Close()
-}
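
The propagation tests are dropped from the vendored copy, but the helpers they exercised (Mount, MakePrivate, MakeShared, MakeSlave, Unmount) remain. A hedged usage sketch of the private case — Linux only, requires root, paths are illustrative, and the import path assumes the vendored package is on GOPATH:

```go
package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	src, dst := "/tmp/propagation-src", "/tmp/propagation-dst"
	for _, d := range []string{src, dst} {
		if err := os.MkdirAll(d, 0755); err != nil {
			log.Fatal(err)
		}
	}

	// Bind-mount src onto dst, as the deleted tests did.
	if err := mount.Mount(src, dst, "none", "bind,rw"); err != nil {
		log.Fatal(err)
	}
	defer mount.Unmount(dst)

	// Make the target private: mounts created under src no longer show up
	// under dst, and vice versa.
	if err := mount.MakePrivate(dst); err != nil {
		log.Fatal(err)
	}
}
```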

+ 0 - 96
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go

@@ -1,96 +0,0 @@
-// +build !windows
-
-package kernel
-
-import (
-	"fmt"
-	"testing"
-)
-
-func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) {
-	var (
-		a *VersionInfo
-	)
-	a, _ = ParseRelease(release)
-
-	if r := CompareKernelVersion(*a, *b); r != result {
-		t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result)
-	}
-	if a.Flavor != b.Flavor {
-		t.Fatalf("Unexpected parsed kernel flavor.  Found %s, expected %s", a.Flavor, b.Flavor)
-	}
-}
-
-// TestParseRelease tests the ParseRelease() function
-func TestParseRelease(t *testing.T) {
-	assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0)
-	assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
-	assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
-	assertParseRelease(t, "3.8.0-19-generic", &VersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0)
-	assertParseRelease(t, "3.12.8tag", &VersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0)
-	assertParseRelease(t, "3.12-1-amd64", &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0)
-	assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1)
-	// Errors
-	invalids := []string{
-		"3",
-		"a",
-		"a.a",
-		"a.a.a-a",
-	}
-	for _, invalid := range invalids {
-		expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid)
-		if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage {
-
-		}
-	}
-}
-
-func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) {
-	if r := CompareKernelVersion(a, b); r != result {
-		t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
-	}
-}
-
-// TestCompareKernelVersion tests the CompareKernelVersion() function
-func TestCompareKernelVersion(t *testing.T) {
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		0)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 2, Major: 6, Minor: 0},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		-1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		VersionInfo{Kernel: 2, Major: 6, Minor: 0},
-		1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		0)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 5},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 0, Minor: 20},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		-1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 7, Minor: 20},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		-1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 20},
-		VersionInfo{Kernel: 3, Major: 7, Minor: 0},
-		1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 20},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 20},
-		-1)
-}
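
These kernel-version tests also disappear from the vendored tree. For reference, the ordering they assert is lexicographic over (Kernel, Major, Minor); a minimal sketch of that comparison, using a local struct rather than the package's VersionInfo:

```go
package main

import "fmt"

// versionInfo mirrors the numeric fields compared by the deleted tests.
type versionInfo struct {
	Kernel, Major, Minor int
}

// compare orders by kernel, then major, then minor, returning -1, 0 or 1.
func compare(a, b versionInfo) int {
	for _, d := range []int{a.Kernel - b.Kernel, a.Major - b.Major, a.Minor - b.Minor} {
		if d > 0 {
			return 1
		}
		if d < 0 {
			return -1
		}
	}
	return 0
}

func main() {
	fmt.Println(compare(versionInfo{3, 8, 0}, versionInfo{2, 6, 0}))  // 1
	fmt.Println(compare(versionInfo{3, 7, 20}, versionInfo{3, 8, 0})) // -1
	fmt.Println(compare(versionInfo{3, 8, 0}, versionInfo{3, 8, 0}))  // 0
}
```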

+ 49 - 27
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/client.go

@@ -3,21 +3,20 @@ package plugins
 import (
 	"bytes"
 	"encoding/json"
-	"fmt"
 	"io"
 	"io/ioutil"
 	"net/http"
-	"strings"
+	"net/url"
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/pkg/sockets"
-	"github.com/docker/docker/pkg/tlsconfig"
+	"github.com/docker/docker/pkg/plugins/transport"
+	"github.com/docker/go-connections/sockets"
+	"github.com/docker/go-connections/tlsconfig"
 )
 
 const (
-	versionMimetype = "application/vnd.docker.plugins.v1.1+json"
-	defaultTimeOut  = 30
+	defaultTimeOut = 30
 )
 
 // NewClient creates a new plugin client (http).
@@ -30,21 +29,38 @@ func NewClient(addr string, tlsConfig tlsconfig.Options) (*Client, error) {
 	}
 	tr.TLSClientConfig = c
 
-	protoAndAddr := strings.Split(addr, "://")
-	sockets.ConfigureTCPTransport(tr, protoAndAddr[0], protoAndAddr[1])
+	u, err := url.Parse(addr)
+	if err != nil {
+		return nil, err
+	}
+	socket := u.Host
+	if socket == "" {
+		// valid local socket addresses have the host empty.
+		socket = u.Path
+	}
+	if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil {
+		return nil, err
+	}
+	scheme := httpScheme(u)
 
 
-	scheme := protoAndAddr[0]
-	if scheme != "https" {
-		scheme = "http"
+	clientTransport := transport.NewHTTPTransport(tr, scheme, socket)
+	return NewClientWithTransport(clientTransport), nil
+}
+
+// NewClientWithTransport creates a new plugin client with a given transport.
+func NewClientWithTransport(tr transport.Transport) *Client {
+	return &Client{
+		http: &http.Client{
+			Transport: tr,
+		},
+		requestFactory: tr,
 	}
 	}
-	return &Client{&http.Client{Transport: tr}, scheme, protoAndAddr[1]}, nil
 }
 }
 
 
 // Client represents a plugin client.
 // Client represents a plugin client.
 type Client struct {
 type Client struct {
-	http   *http.Client // http client to use
-	scheme string       // scheme protocol of the plugin
-	addr   string       // http address of the plugin
+	http           *http.Client // http client to use
+	requestFactory transport.RequestFactory
 }
 }
 
 
 // Call calls the specified method with the specified arguments for the plugin.
 // Call calls the specified method with the specified arguments for the plugin.
@@ -85,6 +101,7 @@ func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{})
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
+	defer body.Close()
 	if err := json.NewDecoder(body).Decode(&ret); err != nil {
 	if err := json.NewDecoder(body).Decode(&ret); err != nil {
 		logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err)
 		logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err)
 		return err
 		return err
@@ -93,13 +110,10 @@ func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{})
 }
 }
 
 
 func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) {
 func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) {
-	req, err := http.NewRequest("POST", "/"+serviceMethod, data)
+	req, err := c.requestFactory.NewRequest(serviceMethod, data)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
-	req.Header.Add("Accept", versionMimetype)
-	req.URL.Scheme = c.scheme
-	req.URL.Host = c.addr
 
 
 	var retries int
 	var retries int
 	start := time.Now()
 	start := time.Now()
@@ -116,15 +130,16 @@ func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool)
 				return nil, err
 				return nil, err
 			}
 			}
 			retries++
 			retries++
-			logrus.Warnf("Unable to connect to plugin: %s, retrying in %v", c.addr, timeOff)
+			logrus.Warnf("Unable to connect to plugin: %s:%s, retrying in %v", req.URL.Host, req.URL.Path, timeOff)
 			time.Sleep(timeOff)
 			time.Sleep(timeOff)
 			continue
 			continue
 		}
 		}
 
 
 		if resp.StatusCode != http.StatusOK {
 		if resp.StatusCode != http.StatusOK {
 			b, err := ioutil.ReadAll(resp.Body)
 			b, err := ioutil.ReadAll(resp.Body)
+			resp.Body.Close()
 			if err != nil {
 			if err != nil {
-				return nil, fmt.Errorf("%s: %s", serviceMethod, err)
+				return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()}
 			}
 			}
 
 
 			// Plugins' Response(s) should have an Err field indicating what went
 			// Plugins' Response(s) should have an Err field indicating what went
@@ -134,14 +149,13 @@ func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool)
 				Err string
 				Err string
 			}
 			}
 			remoteErr := responseErr{}
 			remoteErr := responseErr{}
-			if err := json.Unmarshal(b, &remoteErr); err != nil {
-				return nil, fmt.Errorf("%s: %s", serviceMethod, err)
-			}
-			if remoteErr.Err != "" {
-				return nil, fmt.Errorf("%s: %s", serviceMethod, remoteErr.Err)
+			if err := json.Unmarshal(b, &remoteErr); err == nil {
+				if remoteErr.Err != "" {
+					return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err}
+				}
 			}
 			}
 			// old way...
 			// old way...
-			return nil, fmt.Errorf("%s: %s", serviceMethod, string(b))
+			return nil, &statusError{resp.StatusCode, serviceMethod, string(b)}
 		}
 		}
 		return resp.Body, nil
 		return resp.Body, nil
 	}
 	}
@@ -162,3 +176,11 @@ func backoff(retries int) time.Duration {
 func abort(start time.Time, timeOff time.Duration) bool {
 func abort(start time.Time, timeOff time.Duration) bool {
 	return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second
 	return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second
 }
 }
+
+func httpScheme(u *url.URL) string {
+	scheme := u.Scheme
+	if scheme != "https" {
+		scheme = "http"
+	}
+	return scheme
+}
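
The rewritten NewClient stops string-splitting the address and runs it through url.Parse instead: the host (or, for unix sockets, the path) becomes the transport socket, and anything that is not https is spoken as plain http. A standalone sketch of that address handling, with cases similar to the ones the deleted client_test.go covered; only httpScheme is copied from the change, the rest is illustrative:

```go
package main

import (
	"fmt"
	"net/url"
)

// httpScheme matches the helper added above.
func httpScheme(u *url.URL) string {
	if u.Scheme != "https" {
		return "http"
	}
	return "https"
}

func main() {
	for _, addr := range []string{
		"tcp://127.0.0.1:8080",
		"unix:///run/docker/plugins/foo.sock",
		"https://127.0.0.1:8080",
	} {
		u, err := url.Parse(addr)
		if err != nil {
			panic(err)
		}
		socket := u.Host
		if socket == "" {
			// Local socket addresses carry the path instead of a host.
			socket = u.Path
		}
		fmt.Printf("%-38s socket=%-30s scheme=%s\n", addr, socket, httpScheme(u))
	}
}
```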

+ 0 - 127
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/client_test.go

@@ -1,127 +0,0 @@
-package plugins
-
-import (
-	"io"
-	"net/http"
-	"net/http/httptest"
-	"reflect"
-	"testing"
-	"time"
-
-	"github.com/docker/docker/pkg/tlsconfig"
-)
-
-var (
-	mux    *http.ServeMux
-	server *httptest.Server
-)
-
-func setupRemotePluginServer() string {
-	mux = http.NewServeMux()
-	server = httptest.NewServer(mux)
-	return server.URL
-}
-
-func teardownRemotePluginServer() {
-	if server != nil {
-		server.Close()
-	}
-}
-
-func TestFailedConnection(t *testing.T) {
-	c, _ := NewClient("tcp://127.0.0.1:1", tlsconfig.Options{InsecureSkipVerify: true})
-	_, err := c.callWithRetry("Service.Method", nil, false)
-	if err == nil {
-		t.Fatal("Unexpected successful connection")
-	}
-}
-
-func TestEchoInputOutput(t *testing.T) {
-	addr := setupRemotePluginServer()
-	defer teardownRemotePluginServer()
-
-	m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}}
-
-	mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) {
-		if r.Method != "POST" {
-			t.Fatalf("Expected POST, got %s\n", r.Method)
-		}
-
-		header := w.Header()
-		header.Set("Content-Type", versionMimetype)
-
-		io.Copy(w, r.Body)
-	})
-
-	c, _ := NewClient(addr, tlsconfig.Options{InsecureSkipVerify: true})
-	var output Manifest
-	err := c.Call("Test.Echo", m, &output)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if !reflect.DeepEqual(output, m) {
-		t.Fatalf("Expected %v, was %v\n", m, output)
-	}
-	err = c.Call("Test.Echo", nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestBackoff(t *testing.T) {
-	cases := []struct {
-		retries    int
-		expTimeOff time.Duration
-	}{
-		{0, time.Duration(1)},
-		{1, time.Duration(2)},
-		{2, time.Duration(4)},
-		{4, time.Duration(16)},
-		{6, time.Duration(30)},
-		{10, time.Duration(30)},
-	}
-
-	for _, c := range cases {
-		s := c.expTimeOff * time.Second
-		if d := backoff(c.retries); d != s {
-			t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d)
-		}
-	}
-}
-
-func TestAbortRetry(t *testing.T) {
-	cases := []struct {
-		timeOff  time.Duration
-		expAbort bool
-	}{
-		{time.Duration(1), false},
-		{time.Duration(2), false},
-		{time.Duration(10), false},
-		{time.Duration(30), true},
-		{time.Duration(40), true},
-	}
-
-	for _, c := range cases {
-		s := c.timeOff * time.Second
-		if a := abort(time.Now(), s); a != c.expAbort {
-			t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a)
-		}
-	}
-}
-
-func TestClientScheme(t *testing.T) {
-	cases := map[string]string{
-		"tcp://127.0.0.1:8080":          "http",
-		"unix:///usr/local/plugins/foo": "http",
-		"http://127.0.0.1:8080":         "http",
-		"https://127.0.0.1:8080":        "https",
-	}
-
-	for addr, scheme := range cases {
-		c, _ := NewClient(addr, tlsconfig.Options{InsecureSkipVerify: true})
-		if c.scheme != scheme {
-			t.Fatalf("URL scheme mismatch, expected %s, got %s", scheme, c.scheme)
-		}
-	}
-}

+ 33 - 1
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/discovery.go

@@ -13,7 +13,7 @@ import (
 
 var (
 	// ErrNotFound plugin not found
-	ErrNotFound = errors.New("Plugin not found")
+	ErrNotFound = errors.New("plugin not found")
 	socketsPath = "/run/docker/plugins"
 	specsPaths  = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"}
 )
@@ -25,6 +25,38 @@ func newLocalRegistry() localRegistry {
 	return localRegistry{}
 }
 
+// Scan scans all the plugin paths and returns all the names it found
+func Scan() ([]string, error) {
+	var names []string
+	if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return nil
+		}
+
+		if fi.Mode()&os.ModeSocket != 0 {
+			name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name()))
+			names = append(names, name)
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	for _, path := range specsPaths {
+		if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error {
+			if err != nil || fi.IsDir() {
+				return nil
+			}
+			name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name()))
+			names = append(names, name)
+			return nil
+		}); err != nil {
+			return nil, err
+		}
+	}
+	return names, nil
+}
+
 // Plugin returns the plugin registered with the given name (or returns an error).
 // Plugin returns the plugin registered with the given name (or returns an error).
 func (l *localRegistry) Plugin(name string) (*Plugin, error) {
 func (l *localRegistry) Plugin(name string) (*Plugin, error) {
 	socketpaths := pluginPaths(socketsPath, name, ".sock")
 	socketpaths := pluginPaths(socketsPath, name, ".sock")
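
A short usage sketch for the new Scan helper: it walks /run/docker/plugins for sockets and the spec directories for files, and reports each plugin name with its extension trimmed. The import path assumes the vendored package is on GOPATH; error handling is kept minimal:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/plugins"
)

func main() {
	names, err := plugins.Scan()
	if err != nil {
		fmt.Println("scan failed:", err)
		return
	}
	for _, name := range names {
		// "foo.sock" and "foo.spec" both show up here as "foo".
		fmt.Println(name)
	}
}
```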

+ 0 - 169
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/discovery_test.go

@@ -1,169 +0,0 @@
-package plugins
-
-import (
-	"fmt"
-	"io/ioutil"
-	"net"
-	"os"
-	"path/filepath"
-	"reflect"
-	"testing"
-)
-
-func setup(t *testing.T) (string, func()) {
-	tmpdir, err := ioutil.TempDir("", "docker-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	backup := socketsPath
-	socketsPath = tmpdir
-	specsPaths = []string{tmpdir}
-
-	return tmpdir, func() {
-		socketsPath = backup
-		os.RemoveAll(tmpdir)
-	}
-}
-
-func TestLocalSocket(t *testing.T) {
-	tmpdir, unregister := setup(t)
-	defer unregister()
-
-	cases := []string{
-		filepath.Join(tmpdir, "echo.sock"),
-		filepath.Join(tmpdir, "echo", "echo.sock"),
-	}
-
-	for _, c := range cases {
-		if err := os.MkdirAll(filepath.Dir(c), 0755); err != nil {
-			t.Fatal(err)
-		}
-
-		l, err := net.Listen("unix", c)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		r := newLocalRegistry()
-		p, err := r.Plugin("echo")
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		pp, err := r.Plugin("echo")
-		if err != nil {
-			t.Fatal(err)
-		}
-		if !reflect.DeepEqual(p, pp) {
-			t.Fatalf("Expected %v, was %v\n", p, pp)
-		}
-
-		if p.Name != "echo" {
-			t.Fatalf("Expected plugin `echo`, got %s\n", p.Name)
-		}
-
-		addr := fmt.Sprintf("unix://%s", c)
-		if p.Addr != addr {
-			t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr)
-		}
-		if p.TLSConfig.InsecureSkipVerify != true {
-			t.Fatalf("Expected TLS verification to be skipped")
-		}
-		l.Close()
-	}
-}
-
-func TestFileSpecPlugin(t *testing.T) {
-	tmpdir, unregister := setup(t)
-	defer unregister()
-
-	cases := []struct {
-		path string
-		name string
-		addr string
-		fail bool
-	}{
-		{filepath.Join(tmpdir, "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false},
-		{filepath.Join(tmpdir, "echo", "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false},
-		{filepath.Join(tmpdir, "foo.spec"), "foo", "tcp://localhost:8080", false},
-		{filepath.Join(tmpdir, "foo", "foo.spec"), "foo", "tcp://localhost:8080", false},
-		{filepath.Join(tmpdir, "bar.spec"), "bar", "localhost:8080", true}, // unknown transport
-	}
-
-	for _, c := range cases {
-		if err := os.MkdirAll(filepath.Dir(c.path), 0755); err != nil {
-			t.Fatal(err)
-		}
-		if err := ioutil.WriteFile(c.path, []byte(c.addr), 0644); err != nil {
-			t.Fatal(err)
-		}
-
-		r := newLocalRegistry()
-		p, err := r.Plugin(c.name)
-		if c.fail && err == nil {
-			continue
-		}
-
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if p.Name != c.name {
-			t.Fatalf("Expected plugin `%s`, got %s\n", c.name, p.Name)
-		}
-
-		if p.Addr != c.addr {
-			t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr)
-		}
-
-		if p.TLSConfig.InsecureSkipVerify != true {
-			t.Fatalf("Expected TLS verification to be skipped")
-		}
-	}
-}
-
-func TestFileJSONSpecPlugin(t *testing.T) {
-	tmpdir, unregister := setup(t)
-	defer unregister()
-
-	p := filepath.Join(tmpdir, "example.json")
-	spec := `{
-  "Name": "plugin-example",
-  "Addr": "https://example.com/docker/plugin",
-  "TLSConfig": {
-    "CAFile": "/usr/shared/docker/certs/example-ca.pem",
-    "CertFile": "/usr/shared/docker/certs/example-cert.pem",
-    "KeyFile": "/usr/shared/docker/certs/example-key.pem"
-	}
-}`
-
-	if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil {
-		t.Fatal(err)
-	}
-
-	r := newLocalRegistry()
-	plugin, err := r.Plugin("example")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if plugin.Name != "example" {
-		t.Fatalf("Expected plugin `plugin-example`, got %s\n", plugin.Name)
-	}
-
-	if plugin.Addr != "https://example.com/docker/plugin" {
-		t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr)
-	}
-
-	if plugin.TLSConfig.CAFile != "/usr/shared/docker/certs/example-ca.pem" {
-		t.Fatalf("Expected plugin CA `/usr/shared/docker/certs/example-ca.pem`, got %s\n", plugin.TLSConfig.CAFile)
-	}
-
-	if plugin.TLSConfig.CertFile != "/usr/shared/docker/certs/example-cert.pem" {
-		t.Fatalf("Expected plugin Certificate `/usr/shared/docker/certs/example-cert.pem`, got %s\n", plugin.TLSConfig.CertFile)
-	}
-
-	if plugin.TLSConfig.KeyFile != "/usr/shared/docker/certs/example-key.pem" {
-		t.Fatalf("Expected plugin Key `/usr/shared/docker/certs/example-key.pem`, got %s\n", plugin.TLSConfig.KeyFile)
-	}
-}

+ 33 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/errors.go

@@ -0,0 +1,33 @@
+package plugins
+
+import (
+	"fmt"
+	"net/http"
+)
+
+type statusError struct {
+	status int
+	method string
+	err    string
+}
+
+// Error returns a formatted string for this error type
+func (e *statusError) Error() string {
+	return fmt.Sprintf("%s: %v", e.method, e.err)
+}
+
+// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin
+func IsNotFound(err error) bool {
+	return isStatusError(err, http.StatusNotFound)
+}
+
+func isStatusError(err error, status int) bool {
+	if err == nil {
+		return false
+	}
+	e, ok := err.(*statusError)
+	if !ok {
+		return false
+	}
+	return e.status == status
+}
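
statusError stays unexported, so outside this package the HTTP status is only reachable through IsNotFound. A self-contained mirror of that check — the type and helper below are local copies for illustration, not the package API:

```go
package main

import (
	"fmt"
	"net/http"
)

// statusError mirrors the unexported type added above.
type statusError struct {
	status int
	method string
	err    string
}

func (e *statusError) Error() string {
	return fmt.Sprintf("%s: %v", e.method, e.err)
}

// isNotFound mirrors plugins.IsNotFound: a plain type assertion plus a
// status comparison.
func isNotFound(err error) bool {
	e, ok := err.(*statusError)
	return ok && e.status == http.StatusNotFound
}

func main() {
	var err error = &statusError{http.StatusNotFound, "VolumeDriver.Get", "no such volume"}
	fmt.Println(isNotFound(err)) // true
}
```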

+ 0 - 68
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md

@@ -1,68 +0,0 @@
-Plugin RPC Generator
-====================
-
-Generates go code from a Go interface definition for proxying between the plugin
-API and the subsystem being extended.
-
-## Usage
-
-Given an interface definition:
-
-```go
-type volumeDriver interface {
-	Create(name string, opts opts) (err error)
-	Remove(name string) (err error)
-	Path(name string) (mountpoint string, err error)
-	Mount(name string) (mountpoint string, err error)
-	Unmount(name string) (err error)
-}
-```
-
-**Note**: All function options and return values must be named in the definition.
-
-Run the generator:
-
-```bash
-$ pluginrpc-gen --type volumeDriver --name VolumeDriver -i volumes/drivers/extpoint.go -o volumes/drivers/proxy.go
-```
-
-Where:
-- `--type` is the name of the interface to use
-- `--name` is the subsystem that the plugin "Implements"
-- `-i` is the input file containing the interface definition
-- `-o` is the output file where the the generated code should go
-
-**Note**: The generated code will use the same package name as the one defined in the input file
-
-Optionally, you can skip functions on the interface that should not be
-implemented in the generated proxy code by passing in the function name to `--skip`.
-This flag can be specified multiple times.
-
-You can also add build tags that should be prepended to the generated code by
-supplying `--tag`. This flag can be specified multiple times.
-
-## Known issues
-
-The parser can currently only handle types which are not specifically a map or
-a slice.  
-You can, however, create a type that uses a map or a slice internally, for instance:
-
-```go
-type opts map[string]string
-```
-
-This `opts` type will work, whreas using a `map[string]string` directly will not.
-
-## go-generate
-
-You can also use this with go-generate, which is pretty awesome.  
-To do so, place the code at the top of the file which contains the interface
-definition (i.e., the input file):
-
-```go
-//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver
-```
-
-Then cd to the package dir and run `go generate`
-
-**Note**: the `pluginrpc-gen` binary must be within your `$PATH`

+ 0 - 41
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go

@@ -1,41 +0,0 @@
-package foo
-
-type wobble struct {
-	Some      string
-	Val       string
-	Inception *wobble
-}
-
-// Fooer is an empty interface used for tests.
-type Fooer interface{}
-
-// Fooer2 is an interface used for tests.
-type Fooer2 interface {
-	Foo()
-}
-
-// Fooer3 is an interface used for tests.
-type Fooer3 interface {
-	Foo()
-	Bar(a string)
-	Baz(a string) (err error)
-	Qux(a, b string) (val string, err error)
-	Wobble() (w *wobble)
-	Wiggle() (w wobble)
-}
-
-// Fooer4 is an interface used for tests.
-type Fooer4 interface {
-	Foo() error
-}
-
-// Bar is an interface used for tests.
-type Bar interface {
-	Boo(a string, b string) (s string, err error)
-}
-
-// Fooer5 is an interface used for tests.
-type Fooer5 interface {
-	Foo()
-	Bar
-}

+ 0 - 91
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go

@@ -1,91 +0,0 @@
-package main
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"go/format"
-	"io/ioutil"
-	"os"
-	"unicode"
-	"unicode/utf8"
-)
-
-type stringSet struct {
-	values map[string]struct{}
-}
-
-func (s stringSet) String() string {
-	return ""
-}
-
-func (s stringSet) Set(value string) error {
-	s.values[value] = struct{}{}
-	return nil
-}
-func (s stringSet) GetValues() map[string]struct{} {
-	return s.values
-}
-
-var (
-	typeName   = flag.String("type", "", "interface type to generate plugin rpc proxy for")
-	rpcName    = flag.String("name", *typeName, "RPC name, set if different from type")
-	inputFile  = flag.String("i", "", "input file path")
-	outputFile = flag.String("o", *inputFile+"_proxy.go", "output file path")
-
-	skipFuncs   map[string]struct{}
-	flSkipFuncs = stringSet{make(map[string]struct{})}
-
-	flBuildTags = stringSet{make(map[string]struct{})}
-)
-
-func errorOut(msg string, err error) {
-	if err == nil {
-		return
-	}
-	fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err)
-	os.Exit(1)
-}
-
-func checkFlags() error {
-	if *outputFile == "" {
-		return fmt.Errorf("missing required flag `-o`")
-	}
-	if *inputFile == "" {
-		return fmt.Errorf("missing required flag `-i`")
-	}
-	return nil
-}
-
-func main() {
-	flag.Var(flSkipFuncs, "skip", "skip parsing for function")
-	flag.Var(flBuildTags, "tag", "build tags to add to generated files")
-	flag.Parse()
-	skipFuncs = flSkipFuncs.GetValues()
-
-	errorOut("error", checkFlags())
-
-	pkg, err := Parse(*inputFile, *typeName)
-	errorOut(fmt.Sprintf("error parsing requested type %s", *typeName), err)
-
-	var analysis = struct {
-		InterfaceType string
-		RPCName       string
-		BuildTags     map[string]struct{}
-		*ParsedPkg
-	}{toLower(*typeName), *rpcName, flBuildTags.GetValues(), pkg}
-	var buf bytes.Buffer
-
-	errorOut("parser error", generatedTempl.Execute(&buf, analysis))
-	src, err := format.Source(buf.Bytes())
-	errorOut("error formating generated source", err)
-	errorOut("error writing file", ioutil.WriteFile(*outputFile, src, 0644))
-}
-
-func toLower(s string) string {
-	if s == "" {
-		return ""
-	}
-	r, n := utf8.DecodeRuneInString(s)
-	return string(unicode.ToLower(r)) + s[n:]
-}

+ 0 - 163
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go

@@ -1,163 +0,0 @@
-package main
-
-import (
-	"errors"
-	"fmt"
-	"go/ast"
-	"go/parser"
-	"go/token"
-	"reflect"
-)
-
-var errBadReturn = errors.New("found return arg with no name: all args must be named")
-
-type errUnexpectedType struct {
-	expected string
-	actual   interface{}
-}
-
-func (e errUnexpectedType) Error() string {
-	return fmt.Sprintf("got wrong type expecting %s, got: %v", e.expected, reflect.TypeOf(e.actual))
-}
-
-// ParsedPkg holds information about a package that has been parsed,
-// its name and the list of functions.
-type ParsedPkg struct {
-	Name      string
-	Functions []function
-}
-
-type function struct {
-	Name    string
-	Args    []arg
-	Returns []arg
-	Doc     string
-}
-
-type arg struct {
-	Name    string
-	ArgType string
-}
-
-func (a *arg) String() string {
-	return a.Name + " " + a.ArgType
-}
-
-// Parse parses the given file for an interface definition with the given name.
-func Parse(filePath string, objName string) (*ParsedPkg, error) {
-	fs := token.NewFileSet()
-	pkg, err := parser.ParseFile(fs, filePath, nil, parser.AllErrors)
-	if err != nil {
-		return nil, err
-	}
-	p := &ParsedPkg{}
-	p.Name = pkg.Name.Name
-	obj, exists := pkg.Scope.Objects[objName]
-	if !exists {
-		return nil, fmt.Errorf("could not find object %s in %s", objName, filePath)
-	}
-	if obj.Kind != ast.Typ {
-		return nil, fmt.Errorf("exected type, got %s", obj.Kind)
-	}
-	spec, ok := obj.Decl.(*ast.TypeSpec)
-	if !ok {
-		return nil, errUnexpectedType{"*ast.TypeSpec", obj.Decl}
-	}
-	iface, ok := spec.Type.(*ast.InterfaceType)
-	if !ok {
-		return nil, errUnexpectedType{"*ast.InterfaceType", spec.Type}
-	}
-
-	p.Functions, err = parseInterface(iface)
-	if err != nil {
-		return nil, err
-	}
-
-	return p, nil
-}
-
-func parseInterface(iface *ast.InterfaceType) ([]function, error) {
-	var functions []function
-	for _, field := range iface.Methods.List {
-		switch f := field.Type.(type) {
-		case *ast.FuncType:
-			method, err := parseFunc(field)
-			if err != nil {
-				return nil, err
-			}
-			if method == nil {
-				continue
-			}
-			functions = append(functions, *method)
-		case *ast.Ident:
-			spec, ok := f.Obj.Decl.(*ast.TypeSpec)
-			if !ok {
-				return nil, errUnexpectedType{"*ast.TypeSpec", f.Obj.Decl}
-			}
-			iface, ok := spec.Type.(*ast.InterfaceType)
-			if !ok {
-				return nil, errUnexpectedType{"*ast.TypeSpec", spec.Type}
-			}
-			funcs, err := parseInterface(iface)
-			if err != nil {
-				fmt.Println(err)
-				continue
-			}
-			functions = append(functions, funcs...)
-		default:
-			return nil, errUnexpectedType{"*astFuncType or *ast.Ident", f}
-		}
-	}
-	return functions, nil
-}
-
-func parseFunc(field *ast.Field) (*function, error) {
-	f := field.Type.(*ast.FuncType)
-	method := &function{Name: field.Names[0].Name}
-	if _, exists := skipFuncs[method.Name]; exists {
-		fmt.Println("skipping:", method.Name)
-		return nil, nil
-	}
-	if f.Params != nil {
-		args, err := parseArgs(f.Params.List)
-		if err != nil {
-			return nil, err
-		}
-		method.Args = args
-	}
-	if f.Results != nil {
-		returns, err := parseArgs(f.Results.List)
-		if err != nil {
-			return nil, fmt.Errorf("error parsing function returns for %q: %v", method.Name, err)
-		}
-		method.Returns = returns
-	}
-	return method, nil
-}
-
-func parseArgs(fields []*ast.Field) ([]arg, error) {
-	var args []arg
-	for _, f := range fields {
-		if len(f.Names) == 0 {
-			return nil, errBadReturn
-		}
-		for _, name := range f.Names {
-			var typeName string
-			switch argType := f.Type.(type) {
-			case *ast.Ident:
-				typeName = argType.Name
-			case *ast.StarExpr:
-				i, ok := argType.X.(*ast.Ident)
-				if !ok {
-					return nil, errUnexpectedType{"*ast.Ident", f.Type}
-				}
-				typeName = "*" + i.Name
-			default:
-				return nil, errUnexpectedType{"*ast.Ident or *ast.StarExpr", f.Type}
-			}
-
-			args = append(args, arg{name.Name, typeName})
-		}
-	}
-	return args, nil
-}

+ 0 - 168
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go

@@ -1,168 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"path/filepath"
-	"runtime"
-	"strings"
-	"testing"
-)
-
-const testFixture = "fixtures/foo.go"
-
-func TestParseEmptyInterface(t *testing.T) {
-	pkg, err := Parse(testFixture, "Fooer")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	assertName(t, "foo", pkg.Name)
-	assertNum(t, 0, len(pkg.Functions))
-}
-
-func TestParseNonInterfaceType(t *testing.T) {
-	_, err := Parse(testFixture, "wobble")
-	if _, ok := err.(errUnexpectedType); !ok {
-		t.Fatal("expected type error when parsing non-interface type")
-	}
-}
-
-func TestParseWithOneFunction(t *testing.T) {
-	pkg, err := Parse(testFixture, "Fooer2")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	assertName(t, "foo", pkg.Name)
-	assertNum(t, 1, len(pkg.Functions))
-	assertName(t, "Foo", pkg.Functions[0].Name)
-	assertNum(t, 0, len(pkg.Functions[0].Args))
-	assertNum(t, 0, len(pkg.Functions[0].Returns))
-}
-
-func TestParseWithMultipleFuncs(t *testing.T) {
-	pkg, err := Parse(testFixture, "Fooer3")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	assertName(t, "foo", pkg.Name)
-	assertNum(t, 6, len(pkg.Functions))
-
-	f := pkg.Functions[0]
-	assertName(t, "Foo", f.Name)
-	assertNum(t, 0, len(f.Args))
-	assertNum(t, 0, len(f.Returns))
-
-	f = pkg.Functions[1]
-	assertName(t, "Bar", f.Name)
-	assertNum(t, 1, len(f.Args))
-	assertNum(t, 0, len(f.Returns))
-	arg := f.Args[0]
-	assertName(t, "a", arg.Name)
-	assertName(t, "string", arg.ArgType)
-
-	f = pkg.Functions[2]
-	assertName(t, "Baz", f.Name)
-	assertNum(t, 1, len(f.Args))
-	assertNum(t, 1, len(f.Returns))
-	arg = f.Args[0]
-	assertName(t, "a", arg.Name)
-	assertName(t, "string", arg.ArgType)
-	arg = f.Returns[0]
-	assertName(t, "err", arg.Name)
-	assertName(t, "error", arg.ArgType)
-
-	f = pkg.Functions[3]
-	assertName(t, "Qux", f.Name)
-	assertNum(t, 2, len(f.Args))
-	assertNum(t, 2, len(f.Returns))
-	arg = f.Args[0]
-	assertName(t, "a", f.Args[0].Name)
-	assertName(t, "string", f.Args[0].ArgType)
-	arg = f.Args[1]
-	assertName(t, "b", arg.Name)
-	assertName(t, "string", arg.ArgType)
-	arg = f.Returns[0]
-	assertName(t, "val", arg.Name)
-	assertName(t, "string", arg.ArgType)
-	arg = f.Returns[1]
-	assertName(t, "err", arg.Name)
-	assertName(t, "error", arg.ArgType)
-
-	f = pkg.Functions[4]
-	assertName(t, "Wobble", f.Name)
-	assertNum(t, 0, len(f.Args))
-	assertNum(t, 1, len(f.Returns))
-	arg = f.Returns[0]
-	assertName(t, "w", arg.Name)
-	assertName(t, "*wobble", arg.ArgType)
-
-	f = pkg.Functions[5]
-	assertName(t, "Wiggle", f.Name)
-	assertNum(t, 0, len(f.Args))
-	assertNum(t, 1, len(f.Returns))
-	arg = f.Returns[0]
-	assertName(t, "w", arg.Name)
-	assertName(t, "wobble", arg.ArgType)
-}
-
-func TestParseWithUnamedReturn(t *testing.T) {
-	_, err := Parse(testFixture, "Fooer4")
-	if !strings.HasSuffix(err.Error(), errBadReturn.Error()) {
-		t.Fatalf("expected ErrBadReturn, got %v", err)
-	}
-}
-
-func TestEmbeddedInterface(t *testing.T) {
-	pkg, err := Parse(testFixture, "Fooer5")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	assertName(t, "foo", pkg.Name)
-	assertNum(t, 2, len(pkg.Functions))
-
-	f := pkg.Functions[0]
-	assertName(t, "Foo", f.Name)
-	assertNum(t, 0, len(f.Args))
-	assertNum(t, 0, len(f.Returns))
-
-	f = pkg.Functions[1]
-	assertName(t, "Boo", f.Name)
-	assertNum(t, 2, len(f.Args))
-	assertNum(t, 2, len(f.Returns))
-
-	arg := f.Args[0]
-	assertName(t, "a", arg.Name)
-	assertName(t, "string", arg.ArgType)
-
-	arg = f.Args[1]
-	assertName(t, "b", arg.Name)
-	assertName(t, "string", arg.ArgType)
-
-	arg = f.Returns[0]
-	assertName(t, "s", arg.Name)
-	assertName(t, "string", arg.ArgType)
-
-	arg = f.Returns[1]
-	assertName(t, "err", arg.Name)
-	assertName(t, "error", arg.ArgType)
-}
-
-func assertName(t *testing.T, expected, actual string) {
-	if expected != actual {
-		fatalOut(t, fmt.Sprintf("expected name to be `%s`, got: %s", expected, actual))
-	}
-}
-
-func assertNum(t *testing.T, expected, actual int) {
-	if expected != actual {
-		fatalOut(t, fmt.Sprintf("expected number to be %d, got: %d", expected, actual))
-	}
-}
-
-func fatalOut(t *testing.T, msg string) {
-	_, file, ln, _ := runtime.Caller(2)
-	t.Fatalf("%s:%d: %s", filepath.Base(file), ln, msg)
-}

+ 0 - 97
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go

@@ -1,97 +0,0 @@
-package main
-
-import (
-	"strings"
-	"text/template"
-)
-
-func printArgs(args []arg) string {
-	var argStr []string
-	for _, arg := range args {
-		argStr = append(argStr, arg.String())
-	}
-	return strings.Join(argStr, ", ")
-}
-
-func marshalType(t string) string {
-	switch t {
-	case "error":
-		// convert error types to plain strings to ensure the values are encoded/decoded properly
-		return "string"
-	default:
-		return t
-	}
-}
-
-func isErr(t string) bool {
-	switch t {
-	case "error":
-		return true
-	default:
-		return false
-	}
-}
-
-// Need to use this helper due to issues with go-vet
-func buildTag(s string) string {
-	return "+build " + s
-}
-
-var templFuncs = template.FuncMap{
-	"printArgs":   printArgs,
-	"marshalType": marshalType,
-	"isErr":       isErr,
-	"lower":       strings.ToLower,
-	"title":       strings.Title,
-	"tag":         buildTag,
-}
-
-var generatedTempl = template.Must(template.New("rpc_cient").Funcs(templFuncs).Parse(`
-// generated code - DO NOT EDIT
-{{ range $k, $v := .BuildTags }}
-	// {{ tag $k }} {{ end }}
-
-package {{ .Name }}
-
-import "errors"
-
-type client interface{
-	Call(string, interface{}, interface{}) error
-}
-
-type {{ .InterfaceType }}Proxy struct {
-	client
-}
-
-{{ range .Functions }}
-	type {{ $.InterfaceType }}Proxy{{ .Name }}Request struct{
-		{{ range .Args }}
-			{{ title .Name }} {{ .ArgType }} {{ end }}
-	}
-
-	type {{ $.InterfaceType }}Proxy{{ .Name }}Response struct{
-		{{ range .Returns }}
-			{{ title .Name }} {{ marshalType .ArgType }} {{ end }}
-	}
-
-	func (pp *{{ $.InterfaceType }}Proxy) {{ .Name }}({{ printArgs .Args }}) ({{ printArgs .Returns }}) {
-		var(
-			req {{ $.InterfaceType }}Proxy{{ .Name }}Request
-			ret {{ $.InterfaceType }}Proxy{{ .Name }}Response
-		)
-		{{ range .Args }}
-			req.{{ title .Name }} = {{ lower .Name }} {{ end }}
-		if err = pp.Call("{{ $.RPCName }}.{{ .Name }}", req, &ret); err != nil {
-			return
-		}
-		{{ range $r := .Returns }}
-			{{ if isErr .ArgType }}
-				if ret.{{ title .Name }} != "" {
-					{{ lower .Name }} = errors.New(ret.{{ title .Name }})
-				} {{ end }}
-			{{ if isErr .ArgType | not }} {{ lower .Name }} = ret.{{ title .Name }} {{ end }} {{ end }}
-
-		return
-	}
-{{ end }}
-`))

+ 91 - 16
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/plugins.go

@@ -28,7 +28,7 @@ import (
 	"time"
 	"time"
 
 
 	"github.com/Sirupsen/logrus"
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/pkg/tlsconfig"
+	"github.com/docker/go-connections/tlsconfig"
 )

 var (
@@ -65,23 +65,36 @@ type Plugin struct {
 	// Manifest of the plugin (see above)
 	Manifest *Manifest `json:"-"`

-	activatErr   error
-	activateOnce sync.Once
+	// error produced by activation
+	activateErr error
+	// specifies if the activation sequence is completed (not if it is successful or not)
+	activated bool
+	// wait for activation to finish
+	activateWait *sync.Cond
 }

 func newLocalPlugin(name, addr string) *Plugin {
 	return &Plugin{
-		Name:      name,
-		Addr:      addr,
-		TLSConfig: tlsconfig.Options{InsecureSkipVerify: true},
+		Name:         name,
+		Addr:         addr,
+		TLSConfig:    tlsconfig.Options{InsecureSkipVerify: true},
+		activateWait: sync.NewCond(&sync.Mutex{}),
 	}
 }

 func (p *Plugin) activate() error {
-	p.activateOnce.Do(func() {
-		p.activatErr = p.activateWithLock()
-	})
-	return p.activatErr
+	p.activateWait.L.Lock()
+	if p.activated {
+		p.activateWait.L.Unlock()
+		return p.activateErr
+	}
+
+	p.activateErr = p.activateWithLock()
+	p.activated = true
+
+	p.activateWait.L.Unlock()
+	p.activateWait.Broadcast()
+	return p.activateErr
 }

 func (p *Plugin) activateWithLock() error {
@@ -96,7 +109,6 @@ func (p *Plugin) activateWithLock() error {
 		return err
 	}

-	logrus.Debugf("%s's manifest: %v", p.Name, m)
 	p.Manifest = m

 	for _, iface := range m.Implements {
@@ -109,6 +121,27 @@ func (p *Plugin) activateWithLock() error {
 	return nil
 }

+func (p *Plugin) waitActive() error {
+	p.activateWait.L.Lock()
+	for !p.activated {
+		p.activateWait.Wait()
+	}
+	p.activateWait.L.Unlock()
+	return p.activateErr
+}
+
+func (p *Plugin) implements(kind string) bool {
+	if err := p.waitActive(); err != nil {
+		return false
+	}
+	for _, driver := range p.Manifest.Implements {
+		if driver == kind {
+			return true
+		}
+	}
+	return false
+}
+
 func load(name string) (*Plugin, error) {
 	return loadWithRetry(name, true)
 }
@@ -167,11 +200,9 @@ func Get(name, imp string) (*Plugin, error) {
 	if err != nil {
 		return nil, err
 	}
-	for _, driver := range pl.Manifest.Implements {
-		logrus.Debugf("%s implements: %s", name, driver)
-		if driver == imp {
-			return pl, nil
-		}
+	if pl.implements(imp) {
+		logrus.Debugf("%s implements: %s", name, imp)
+		return pl, nil
 	}
 	return nil, ErrNotImplements
 }
@@ -180,3 +211,47 @@ func Get(name, imp string) (*Plugin, error) {
 func Handle(iface string, fn func(string, *Client)) {
 	extpointHandlers[iface] = fn
 }
+
+// GetAll returns all the plugins for the specified implementation
+func GetAll(imp string) ([]*Plugin, error) {
+	pluginNames, err := Scan()
+	if err != nil {
+		return nil, err
+	}
+
+	type plLoad struct {
+		pl  *Plugin
+		err error
+	}
+
+	chPl := make(chan *plLoad, len(pluginNames))
+	var wg sync.WaitGroup
+	for _, name := range pluginNames {
+		if pl, ok := storage.plugins[name]; ok {
+			chPl <- &plLoad{pl, nil}
+			continue
+		}
+
+		wg.Add(1)
+		go func(name string) {
+			defer wg.Done()
+			pl, err := loadWithRetry(name, false)
+			chPl <- &plLoad{pl, err}
+		}(name)
+	}
+
+	wg.Wait()
+	close(chPl)
+
+	var out []*Plugin
+	for pl := range chPl {
+		if pl.err != nil {
+			logrus.Error(pl.err)
+			continue
+		}
+		if pl.pl.implements(imp) {
+			out = append(out, pl.pl)
+		}
+	}
+	return out, nil
+}
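
As a hedged illustration of how the new `GetAll` helper might be consumed alongside the existing `Handle` and `Get` entry points: the sketch below is not part of this change, and the `NetworkDriver` implementation name is only an example.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/plugins"
)

func main() {
	// Register a callback that runs when an activated plugin advertises
	// the given interface in its manifest.
	plugins.Handle("NetworkDriver", func(name string, c *plugins.Client) {
		fmt.Println("activated plugin:", name)
	})

	// Load every discoverable plugin and keep the ones implementing the interface.
	all, err := plugins.GetAll("NetworkDriver")
	if err != nil {
		fmt.Println("plugin scan failed:", err)
		return
	}
	for _, p := range all {
		fmt.Println("found plugin:", p.Name)
	}
}
```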

+ 36 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/transport/http.go

@@ -0,0 +1,36 @@
+package transport
+
+import (
+	"io"
+	"net/http"
+)
+
+// httpTransport holds an http.RoundTripper
+// and information about the scheme and address the transport
+// sends request to.
+type httpTransport struct {
+	http.RoundTripper
+	scheme string
+	addr   string
+}
+
+// NewHTTPTransport creates a new httpTransport.
+func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport {
+	return httpTransport{
+		RoundTripper: r,
+		scheme:       scheme,
+		addr:         addr,
+	}
+}
+
+// NewRequest creates a new http.Request and sets the URL
+// scheme and address with the transport's fields.
+func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) {
+	req, err := newHTTPRequest(path, data)
+	if err != nil {
+		return nil, err
+	}
+	req.URL.Scheme = t.scheme
+	req.URL.Host = t.addr
+	return req, nil
+}

+ 36 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/plugins/transport/transport.go

@@ -0,0 +1,36 @@
+package transport
+
+import (
+	"io"
+	"net/http"
+	"strings"
+)
+
+// VersionMimetype is the Content-Type the engine sends to plugins.
+const VersionMimetype = "application/vnd.docker.plugins.v1.2+json"
+
+// RequestFactory defines an interface that
+// transports can implement to create new requests.
+type RequestFactory interface {
+	NewRequest(path string, data io.Reader) (*http.Request, error)
+}
+
+// Transport defines an interface that plugin transports
+// must implement.
+type Transport interface {
+	http.RoundTripper
+	RequestFactory
+}
+
+// newHTTPRequest creates a new request with a path and a body.
+func newHTTPRequest(path string, data io.Reader) (*http.Request, error) {
+	if !strings.HasPrefix(path, "/") {
+		path = "/" + path
+	}
+	req, err := http.NewRequest("POST", path, data)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Accept", VersionMimetype)
+	return req, nil
+}
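
A hypothetical sketch of how a plugin client could wire up the new transport package; the scheme, address, and RPC path below are assumptions for illustration only, not values taken from this change.

```go
package main

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/docker/docker/pkg/plugins/transport"
)

func main() {
	// Wrap the default RoundTripper; requests built by this transport target
	// the given scheme and host and carry the plugin Accept header.
	t := transport.NewHTTPTransport(http.DefaultTransport, "https", "example-plugin:8080")

	req, err := t.NewRequest("VolumeDriver.Create", strings.NewReader(`{"Name":"vol1"}`))
	if err != nil {
		fmt.Println("building request failed:", err)
		return
	}
	fmt.Println(req.Method, req.URL.String(), req.Header.Get("Accept"))
}
```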

+ 0 - 216
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/proxy/network_proxy_test.go

@@ -1,216 +0,0 @@
-package proxy
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"net"
-	"strings"
-	"testing"
-	"time"
-)
-
-var testBuf = []byte("Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo")
-var testBufSize = len(testBuf)
-
-type EchoServer interface {
-	Run()
-	Close()
-	LocalAddr() net.Addr
-}
-
-type TCPEchoServer struct {
-	listener net.Listener
-	testCtx  *testing.T
-}
-
-type UDPEchoServer struct {
-	conn    net.PacketConn
-	testCtx *testing.T
-}
-
-func NewEchoServer(t *testing.T, proto, address string) EchoServer {
-	var server EchoServer
-	if strings.HasPrefix(proto, "tcp") {
-		listener, err := net.Listen(proto, address)
-		if err != nil {
-			t.Fatal(err)
-		}
-		server = &TCPEchoServer{listener: listener, testCtx: t}
-	} else {
-		socket, err := net.ListenPacket(proto, address)
-		if err != nil {
-			t.Fatal(err)
-		}
-		server = &UDPEchoServer{conn: socket, testCtx: t}
-	}
-	return server
-}
-
-func (server *TCPEchoServer) Run() {
-	go func() {
-		for {
-			client, err := server.listener.Accept()
-			if err != nil {
-				return
-			}
-			go func(client net.Conn) {
-				if _, err := io.Copy(client, client); err != nil {
-					server.testCtx.Logf("can't echo to the client: %v\n", err.Error())
-				}
-				client.Close()
-			}(client)
-		}
-	}()
-}
-
-func (server *TCPEchoServer) LocalAddr() net.Addr { return server.listener.Addr() }
-func (server *TCPEchoServer) Close()              { server.listener.Close() }
-
-func (server *UDPEchoServer) Run() {
-	go func() {
-		readBuf := make([]byte, 1024)
-		for {
-			read, from, err := server.conn.ReadFrom(readBuf)
-			if err != nil {
-				return
-			}
-			for i := 0; i != read; {
-				written, err := server.conn.WriteTo(readBuf[i:read], from)
-				if err != nil {
-					break
-				}
-				i += written
-			}
-		}
-	}()
-}
-
-func (server *UDPEchoServer) LocalAddr() net.Addr { return server.conn.LocalAddr() }
-func (server *UDPEchoServer) Close()              { server.conn.Close() }
-
-func testProxyAt(t *testing.T, proto string, proxy Proxy, addr string) {
-	defer proxy.Close()
-	go proxy.Run()
-	client, err := net.Dial(proto, addr)
-	if err != nil {
-		t.Fatalf("Can't connect to the proxy: %v", err)
-	}
-	defer client.Close()
-	client.SetDeadline(time.Now().Add(10 * time.Second))
-	if _, err = client.Write(testBuf); err != nil {
-		t.Fatal(err)
-	}
-	recvBuf := make([]byte, testBufSize)
-	if _, err = client.Read(recvBuf); err != nil {
-		t.Fatal(err)
-	}
-	if !bytes.Equal(testBuf, recvBuf) {
-		t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf))
-	}
-}
-
-func testProxy(t *testing.T, proto string, proxy Proxy) {
-	testProxyAt(t, proto, proxy, proxy.FrontendAddr().String())
-}
-
-func TestTCP4Proxy(t *testing.T) {
-	backend := NewEchoServer(t, "tcp", "127.0.0.1:0")
-	defer backend.Close()
-	backend.Run()
-	frontendAddr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
-	proxy, err := NewProxy(frontendAddr, backend.LocalAddr())
-	if err != nil {
-		t.Fatal(err)
-	}
-	testProxy(t, "tcp", proxy)
-}
-
-func TestTCP6Proxy(t *testing.T) {
-	backend := NewEchoServer(t, "tcp", "[::1]:0")
-	defer backend.Close()
-	backend.Run()
-	frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0}
-	proxy, err := NewProxy(frontendAddr, backend.LocalAddr())
-	if err != nil {
-		t.Fatal(err)
-	}
-	testProxy(t, "tcp", proxy)
-}
-
-func TestTCPDualStackProxy(t *testing.T) {
-	// If I understand `godoc -src net favoriteAddrFamily` (used by the
-	// net.Listen* functions) correctly this should work, but it doesn't.
-	t.Skip("No support for dual stack yet")
-	backend := NewEchoServer(t, "tcp", "[::1]:0")
-	defer backend.Close()
-	backend.Run()
-	frontendAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 0}
-	proxy, err := NewProxy(frontendAddr, backend.LocalAddr())
-	if err != nil {
-		t.Fatal(err)
-	}
-	ipv4ProxyAddr := &net.TCPAddr{
-		IP:   net.IPv4(127, 0, 0, 1),
-		Port: proxy.FrontendAddr().(*net.TCPAddr).Port,
-	}
-	testProxyAt(t, "tcp", proxy, ipv4ProxyAddr.String())
-}
-
-func TestUDP4Proxy(t *testing.T) {
-	backend := NewEchoServer(t, "udp", "127.0.0.1:0")
-	defer backend.Close()
-	backend.Run()
-	frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
-	proxy, err := NewProxy(frontendAddr, backend.LocalAddr())
-	if err != nil {
-		t.Fatal(err)
-	}
-	testProxy(t, "udp", proxy)
-}
-
-func TestUDP6Proxy(t *testing.T) {
-	backend := NewEchoServer(t, "udp", "[::1]:0")
-	defer backend.Close()
-	backend.Run()
-	frontendAddr := &net.UDPAddr{IP: net.IPv6loopback, Port: 0}
-	proxy, err := NewProxy(frontendAddr, backend.LocalAddr())
-	if err != nil {
-		t.Fatal(err)
-	}
-	testProxy(t, "udp", proxy)
-}
-
-func TestUDPWriteError(t *testing.T) {
-	frontendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0}
-	// Hopefully, this port will be free.
-	backendAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 25587}
-	proxy, err := NewProxy(frontendAddr, backendAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer proxy.Close()
-	go proxy.Run()
-	client, err := net.Dial("udp", "127.0.0.1:25587")
-	if err != nil {
-		t.Fatalf("Can't connect to the proxy: %v", err)
-	}
-	defer client.Close()
-	// Make sure the proxy doesn't stop when there is no actual backend:
-	client.Write(testBuf)
-	client.Write(testBuf)
-	backend := NewEchoServer(t, "udp", "127.0.0.1:25587")
-	defer backend.Close()
-	backend.Run()
-	client.SetDeadline(time.Now().Add(10 * time.Second))
-	if _, err = client.Write(testBuf); err != nil {
-		t.Fatal(err)
-	}
-	recvBuf := make([]byte, testBufSize)
-	if _, err = client.Read(recvBuf); err != nil {
-		t.Fatal(err)
-	}
-	if !bytes.Equal(testBuf, recvBuf) {
-		t.Fatal(fmt.Errorf("Expected [%v] but got [%v]", testBuf, recvBuf))
-	}
-}

+ 0 - 22
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/random/random_test.go

@@ -1,22 +0,0 @@
-package random
-
-import (
-	"math/rand"
-	"sync"
-	"testing"
-)
-
-// for go test -v -race
-func TestConcurrency(t *testing.T) {
-	rnd := rand.New(NewSource())
-	var wg sync.WaitGroup
-
-	for i := 0; i < 10; i++ {
-		wg.Add(1)
-		go func() {
-			rnd.Int63()
-			wg.Done()
-		}()
-	}
-	wg.Wait()
-}

+ 2 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/docker/pkg/signal/signal_unix.go

@@ -14,6 +14,8 @@ const (
 	SIGCHLD = syscall.SIGCHLD
 	// SIGWINCH is a signal sent to a process when its controlling terminal changes its size
 	SIGWINCH = syscall.SIGWINCH
+	// SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading
+	SIGPIPE = syscall.SIGPIPE
 	// DefaultStopSignal is the syscall signal used to stop a container in unix systems.
 	DefaultStopSignal = "SIGTERM"
 )

Some files were not shown because too many files changed in this diff