
Merge pull request #188 from mavenugo/kvstore

Host Discovery, DataStore & Config support
Jana Radhakrishnan · 10 years ago
parent · commit db7178a675
100 changed files with 14781 additions and 0 deletions
  1. libnetwork/Godeps/Godeps.json (+34 -0)
  2. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/.gitignore (+5 -0)
  3. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/.travis.yml (+12 -0)
  4. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/COMPATIBLE (+3 -0)
  5. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/COPYING (+14 -0)
  6. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/Makefile (+19 -0)
  7. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md (+220 -0)
  8. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING (+14 -0)
  9. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md (+14 -0)
  10. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go (+90 -0)
  11. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING (+14 -0)
  12. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md (+14 -0)
  13. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go (+131 -0)
  14. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING (+14 -0)
  15. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md (+22 -0)
  16. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go (+61 -0)
  17. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go (+492 -0)
  18. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go (+122 -0)
  19. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go (+950 -0)
  20. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/doc.go (+27 -0)
  21. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go (+496 -0)
  22. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go (+506 -0)
  23. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go (+19 -0)
  24. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go (+18 -0)
  25. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go (+874 -0)
  26. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go (+498 -0)
  27. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/session.vim (+1 -0)
  28. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go (+91 -0)
  29. libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/type_fields.go (+241 -0)
  30. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child.go (+23 -0)
  31. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go (+73 -0)
  32. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go (+481 -0)
  33. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go (+108 -0)
  34. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go (+37 -0)
  35. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete.go (+34 -0)
  36. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go (+46 -0)
  37. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap.go (+36 -0)
  38. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go (+57 -0)
  39. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug.go (+55 -0)
  40. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go (+28 -0)
  41. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete.go (+40 -0)
  42. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go (+81 -0)
  43. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/error.go (+49 -0)
  44. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get.go (+32 -0)
  45. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go (+131 -0)
  46. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member.go (+30 -0)
  47. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member_test.go (+71 -0)
  48. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/options.go (+72 -0)
  49. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go (+403 -0)
  50. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests_test.go (+22 -0)
  51. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.go (+89 -0)
  52. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go (+42 -0)
  53. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create.go (+137 -0)
  54. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go (+241 -0)
  55. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go (+6 -0)
  56. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch.go (+103 -0)
  57. libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go (+119 -0)
  58. libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/.gitignore (+22 -0)
  59. libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/.travis.yml (+9 -0)
  60. libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/LICENSE (+22 -0)
  61. libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/README.md (+94 -0)
  62. libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/bench_test.go (+523 -0)
  63. libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/set.go (+168 -0)
  64. libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/set_test.go (+910 -0)
  65. libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/threadsafe.go (+204 -0)
  66. libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/threadsafe_test.go (+376 -0)
  67. libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/threadunsafe.go (+246 -0)
  68. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/README.md (+274 -0)
  69. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery.go (+106 -0)
  70. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery_test.go (+54 -0)
  71. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file.go (+71 -0)
  72. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file_test.go (+46 -0)
  73. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator.go (+35 -0)
  74. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator_test.go (+55 -0)
  75. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv.go (+92 -0)
  76. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv_test.go (+20 -0)
  77. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes.go (+45 -0)
  78. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes_test.go (+31 -0)
  79. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/README.md (+31 -0)
  80. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token.go (+104 -0)
  81. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token_test.go (+36 -0)
  82. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/README.md (+79 -0)
  83. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/consul.go (+301 -0)
  84. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/etcd.go (+264 -0)
  85. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/helpers.go (+51 -0)
  86. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/store.go (+92 -0)
  87. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/structs.go (+60 -0)
  88. libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/zookeeper.go (+213 -0)
  89. libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md (+39 -0)
  90. libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go (+140 -0)
  91. libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go (+148 -0)
  92. libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go (+331 -0)
  93. libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go (+404 -0)
  94. libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go (+351 -0)
  95. libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go (+339 -0)
  96. libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go (+181 -0)
  97. libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go (+273 -0)
  98. libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go (+104 -0)
  99. libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go (+39 -0)
  100. libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go (+136 -0)

+ 34 - 0
libnetwork/Godeps/Godeps.json

@@ -5,11 +5,26 @@
 		"./..."
 	],
 	"Deps": [
+		{
+			"ImportPath": "github.com/BurntSushi/toml",
+			"Comment": "v0.1.0-16-gf706d00",
+			"Rev": "f706d00e3de6abe700c994cdd545a1a4915af060"
+		},
 		{
 			"ImportPath": "github.com/Sirupsen/logrus",
 			"Comment": "v0.6.4-12-g467d9d5",
 			"Rev": "467d9d55c2d2c17248441a8fc661561161f40d5e"
 		},
+		{
+			"ImportPath": "github.com/coreos/go-etcd/etcd",
+			"Comment": "v2.0.0-7-g73a8ef7",
+			"Rev": "73a8ef737e8ea002281a28b4cb92a1de121ad4c6"
+		},
+		{
+			"ImportPath": "github.com/deckarep/golang-set",
+			"Comment": "v1-26-gef32fa3",
+			"Rev": "ef32fa3046d9f249d399f98ebaf9be944430fd1d"
+		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/homedir",
 			"Comment": "v1.4.1-3479-ga9172f5",
@@ -60,6 +75,16 @@
 			"Comment": "v1.4.0-495-g3e66118",
 			"Rev": "3e661186ba24f259d3860f067df052c7f6904bee"
 		},
+		{
+			"ImportPath": "github.com/docker/swarm/discovery",
+			"Comment": "v0.2.0-333-g54dfabd",
+			"Rev": "54dfabd2521314de1c5b036f6c609efbe09df4ea"
+		},
+		{
+			"ImportPath": "github.com/docker/swarm/pkg/store",
+			"Comment": "v0.2.0-333-g54dfabd",
+			"Rev": "54dfabd2521314de1c5b036f6c609efbe09df4ea"
+		},
 		{
 			"ImportPath": "github.com/godbus/dbus",
 			"Comment": "v2-3-g4160802",
@@ -73,6 +98,15 @@
 			"ImportPath": "github.com/gorilla/mux",
 			"Rev": "8096f47503459bcc74d1f4c487b7e6e42e5746b5"
 		},
+		{
+			"ImportPath": "github.com/hashicorp/consul/api",
+			"Comment": "v0.5.0rc1-66-g954aec6",
+			"Rev": "954aec66231b79c161a4122b023fbcad13047f79"
+		},
+		{
+			"ImportPath": "github.com/samuel/go-zookeeper/zk",
+			"Rev": "d0e0d8e11f318e000a8cc434616d69e329edc374"
+		},
 		{
 			"ImportPath": "github.com/vishvananda/netlink",
 			"Rev": "8eb64238879fed52fd51c5b30ad20b928fb4c36c"

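Each entry added above pins one dependency to an exact revision (an `ImportPath` plus a `Rev`, with `Comment` giving the nearest tag). A rough sketch of how such pins are typically consumed with the godep tool follows; this is an illustrative workflow, not something prescribed by the file itself:

```bash
# Illustrative godep workflow; assumes the godep tool is available.
go get github.com/tools/godep

cd libnetwork

# Check out every pinned revision from Godeps/Godeps.json into $GOPATH
# (useful when working outside the checked-in _workspace tree).
godep restore

# Or build against the vendored copies: godep prepends Godeps/_workspace
# to GOPATH for the duration of the command.
godep go build ./...
```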
+ 5 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/.gitignore

@@ -0,0 +1,5 @@
+TAGS
+tags
+.*.swp
+tomlcheck/tomlcheck
+toml.test

+ 12 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/.travis.yml

@@ -0,0 +1,12 @@
+language: go
+go:
+  - 1.1
+  - 1.2
+  - tip
+install:
+  - go install ./...
+  - go get github.com/BurntSushi/toml-test
+script:
+  - export PATH="$PATH:$HOME/gopath/bin"
+  - make test
+

+ 3 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/COMPATIBLE

@@ -0,0 +1,3 @@
+Compatible with TOML version
+[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
+

+ 14 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/COPYING

@@ -0,0 +1,14 @@
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+                    Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. You just DO WHAT THE FUCK YOU WANT TO.
+

+ 19 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/Makefile

@@ -0,0 +1,19 @@
+install:
+	go install ./...
+
+test: install
+	go test -v
+	toml-test toml-test-decoder
+	toml-test -encoder toml-test-encoder
+
+fmt:
+	gofmt -w *.go */*.go
+	colcheck *.go */*.go
+
+tags:
+	find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
+
+push:
+	git push origin master
+	git push github master
+

+ 220 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md

@@ -0,0 +1,220 @@
+## TOML parser and encoder for Go with reflection
+
+TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
+reflection interface similar to Go's standard library `json` and `xml` 
+packages. This package also supports the `encoding.TextUnmarshaler` and
+`encoding.TextMarshaler` interfaces so that you can define custom data 
+representations. (There is an example of this below.)
+
+Spec: https://github.com/mojombo/toml
+
+Compatible with TOML version
+[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
+
+Documentation: http://godoc.org/github.com/BurntSushi/toml
+
+Installation:
+
+```bash
+go get github.com/BurntSushi/toml
+```
+
+Try the toml validator:
+
+```bash
+go get github.com/BurntSushi/toml/cmd/tomlv
+tomlv some-toml-file.toml
+```
+
+[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml)
+
+
+### Testing
+
+This package passes all tests in
+[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
+and the encoder.
+
+### Examples
+
+This package works similarly to how the Go standard library handles `XML`
+and `JSON`. Namely, data is loaded into Go values via reflection.
+
+For the simplest example, consider some TOML file as just a list of keys
+and values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which could be defined in Go as:
+
+```go
+type Config struct {
+  Age int
+  Cats []string
+  Pi float64
+  Perfection []int
+  DOB time.Time // requires `import time`
+}
+```
+
+And then decoded with:
+
+```go
+var conf Config
+if _, err := toml.Decode(tomlData, &conf); err != nil {
+  // handle error
+}
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key value directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+  ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+### Using the `encoding.TextUnmarshaler` interface
+
+Here's an example that automatically parses duration strings into 
+`time.Duration` values:
+
+```toml
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+```
+
+Which can be decoded with:
+
+```go
+type song struct {
+  Name     string
+  Duration duration
+}
+type songs struct {
+  Song []song
+}
+var favorites songs
+if _, err := toml.Decode(blob, &favorites); err != nil {
+  log.Fatal(err)
+}
+
+for _, s := range favorites.Song {
+  fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+}
+```
+
+And you'll also need a `duration` type that satisfies the 
+`encoding.TextUnmarshaler` interface:
+
+```go
+type duration struct {
+	time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+	var err error
+	d.Duration, err = time.ParseDuration(string(text))
+	return err
+}
+```
+
+### More complex usage
+
+Here's an example of how to load the example from the official spec page:
+
+```toml
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+  # You can indent as you please. Tabs or spaces. TOML don't care.
+  [servers.alpha]
+  ip = "10.0.0.1"
+  dc = "eqdc10"
+
+  [servers.beta]
+  ip = "10.0.0.2"
+  dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+  "alpha",
+  "omega"
+]
+```
+
+And the corresponding Go types are:
+
+```go
+type tomlConfig struct {
+	Title string
+	Owner ownerInfo
+	DB database `toml:"database"`
+	Servers map[string]server
+	Clients clients
+}
+
+type ownerInfo struct {
+	Name string
+	Org string `toml:"organization"`
+	Bio string
+	DOB time.Time
+}
+
+type database struct {
+	Server string
+	Ports []int
+	ConnMax int `toml:"connection_max"`
+	Enabled bool
+}
+
+type server struct {
+	IP string
+	DC string
+}
+
+type clients struct {
+	Data [][]interface{}
+	Hosts []string
+}
+```
+
+Note that a case insensitive match will be tried if an exact match can't be
+found.
+
+A working example of the above can be found in `_examples/example.{go,toml}`.
+

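Taken together, the snippets in the README above form a complete program. A minimal, self-contained sketch of the simple key/value case (the TOML literal is the one from the README; the file layout is illustrative):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

// Config mirrors the keys of the simple TOML document shown in the README.
type Config struct {
	Age        int
	Cats       []string
	Pi         float64
	Perfection []int
	DOB        time.Time
}

const tomlData = `
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
`

func main() {
	var conf Config
	// Decode returns MetaData describing which keys were consumed.
	md, err := toml.Decode(tomlData, &conf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", conf)
	fmt.Printf("undecoded keys: %q\n", md.Undecoded())
}
```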
+ 14 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING

@@ -0,0 +1,14 @@
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+                    Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. You just DO WHAT THE FUCK YOU WANT TO.
+

+ 14 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md

@@ -0,0 +1,14 @@
+# Implements the TOML test suite interface
+
+This is an implementation of the interface expected by
+[toml-test](https://github.com/BurntSushi/toml-test) for my
+[toml parser written in Go](https://github.com/BurntSushi/toml).
+In particular, it maps TOML data on `stdin` to a JSON format on `stdout`.
+
+
+Compatible with TOML version
+[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
+
+Compatible with `toml-test` version
+[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
+

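For example, a document containing only `Age = 25` is emitted on stdout as `{"Age":{"type":"integer","value":"25"}}`: as the implementation below shows, every primitive value is wrapped in a type/value pair, with the value itself rendered as a string.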
+ 90 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go

@@ -0,0 +1,90 @@
+// Command toml-test-decoder satisfies the toml-test interface for testing
+// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout.
+package main
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"path"
+	"time"
+
+	"github.com/BurntSushi/toml"
+)
+
+func init() {
+	log.SetFlags(0)
+
+	flag.Usage = usage
+	flag.Parse()
+}
+
+func usage() {
+	log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0]))
+	flag.PrintDefaults()
+
+	os.Exit(1)
+}
+
+func main() {
+	if flag.NArg() != 0 {
+		flag.Usage()
+	}
+
+	var tmp interface{}
+	if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil {
+		log.Fatalf("Error decoding TOML: %s", err)
+	}
+
+	typedTmp := translate(tmp)
+	if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil {
+		log.Fatalf("Error encoding JSON: %s", err)
+	}
+}
+
+func translate(tomlData interface{}) interface{} {
+	switch orig := tomlData.(type) {
+	case map[string]interface{}:
+		typed := make(map[string]interface{}, len(orig))
+		for k, v := range orig {
+			typed[k] = translate(v)
+		}
+		return typed
+	case []map[string]interface{}:
+		typed := make([]map[string]interface{}, len(orig))
+		for i, v := range orig {
+			typed[i] = translate(v).(map[string]interface{})
+		}
+		return typed
+	case []interface{}:
+		typed := make([]interface{}, len(orig))
+		for i, v := range orig {
+			typed[i] = translate(v)
+		}
+
+		// We don't really need to tag arrays, but let's be future proof.
+		// (If TOML ever supports tuples, we'll need this.)
+		return tag("array", typed)
+	case time.Time:
+		return tag("datetime", orig.Format("2006-01-02T15:04:05Z"))
+	case bool:
+		return tag("bool", fmt.Sprintf("%v", orig))
+	case int64:
+		return tag("integer", fmt.Sprintf("%d", orig))
+	case float64:
+		return tag("float", fmt.Sprintf("%v", orig))
+	case string:
+		return tag("string", orig)
+	}
+
+	panic(fmt.Sprintf("Unknown type: %T", tomlData))
+}
+
+func tag(typeName string, data interface{}) map[string]interface{} {
+	return map[string]interface{}{
+		"type":  typeName,
+		"value": data,
+	}
+}

+ 14 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING

@@ -0,0 +1,14 @@
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+                    Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. You just DO WHAT THE FUCK YOU WANT TO.
+

+ 14 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md

@@ -0,0 +1,14 @@
+# Implements the TOML test suite interface for TOML encoders
+
+This is an implementation of the interface expected by
+[toml-test](https://github.com/BurntSushi/toml-test) for the
+[TOML encoder](https://github.com/BurntSushi/toml).
+In particular, it maps JSON data on `stdin` to a TOML format on `stdout`.
+
+
+Compatible with TOML version
+[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
+
+Compatible with `toml-test` version
+[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
+

+ 131 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go

@@ -0,0 +1,131 @@
+// Command toml-test-encoder satisfies the toml-test interface for testing
+// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout.
+package main
+
+import (
+	"encoding/json"
+	"flag"
+	"log"
+	"os"
+	"path"
+	"strconv"
+	"time"
+
+	"github.com/BurntSushi/toml"
+)
+
+func init() {
+	log.SetFlags(0)
+
+	flag.Usage = usage
+	flag.Parse()
+}
+
+func usage() {
+	log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0]))
+	flag.PrintDefaults()
+
+	os.Exit(1)
+}
+
+func main() {
+	if flag.NArg() != 0 {
+		flag.Usage()
+	}
+
+	var tmp interface{}
+	if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil {
+		log.Fatalf("Error decoding JSON: %s", err)
+	}
+
+	tomlData := translate(tmp)
+	if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil {
+		log.Fatalf("Error encoding TOML: %s", err)
+	}
+}
+
+func translate(typedJson interface{}) interface{} {
+	switch v := typedJson.(type) {
+	case map[string]interface{}:
+		if len(v) == 2 && in("type", v) && in("value", v) {
+			return untag(v)
+		}
+		m := make(map[string]interface{}, len(v))
+		for k, v2 := range v {
+			m[k] = translate(v2)
+		}
+		return m
+	case []interface{}:
+		tabArray := make([]map[string]interface{}, len(v))
+		for i := range v {
+			if m, ok := translate(v[i]).(map[string]interface{}); ok {
+				tabArray[i] = m
+			} else {
+				log.Fatalf("JSON arrays may only contain objects. This " +
+					"corresponds to only tables being allowed in " +
+					"TOML table arrays.")
+			}
+		}
+		return tabArray
+	}
+	log.Fatalf("Unrecognized JSON format '%T'.", typedJson)
+	panic("unreachable")
+}
+
+func untag(typed map[string]interface{}) interface{} {
+	t := typed["type"].(string)
+	v := typed["value"]
+	switch t {
+	case "string":
+		return v.(string)
+	case "integer":
+		v := v.(string)
+		n, err := strconv.Atoi(v)
+		if err != nil {
+			log.Fatalf("Could not parse '%s' as integer: %s", v, err)
+		}
+		return n
+	case "float":
+		v := v.(string)
+		f, err := strconv.ParseFloat(v, 64)
+		if err != nil {
+			log.Fatalf("Could not parse '%s' as float64: %s", v, err)
+		}
+		return f
+	case "datetime":
+		v := v.(string)
+		t, err := time.Parse("2006-01-02T15:04:05Z", v)
+		if err != nil {
+			log.Fatalf("Could not parse '%s' as a datetime: %s", v, err)
+		}
+		return t
+	case "bool":
+		v := v.(string)
+		switch v {
+		case "true":
+			return true
+		case "false":
+			return false
+		}
+		log.Fatalf("Could not parse '%s' as a boolean.", v)
+	case "array":
+		v := v.([]interface{})
+		array := make([]interface{}, len(v))
+		for i := range v {
+			if m, ok := v[i].(map[string]interface{}); ok {
+				array[i] = untag(m)
+			} else {
+				log.Fatalf("Arrays may only contain other arrays or "+
+					"primitive values, but found a '%T'.", m)
+			}
+		}
+		return array
+	}
+	log.Fatalf("Unrecognized tag type '%s'.", t)
+	panic("unreachable")
+}
+
+func in(key string, m map[string]interface{}) bool {
+	_, ok := m[key]
+	return ok
+}

+ 14 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING

@@ -0,0 +1,14 @@
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+                    Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. You just DO WHAT THE FUCK YOU WANT TO.
+

+ 22 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md

@@ -0,0 +1,22 @@
+# TOML Validator
+
+If Go is installed, it's simple to try it out:
+
+```bash
+go get github.com/BurntSushi/toml/cmd/tomlv
+tomlv some-toml-file.toml
+```
+
+You can see the types of every key in a TOML file with:
+
+```bash
+tomlv -types some-toml-file.toml
+```
+
+At the moment, only one error message is reported at a time. Error messages
+include line numbers. No output means that the files given are valid TOML, or 
+there is a bug in `tomlv`.
+
+Compatible with TOML version
+[v0.1.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.1.0.md)
+

+ 61 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go

@@ -0,0 +1,61 @@
+// Command tomlv validates TOML documents and prints each key's type.
+package main
+
+import (
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"path"
+	"strings"
+	"text/tabwriter"
+
+	"github.com/BurntSushi/toml"
+)
+
+var (
+	flagTypes = false
+)
+
+func init() {
+	log.SetFlags(0)
+
+	flag.BoolVar(&flagTypes, "types", flagTypes,
+		"When set, the types of every defined key will be shown.")
+
+	flag.Usage = usage
+	flag.Parse()
+}
+
+func usage() {
+	log.Printf("Usage: %s toml-file [ toml-file ... ]\n",
+		path.Base(os.Args[0]))
+	flag.PrintDefaults()
+
+	os.Exit(1)
+}
+
+func main() {
+	if flag.NArg() < 1 {
+		flag.Usage()
+	}
+	for _, f := range flag.Args() {
+		var tmp interface{}
+		md, err := toml.DecodeFile(f, &tmp)
+		if err != nil {
+			log.Fatalf("Error in '%s': %s", f, err)
+		}
+		if flagTypes {
+			printTypes(md)
+		}
+	}
+}
+
+func printTypes(md toml.MetaData) {
+	tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
+	for _, key := range md.Keys() {
+		fmt.Fprintf(tabw, "%s%s\t%s\n",
+			strings.Repeat("    ", len(key)-1), key, md.Type(key...))
+	}
+	tabw.Flush()
+}

+ 492 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go

@@ -0,0 +1,492 @@
+package toml
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"reflect"
+	"strings"
+	"time"
+)
+
+var e = fmt.Errorf
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface {
+	UnmarshalTOML(interface{}) error
+}
+
+// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
+func Unmarshal(p []byte, v interface{}) error {
+	_, err := Decode(string(p), v)
+	return err
+}
+
+// Primitive is a TOML value that hasn't been decoded into a Go value.
+// When using the various `Decode*` functions, the type `Primitive` may
+// be given to any value, and its decoding will be delayed.
+//
+// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+//
+// The underlying representation of a `Primitive` value is subject to change.
+// Do not rely on it.
+//
+// N.B. Primitive values are still parsed, so using them will only avoid
+// the overhead of reflection. They can be useful when you don't know the
+// exact type of TOML data until run time.
+type Primitive struct {
+	undecoded interface{}
+	context   Key
+}
+
+// DEPRECATED!
+//
+// Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md := MetaData{decoded: make(map[string]bool)}
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// PrimitiveDecode is just like the other `Decode*` functions, except it
+// decodes a TOML value that has already been parsed. Valid primitive values
+// can *only* be obtained from values filled by the decoder functions,
+// including this method. (i.e., `v` may contain more `Primitive`
+// values.)
+//
+// Meta data for primitive values is included in the meta data returned by
+// the `Decode*` functions with one exception: keys returned by the Undecoded
+// method will only reflect keys that were decoded. Namely, any keys hidden
+// behind a Primitive will be considered undecoded. Executing this method will
+// update the undecoded keys in the meta data. (See the example.)
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md.context = primValue.context
+	defer func() { md.context = nil }()
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Decode will decode the contents of `data` in TOML format into a pointer
+// `v`.
+//
+// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
+// used interchangeably.)
+//
+// TOML arrays of tables correspond to either a slice of structs or a slice
+// of maps.
+//
+// TOML datetimes correspond to Go `time.Time` values.
+//
+// All other TOML types (float, string, int, bool and array) correspond
+// to the obvious Go types.
+//
+// An exception to the above rules is if a type implements the
+// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
+// (floats, strings, integers, booleans and datetimes) will be converted to
+// a byte string and given to the value's UnmarshalText method. See the
+// Unmarshaler example for a demonstration with time duration strings.
+//
+// Key mapping
+//
+// TOML keys can map to either keys in a Go map or field names in a Go
+// struct. The special `toml` struct tag may be used to map TOML keys to
+// struct fields that don't match the key name exactly. (See the example.)
+// A case insensitive match to struct names will be tried if an exact match
+// can't be found.
+//
+// The mapping between TOML values and Go values is loose. That is, there
+// may exist TOML values that cannot be placed into your representation, and
+// there may be parts of your representation that do not correspond to
+// TOML values. This loose mapping can be made stricter by using the IsDefined
+// and/or Undecoded methods on the MetaData returned.
+//
+// This decoder will not handle cyclic types. If a cyclic type is passed,
+// `Decode` will not terminate.
+func Decode(data string, v interface{}) (MetaData, error) {
+	p, err := parse(data)
+	if err != nil {
+		return MetaData{}, err
+	}
+	md := MetaData{
+		p.mapping, p.types, p.ordered,
+		make(map[string]bool, len(p.ordered)), nil,
+	}
+	return md, md.unify(p.mapping, rvalue(v))
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at `fpath` and decode it for you.
+func DecodeFile(fpath string, v interface{}) (MetaData, error) {
+	bs, err := ioutil.ReadFile(fpath)
+	if err != nil {
+		return MetaData{}, err
+	}
+	return Decode(string(bs), v)
+}
+
+// DecodeReader is just like Decode, except it will consume all bytes
+// from the reader and decode it for you.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+	bs, err := ioutil.ReadAll(r)
+	if err != nil {
+		return MetaData{}, err
+	}
+	return Decode(string(bs), v)
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+
+	// Special case. Look for a `Primitive` value.
+	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+		// Save the undecoded data and the key context into the primitive
+		// value.
+		context := make(Key, len(md.context))
+		copy(context, md.context)
+		rv.Set(reflect.ValueOf(Primitive{
+			undecoded: data,
+			context:   context,
+		}))
+		return nil
+	}
+
+	// Special case. Unmarshaler Interface support.
+	if rv.CanAddr() {
+		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+			return v.UnmarshalTOML(data)
+		}
+	}
+
+	// Special case. Handle time.Time values specifically.
+	// TODO: Remove this code when we decide to drop support for Go 1.1.
+	// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
+	// interfaces.
+	if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
+		return md.unifyDatetime(data, rv)
+	}
+
+	// Special case. Look for a value satisfying the TextUnmarshaler interface.
+	if v, ok := rv.Interface().(TextUnmarshaler); ok {
+		return md.unifyText(data, v)
+	}
+	// BUG(burntsushi)
+	// The behavior here is incorrect whenever a Go type satisfies the
+	// encoding.TextUnmarshaler interface but also corresponds to a TOML
+	// hash or array. In particular, the unmarshaler should only be applied
+	// to primitive TOML values. But at this point, it will be applied to
+	// all kinds of values and produce an incorrect error whenever those values
+	// are hashes or arrays (including arrays of tables).
+
+	k := rv.Kind()
+
+	// laziness
+	if k >= reflect.Int && k <= reflect.Uint64 {
+		return md.unifyInt(data, rv)
+	}
+	switch k {
+	case reflect.Ptr:
+		elem := reflect.New(rv.Type().Elem())
+		err := md.unify(data, reflect.Indirect(elem))
+		if err != nil {
+			return err
+		}
+		rv.Set(elem)
+		return nil
+	case reflect.Struct:
+		return md.unifyStruct(data, rv)
+	case reflect.Map:
+		return md.unifyMap(data, rv)
+	case reflect.Array:
+		return md.unifyArray(data, rv)
+	case reflect.Slice:
+		return md.unifySlice(data, rv)
+	case reflect.String:
+		return md.unifyString(data, rv)
+	case reflect.Bool:
+		return md.unifyBool(data, rv)
+	case reflect.Interface:
+		// we only support empty interfaces.
+		if rv.NumMethod() > 0 {
+			return e("Unsupported type '%s'.", rv.Kind())
+		}
+		return md.unifyAnything(data, rv)
+	case reflect.Float32:
+		fallthrough
+	case reflect.Float64:
+		return md.unifyFloat64(data, rv)
+	}
+	return e("Unsupported type '%s'.", rv.Kind())
+}
+
+func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
+	tmap, ok := mapping.(map[string]interface{})
+	if !ok {
+		return mismatch(rv, "map", mapping)
+	}
+
+	for key, datum := range tmap {
+		var f *field
+		fields := cachedTypeFields(rv.Type())
+		for i := range fields {
+			ff := &fields[i]
+			if ff.name == key {
+				f = ff
+				break
+			}
+			if f == nil && strings.EqualFold(ff.name, key) {
+				f = ff
+			}
+		}
+		if f != nil {
+			subv := rv
+			for _, i := range f.index {
+				subv = indirect(subv.Field(i))
+			}
+			if isUnifiable(subv) {
+				md.decoded[md.context.add(key).String()] = true
+				md.context = append(md.context, key)
+				if err := md.unify(datum, subv); err != nil {
+					return e("Type mismatch for '%s.%s': %s",
+						rv.Type().String(), f.name, err)
+				}
+				md.context = md.context[0 : len(md.context)-1]
+			} else if f.name != "" {
+				// Bad user! No soup for you!
+				return e("Field '%s.%s' is unexported, and therefore cannot "+
+					"be loaded with reflection.", rv.Type().String(), f.name)
+			}
+		}
+	}
+	return nil
+}
+
+func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+	tmap, ok := mapping.(map[string]interface{})
+	if !ok {
+		return badtype("map", mapping)
+	}
+	if rv.IsNil() {
+		rv.Set(reflect.MakeMap(rv.Type()))
+	}
+	for k, v := range tmap {
+		md.decoded[md.context.add(k).String()] = true
+		md.context = append(md.context, k)
+
+		rvkey := indirect(reflect.New(rv.Type().Key()))
+		rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
+		if err := md.unify(v, rvval); err != nil {
+			return err
+		}
+		md.context = md.context[0 : len(md.context)-1]
+
+		rvkey.SetString(k)
+		rv.SetMapIndex(rvkey, rvval)
+	}
+	return nil
+}
+
+func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+	datav := reflect.ValueOf(data)
+	if datav.Kind() != reflect.Slice {
+		return badtype("slice", data)
+	}
+	sliceLen := datav.Len()
+	if sliceLen != rv.Len() {
+		return e("expected array length %d; got TOML array of length %d",
+			rv.Len(), sliceLen)
+	}
+	return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+	datav := reflect.ValueOf(data)
+	if datav.Kind() != reflect.Slice {
+		return badtype("slice", data)
+	}
+	sliceLen := datav.Len()
+	if rv.IsNil() {
+		rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen))
+	}
+	return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
+	sliceLen := data.Len()
+	for i := 0; i < sliceLen; i++ {
+		v := data.Index(i).Interface()
+		sliceval := indirect(rv.Index(i))
+		if err := md.unify(v, sliceval); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
+	if _, ok := data.(time.Time); ok {
+		rv.Set(reflect.ValueOf(data))
+		return nil
+	}
+	return badtype("time.Time", data)
+}
+
+func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+	if s, ok := data.(string); ok {
+		rv.SetString(s)
+		return nil
+	}
+	return badtype("string", data)
+}
+
+func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+	if num, ok := data.(float64); ok {
+		switch rv.Kind() {
+		case reflect.Float32:
+			fallthrough
+		case reflect.Float64:
+			rv.SetFloat(num)
+		default:
+			panic("bug")
+		}
+		return nil
+	}
+	return badtype("float", data)
+}
+
+func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+	if num, ok := data.(int64); ok {
+		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
+			switch rv.Kind() {
+			case reflect.Int, reflect.Int64:
+				// No bounds checking necessary.
+			case reflect.Int8:
+				if num < math.MinInt8 || num > math.MaxInt8 {
+					return e("Value '%d' is out of range for int8.", num)
+				}
+			case reflect.Int16:
+				if num < math.MinInt16 || num > math.MaxInt16 {
+					return e("Value '%d' is out of range for int16.", num)
+				}
+			case reflect.Int32:
+				if num < math.MinInt32 || num > math.MaxInt32 {
+					return e("Value '%d' is out of range for int32.", num)
+				}
+			}
+			rv.SetInt(num)
+		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
+			unum := uint64(num)
+			switch rv.Kind() {
+			case reflect.Uint, reflect.Uint64:
+				// No bounds checking necessary.
+			case reflect.Uint8:
+				if num < 0 || unum > math.MaxUint8 {
+					return e("Value '%d' is out of range for uint8.", num)
+				}
+			case reflect.Uint16:
+				if num < 0 || unum > math.MaxUint16 {
+					return e("Value '%d' is out of range for uint16.", num)
+				}
+			case reflect.Uint32:
+				if num < 0 || unum > math.MaxUint32 {
+					return e("Value '%d' is out of range for uint32.", num)
+				}
+			}
+			rv.SetUint(unum)
+		} else {
+			panic("unreachable")
+		}
+		return nil
+	}
+	return badtype("integer", data)
+}
+
+func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+	if b, ok := data.(bool); ok {
+		rv.SetBool(b)
+		return nil
+	}
+	return badtype("boolean", data)
+}
+
+func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+	rv.Set(reflect.ValueOf(data))
+	return nil
+}
+
+func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
+	var s string
+	switch sdata := data.(type) {
+	case TextMarshaler:
+		text, err := sdata.MarshalText()
+		if err != nil {
+			return err
+		}
+		s = string(text)
+	case fmt.Stringer:
+		s = sdata.String()
+	case string:
+		s = sdata
+	case bool:
+		s = fmt.Sprintf("%v", sdata)
+	case int64:
+		s = fmt.Sprintf("%d", sdata)
+	case float64:
+		s = fmt.Sprintf("%f", sdata)
+	default:
+		return badtype("primitive (string-like)", data)
+	}
+	if err := v.UnmarshalText([]byte(s)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// rvalue returns a reflect.Value of `v`. All pointers are resolved.
+func rvalue(v interface{}) reflect.Value {
+	return indirect(reflect.ValueOf(v))
+}
+
+// indirect returns the value pointed to by a pointer.
+// Pointers are followed until the value is not a pointer.
+// New values are allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of
+// interest to us (like encoding.TextUnmarshaler).
+func indirect(v reflect.Value) reflect.Value {
+	if v.Kind() != reflect.Ptr {
+		if v.CanAddr() {
+			pv := v.Addr()
+			if _, ok := pv.Interface().(TextUnmarshaler); ok {
+				return pv
+			}
+		}
+		return v
+	}
+	if v.IsNil() {
+		v.Set(reflect.New(v.Type().Elem()))
+	}
+	return indirect(reflect.Indirect(v))
+}
+
+func isUnifiable(rv reflect.Value) bool {
+	if rv.CanSet() {
+		return true
+	}
+	if _, ok := rv.Interface().(TextUnmarshaler); ok {
+		return true
+	}
+	return false
+}
+
+func badtype(expected string, data interface{}) error {
+	return e("Expected %s but found '%T'.", expected, data)
+}
+
+func mismatch(user reflect.Value, expected string, data interface{}) error {
+	return e("Type mismatch for %s. Expected %s but found '%T'.",
+		user.Type().String(), expected, data)
+}

+ 122 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go

@@ -0,0 +1,122 @@
+package toml
+
+import "strings"
+
+// MetaData allows access to meta information about TOML data that may not
+// be inferrable via reflection. In particular, whether a key has been defined
+// and the TOML type of a key.
+type MetaData struct {
+	mapping map[string]interface{}
+	types   map[string]tomlType
+	keys    []Key
+	decoded map[string]bool
+	context Key // Used only during decoding.
+}
+
+// IsDefined returns true if the key given exists in the TOML data. The key
+// should be specified hierarchically. e.g.,
+//
+//	// access the TOML key 'a.b.c'
+//	IsDefined("a", "b", "c")
+//
+// IsDefined will return false if an empty key is given. Keys are case sensitive.
+func (md *MetaData) IsDefined(key ...string) bool {
+	if len(key) == 0 {
+		return false
+	}
+
+	var hash map[string]interface{}
+	var ok bool
+	var hashOrVal interface{} = md.mapping
+	for _, k := range key {
+		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+			return false
+		}
+		if hashOrVal, ok = hash[k]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+// Type returns a string representation of the type of the key specified.
+//
+// Type will return the empty string if given an empty key or a key that
+// does not exist. Keys are case sensitive.
+func (md *MetaData) Type(key ...string) string {
+	fullkey := strings.Join(key, ".")
+	if typ, ok := md.types[fullkey]; ok {
+		return typ.typeString()
+	}
+	return ""
+}
+
+// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
+// to get values of this type.
+type Key []string
+
+func (k Key) String() string {
+	return strings.Join(k, ".")
+}
+
+func (k Key) maybeQuotedAll() string {
+	var ss []string
+	for i := range k {
+		ss = append(ss, k.maybeQuoted(i))
+	}
+	return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+	quote := false
+	for _, c := range k[i] {
+		if !isBareKeyChar(c) {
+			quote = true
+			break
+		}
+	}
+	if quote {
+		return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
+	} else {
+		return k[i]
+	}
+}
+
+func (k Key) add(piece string) Key {
+	newKey := make(Key, len(k)+1)
+	copy(newKey, k)
+	newKey[len(k)] = piece
+	return newKey
+}
+
+// Keys returns a slice of every key in the TOML data, including key groups.
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific.
+//
+// The list will have the same order as the keys appeared in the TOML data.
+//
+// All keys returned are non-empty.
+func (md *MetaData) Keys() []Key {
+	return md.keys
+}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+	undecoded := make([]Key, 0, len(md.keys))
+	for _, key := range md.keys {
+		if !md.decoded[key.String()] {
+			undecoded = append(undecoded, key)
+		}
+	}
+	return undecoded
+}

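A small usage sketch for the MetaData queries defined above (the TOML literal is illustrative; the expected results are noted in comments):

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	md, err := toml.Decode(`
[servers.alpha]
ip = "10.0.0.1"
`, &v)
	if err != nil {
		log.Fatal(err)
	}

	// Hierarchical existence check; keys are case sensitive.
	fmt.Println(md.IsDefined("servers", "alpha", "ip")) // true
	fmt.Println(md.IsDefined("servers", "beta"))        // false

	// TOML type of a key (empty string for keys that don't exist).
	fmt.Println(md.Type("servers", "alpha", "ip"))

	// Every key, in the order it appeared in the document.
	for _, k := range md.Keys() {
		fmt.Println(k.String())
	}
}
```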
+ 950 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go

@@ -0,0 +1,950 @@
+package toml
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"testing"
+	"time"
+)
+
+func init() {
+	log.SetFlags(0)
+}
+
+func TestDecodeSimple(t *testing.T) {
+	var testSimple = `
+age = 250
+andrew = "gallant"
+kait = "brady"
+now = 1987-07-05T05:45:00Z
+yesOrNo = true
+pi = 3.14
+colors = [
+	["red", "green", "blue"],
+	["cyan", "magenta", "yellow", "black"],
+]
+
+[My.Cats]
+plato = "cat 1"
+cauchy = "cat 2"
+`
+
+	type cats struct {
+		Plato  string
+		Cauchy string
+	}
+	type simple struct {
+		Age     int
+		Colors  [][]string
+		Pi      float64
+		YesOrNo bool
+		Now     time.Time
+		Andrew  string
+		Kait    string
+		My      map[string]cats
+	}
+
+	var val simple
+	_, err := Decode(testSimple, &val)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00")
+	if err != nil {
+		panic(err)
+	}
+	var answer = simple{
+		Age:     250,
+		Andrew:  "gallant",
+		Kait:    "brady",
+		Now:     now,
+		YesOrNo: true,
+		Pi:      3.14,
+		Colors: [][]string{
+			{"red", "green", "blue"},
+			{"cyan", "magenta", "yellow", "black"},
+		},
+		My: map[string]cats{
+			"Cats": cats{Plato: "cat 1", Cauchy: "cat 2"},
+		},
+	}
+	if !reflect.DeepEqual(val, answer) {
+		t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n",
+			answer, val)
+	}
+}
+
+func TestDecodeEmbedded(t *testing.T) {
+	type Dog struct{ Name string }
+	type Age int
+
+	tests := map[string]struct {
+		input       string
+		decodeInto  interface{}
+		wantDecoded interface{}
+	}{
+		"embedded struct": {
+			input:       `Name = "milton"`,
+			decodeInto:  &struct{ Dog }{},
+			wantDecoded: &struct{ Dog }{Dog{"milton"}},
+		},
+		"embedded non-nil pointer to struct": {
+			input:       `Name = "milton"`,
+			decodeInto:  &struct{ *Dog }{},
+			wantDecoded: &struct{ *Dog }{&Dog{"milton"}},
+		},
+		"embedded nil pointer to struct": {
+			input:       ``,
+			decodeInto:  &struct{ *Dog }{},
+			wantDecoded: &struct{ *Dog }{nil},
+		},
+		"embedded int": {
+			input:       `Age = -5`,
+			decodeInto:  &struct{ Age }{},
+			wantDecoded: &struct{ Age }{-5},
+		},
+	}
+
+	for label, test := range tests {
+		_, err := Decode(test.input, test.decodeInto)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) {
+			t.Errorf("%s: want decoded == %+v, got %+v",
+				label, test.wantDecoded, test.decodeInto)
+		}
+	}
+}
+
+func TestTableArrays(t *testing.T) {
+	var tomlTableArrays = `
+[[albums]]
+name = "Born to Run"
+
+  [[albums.songs]]
+  name = "Jungleland"
+
+  [[albums.songs]]
+  name = "Meeting Across the River"
+
+[[albums]]
+name = "Born in the USA"
+
+  [[albums.songs]]
+  name = "Glory Days"
+
+  [[albums.songs]]
+  name = "Dancing in the Dark"
+`
+
+	type Song struct {
+		Name string
+	}
+
+	type Album struct {
+		Name  string
+		Songs []Song
+	}
+
+	type Music struct {
+		Albums []Album
+	}
+
+	expected := Music{[]Album{
+		{"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}},
+		{"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}},
+	}}
+	var got Music
+	if _, err := Decode(tomlTableArrays, &got); err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(expected, got) {
+		t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
+	}
+}
+
+// Case insensitive matching tests.
+// A bit more comprehensive than needed given the current implementation,
+// but implementations change.
+// Probably still missing demonstrations of some ugly corner cases regarding
+// case insensitive matching and multiple fields.
+func TestCase(t *testing.T) {
+	var caseToml = `
+tOpString = "string"
+tOpInt = 1
+tOpFloat = 1.1
+tOpBool = true
+tOpdate = 2006-01-02T15:04:05Z
+tOparray = [ "array" ]
+Match = "i should be in Match only"
+MatcH = "i should be in MatcH only"
+once = "just once"
+[nEst.eD]
+nEstedString = "another string"
+`
+
+	type InsensitiveEd struct {
+		NestedString string
+	}
+
+	type InsensitiveNest struct {
+		Ed InsensitiveEd
+	}
+
+	type Insensitive struct {
+		TopString string
+		TopInt    int
+		TopFloat  float64
+		TopBool   bool
+		TopDate   time.Time
+		TopArray  []string
+		Match     string
+		MatcH     string
+		Once      string
+		OncE      string
+		Nest      InsensitiveNest
+	}
+
+	tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5])
+	if err != nil {
+		panic(err)
+	}
+	expected := Insensitive{
+		TopString: "string",
+		TopInt:    1,
+		TopFloat:  1.1,
+		TopBool:   true,
+		TopDate:   tme,
+		TopArray:  []string{"array"},
+		MatcH:     "i should be in MatcH only",
+		Match:     "i should be in Match only",
+		Once:      "just once",
+		OncE:      "",
+		Nest: InsensitiveNest{
+			Ed: InsensitiveEd{NestedString: "another string"},
+		},
+	}
+	var got Insensitive
+	if _, err := Decode(caseToml, &got); err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(expected, got) {
+		t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
+	}
+}
+
+func TestPointers(t *testing.T) {
+	type Object struct {
+		Type        string
+		Description string
+	}
+
+	type Dict struct {
+		NamedObject map[string]*Object
+		BaseObject  *Object
+		Strptr      *string
+		Strptrs     []*string
+	}
+	s1, s2, s3 := "blah", "abc", "def"
+	expected := &Dict{
+		Strptr:  &s1,
+		Strptrs: []*string{&s2, &s3},
+		NamedObject: map[string]*Object{
+			"foo": {"FOO", "fooooo!!!"},
+			"bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"},
+		},
+		BaseObject: &Object{"BASE", "da base"},
+	}
+
+	ex1 := `
+Strptr = "blah"
+Strptrs = ["abc", "def"]
+
+[NamedObject.foo]
+Type = "FOO"
+Description = "fooooo!!!"
+
+[NamedObject.bar]
+Type = "BAR"
+Description = "ba-ba-ba-ba-barrrr!!!"
+
+[BaseObject]
+Type = "BASE"
+Description = "da base"
+`
+	dict := new(Dict)
+	_, err := Decode(ex1, dict)
+	if err != nil {
+		t.Errorf("Decode error: %v", err)
+	}
+	if !reflect.DeepEqual(expected, dict) {
+		t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict)
+	}
+}
+
+type sphere struct {
+	Center [3]float64
+	Radius float64
+}
+
+func TestDecodeSimpleArray(t *testing.T) {
+	var s1 sphere
+	if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestDecodeArrayWrongSize(t *testing.T) {
+	var s1 sphere
+	if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil {
+		t.Fatal("Expected array type mismatch error")
+	}
+}
+
+func TestDecodeLargeIntoSmallInt(t *testing.T) {
+	type table struct {
+		Value int8
+	}
+	var tab table
+	if _, err := Decode(`value = 500`, &tab); err == nil {
+		t.Fatal("Expected integer out-of-bounds error.")
+	}
+}
+
+func TestDecodeSizedInts(t *testing.T) {
+	type table struct {
+		U8  uint8
+		U16 uint16
+		U32 uint32
+		U64 uint64
+		U   uint
+		I8  int8
+		I16 int16
+		I32 int32
+		I64 int64
+		I   int
+	}
+	answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1}
+	toml := `
+	u8 = 1
+	u16 = 1
+	u32 = 1
+	u64 = 1
+	u = 1
+	i8 = -1
+	i16 = -1
+	i32 = -1
+	i64 = -1
+	i = -1
+	`
+	var tab table
+	if _, err := Decode(toml, &tab); err != nil {
+		t.Fatal(err.Error())
+	}
+	if answer != tab {
+		t.Fatalf("Expected %#v but got %#v", answer, tab)
+	}
+}
+
+func TestUnmarshaler(t *testing.T) {
+
+	var tomlBlob = `
+[dishes.hamboogie]
+name = "Hamboogie with fries"
+price = 10.99
+
+[[dishes.hamboogie.ingredients]]
+name = "Bread Bun"
+
+[[dishes.hamboogie.ingredients]]
+name = "Lettuce"
+
+[[dishes.hamboogie.ingredients]]
+name = "Real Beef Patty"
+
+[[dishes.hamboogie.ingredients]]
+name = "Tomato"
+
+[dishes.eggsalad]
+name = "Egg Salad with rice"
+price = 3.99
+
+[[dishes.eggsalad.ingredients]]
+name = "Egg"
+
+[[dishes.eggsalad.ingredients]]
+name = "Mayo"
+
+[[dishes.eggsalad.ingredients]]
+name = "Rice"
+`
+	m := &menu{}
+	if _, err := Decode(tomlBlob, m); err != nil {
+		log.Fatal(err)
+	}
+
+	if len(m.Dishes) != 2 {
+		t.Log("two dishes should be loaded with UnmarshalTOML()")
+		t.Errorf("expected %d but got %d", 2, len(m.Dishes))
+	}
+
+	eggSalad := m.Dishes["eggsalad"]
+	if _, ok := interface{}(eggSalad).(dish); !ok {
+		t.Errorf("expected a dish")
+	}
+
+	if eggSalad.Name != "Egg Salad with rice" {
+		t.Errorf("expected the dish to be named 'Egg Salad with rice'")
+	}
+
+	if len(eggSalad.Ingredients) != 3 {
+		t.Log("dish should be loaded with UnmarshalTOML()")
+		t.Errorf("expected %d but got %d", 3, len(eggSalad.Ingredients))
+	}
+
+	found := false
+	for _, i := range eggSalad.Ingredients {
+		if i.Name == "Rice" {
+			found = true
+			break
+		}
+	}
+	if !found {
+		t.Error("Rice was not loaded in UnmarshalTOML()")
+	}
+
+	// test on a value - must be passed as *
+	o := menu{}
+	if _, err := Decode(tomlBlob, &o); err != nil {
+		log.Fatal(err)
+	}
+
+}
+
+type menu struct {
+	Dishes map[string]dish
+}
+
+func (m *menu) UnmarshalTOML(p interface{}) error {
+	m.Dishes = make(map[string]dish)
+	data, _ := p.(map[string]interface{})
+	dishes := data["dishes"].(map[string]interface{})
+	for n, v := range dishes {
+		if d, ok := v.(map[string]interface{}); ok {
+			nd := dish{}
+			nd.UnmarshalTOML(d)
+			m.Dishes[n] = nd
+		} else {
+			return fmt.Errorf("not a dish")
+		}
+	}
+	return nil
+}
+
+type dish struct {
+	Name        string
+	Price       float32
+	Ingredients []ingredient
+}
+
+func (d *dish) UnmarshalTOML(p interface{}) error {
+	data, _ := p.(map[string]interface{})
+	d.Name, _ = data["name"].(string)
+	d.Price, _ = data["price"].(float32)
+	ingredients, _ := data["ingredients"].([]map[string]interface{})
+	for _, e := range ingredients {
+		n, _ := interface{}(e).(map[string]interface{})
+		name, _ := n["name"].(string)
+		i := ingredient{name}
+		d.Ingredients = append(d.Ingredients, i)
+	}
+	return nil
+}
+
+type ingredient struct {
+	Name string
+}
+
+func ExampleMetaData_PrimitiveDecode() {
+	var md MetaData
+	var err error
+
+	var tomlBlob = `
+ranking = ["Springsteen", "J Geils"]
+
+[bands.Springsteen]
+started = 1973
+albums = ["Greetings", "WIESS", "Born to Run", "Darkness"]
+
+[bands."J Geils"]
+started = 1970
+albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"]
+`
+
+	type band struct {
+		Started int
+		Albums  []string
+	}
+	type classics struct {
+		Ranking []string
+		Bands   map[string]Primitive
+	}
+
+	// Do the initial decode. Reflection is delayed on Primitive values.
+	var music classics
+	if md, err = Decode(tomlBlob, &music); err != nil {
+		log.Fatal(err)
+	}
+
+	// MetaData still includes information on Primitive values.
+	fmt.Printf("Is `bands.Springsteen` defined? %v\n",
+		md.IsDefined("bands", "Springsteen"))
+
+	// Decode primitive data into Go values.
+	for _, artist := range music.Ranking {
+		// A band is a primitive value, so we need to decode it to get a
+		// real `band` value.
+		primValue := music.Bands[artist]
+
+		var aBand band
+		if err = md.PrimitiveDecode(primValue, &aBand); err != nil {
+			log.Fatal(err)
+		}
+		fmt.Printf("%s started in %d.\n", artist, aBand.Started)
+	}
+	// Check to see if there were any fields left undecoded.
+	// Note that this won't be empty before decoding the Primitive value!
+	fmt.Printf("Undecoded: %q\n", md.Undecoded())
+
+	// Output:
+	// Is `bands.Springsteen` defined? true
+	// Springsteen started in 1973.
+	// J Geils started in 1970.
+	// Undecoded: []
+}
+
+func ExampleDecode() {
+	var tomlBlob = `
+# Some comments.
+[alpha]
+ip = "10.0.0.1"
+
+	[alpha.config]
+	Ports = [ 8001, 8002 ]
+	Location = "Toronto"
+	Created = 1987-07-05T05:45:00Z
+
+[beta]
+ip = "10.0.0.2"
+
+	[beta.config]
+	Ports = [ 9001, 9002 ]
+	Location = "New Jersey"
+	Created = 1887-01-05T05:55:00Z
+`
+
+	type serverConfig struct {
+		Ports    []int
+		Location string
+		Created  time.Time
+	}
+
+	type server struct {
+		IP     string       `toml:"ip"`
+		Config serverConfig `toml:"config"`
+	}
+
+	type servers map[string]server
+
+	var config servers
+	if _, err := Decode(tomlBlob, &config); err != nil {
+		log.Fatal(err)
+	}
+
+	for _, name := range []string{"alpha", "beta"} {
+		s := config[name]
+		fmt.Printf("Server: %s (ip: %s) in %s created on %s\n",
+			name, s.IP, s.Config.Location,
+			s.Config.Created.Format("2006-01-02"))
+		fmt.Printf("Ports: %v\n", s.Config.Ports)
+	}
+
+	// Output:
+	// Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05
+	// Ports: [8001 8002]
+	// Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05
+	// Ports: [9001 9002]
+}
+
+type duration struct {
+	time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+	var err error
+	d.Duration, err = time.ParseDuration(string(text))
+	return err
+}
+
+// Example Unmarshaler shows how to decode TOML strings into your own
+// custom data type.
+func Example_unmarshaler() {
+	blob := `
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+`
+	type song struct {
+		Name     string
+		Duration duration
+	}
+	type songs struct {
+		Song []song
+	}
+	var favorites songs
+	if _, err := Decode(blob, &favorites); err != nil {
+		log.Fatal(err)
+	}
+
+	// Code to implement the TextUnmarshaler interface for `duration`:
+	//
+	// type duration struct {
+	// 	time.Duration
+	// }
+	//
+	// func (d *duration) UnmarshalText(text []byte) error {
+	// 	var err error
+	// 	d.Duration, err = time.ParseDuration(string(text))
+	// 	return err
+	// }
+
+	for _, s := range favorites.Song {
+		fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+	}
+	// Output:
+	// Thunder Road (4m49s)
+	// Stairway to Heaven (8m3s)
+}
+
+// Example StrictDecoding shows how to detect whether there are keys in the
+// TOML document that weren't decoded into the value given. This is useful
+// for returning an error to the user if they've included extraneous fields
+// in their configuration.
+func Example_strictDecoding() {
+	var blob = `
+key1 = "value1"
+key2 = "value2"
+key3 = "value3"
+`
+	type config struct {
+		Key1 string
+		Key3 string
+	}
+
+	var conf config
+	md, err := Decode(blob, &conf)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("Undecoded keys: %q\n", md.Undecoded())
+	// Output:
+	// Undecoded keys: ["key2"]
+}
+
+// Example UnmarshalTOML shows how to implement a struct type that knows how to
+// unmarshal itself. The struct must take full responsibility for mapping the
+// values passed into the struct. The method may be used with interfaces in a
+// struct in cases where the actual type is not known until the data is
+// examined.
+func Example_unmarshalTOML() {
+
+	var blob = `
+[[parts]]
+type = "valve"
+id = "valve-1"
+size = 1.2
+rating = 4
+
+[[parts]]
+type = "valve"
+id = "valve-2"
+size = 2.1
+rating = 5
+
+[[parts]]
+type = "pipe"
+id = "pipe-1"
+length = 2.1
+diameter = 12
+
+[[parts]]
+type = "cable"
+id = "cable-1"
+length = 12
+rating = 3.1
+`
+	o := &order{}
+	err := Unmarshal([]byte(blob), o)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Println(len(o.parts))
+
+	for _, part := range o.parts {
+		fmt.Println(part.Name())
+	}
+
+	// Code to implement the Unmarshaler interface for `order` and its part types:
+
+	// type order struct {
+	// 	// NOTE `order.parts` is a private slice of type `part` which is an
+	// 	// interface and may only be loaded from toml using the
+	// 	// UnmarshalTOML() method of the Unmarshaler interface.
+	// 	parts parts
+	// }
+
+	// func (o *order) UnmarshalTOML(data interface{}) error {
+
+	// 	// NOTE the example below contains detailed type casting to show how
+	// 	// the 'data' is retrieved. In operational use, a type cast wrapper
+	// 	// may be preferred, e.g.
+	// 	//
+	// 	// func AsMap(v interface{}) (map[string]interface{}, error) {
+	// 	// 	m, ok := v.(map[string]interface{})
+	// 	// 	if !ok {
+	// 	// 		return nil, fmt.Errorf("not a map")
+	// 	// 	}
+	// 	// 	return m, nil
+	// 	// }
+	// 	//
+	// 	// resulting in:
+	// 	// d, _ := AsMap(data)
+	// 	//
+
+	// 	d, _ := data.(map[string]interface{})
+	// 	parts, _ := d["parts"].([]map[string]interface{})
+
+	// 	for _, p := range parts {
+
+	// 		typ, _ := p["type"].(string)
+	// 		id, _ := p["id"].(string)
+
+	// 		// detect the type of part and handle each case
+	// 		switch p["type"] {
+	// 		case "valve":
+
+	// 			size := float32(p["size"].(float64))
+	// 			rating := int(p["rating"].(int64))
+
+	// 			valve := &valve{
+	// 				Type:   typ,
+	// 				ID:     id,
+	// 				Size:   size,
+	// 				Rating: rating,
+	// 			}
+
+	// 			o.parts = append(o.parts, valve)
+
+	// 		case "pipe":
+
+	// 			length := float32(p["length"].(float64))
+	// 			diameter := int(p["diameter"].(int64))
+
+	// 			pipe := &pipe{
+	// 				Type:     typ,
+	// 				ID:       id,
+	// 				Length:   length,
+	// 				Diameter: diameter,
+	// 			}
+
+	// 			o.parts = append(o.parts, pipe)
+
+	// 		case "cable":
+
+	// 			length := int(p["length"].(int64))
+	// 			rating := float32(p["rating"].(float64))
+
+	// 			cable := &cable{
+	// 				Type:   typ,
+	// 				ID:     id,
+	// 				Length: length,
+	// 				Rating: rating,
+	// 			}
+
+	// 			o.parts = append(o.parts, cable)
+
+	// 		}
+	// 	}
+
+	// 	return nil
+	// }
+
+	// type parts []part
+
+	// type part interface {
+	// 	Name() string
+	// }
+
+	// type valve struct {
+	// 	Type   string
+	// 	ID     string
+	// 	Size   float32
+	// 	Rating int
+	// }
+
+	// func (v *valve) Name() string {
+	// 	return fmt.Sprintf("VALVE: %s", v.ID)
+	// }
+
+	// type pipe struct {
+	// 	Type     string
+	// 	ID       string
+	// 	Length   float32
+	// 	Diameter int
+	// }
+
+	// func (p *pipe) Name() string {
+	// 	return fmt.Sprintf("PIPE: %s", p.ID)
+	// }
+
+	// type cable struct {
+	// 	Type   string
+	// 	ID     string
+	// 	Length int
+	// 	Rating float32
+	// }
+
+	// func (c *cable) Name() string {
+	// 	return fmt.Sprintf("CABLE: %s", c.ID)
+	// }
+
+	// Output:
+	// 4
+	// VALVE: valve-1
+	// VALVE: valve-2
+	// PIPE: pipe-1
+	// CABLE: cable-1
+
+}
+
+type order struct {
+	// NOTE `order.parts` is a private slice of type `part` which is an
+	// interface and may only be loaded from toml using the UnmarshalTOML()
+	// method of the Unmarshaler interface.
+	parts parts
+}
+
+func (o *order) UnmarshalTOML(data interface{}) error {
+
+	// NOTE the example below contains detailed type casting to show how
+	// the 'data' is retrieved. In operational use, a type cast wrapper
+	// may be preferred, e.g.
+	//
+	// func AsMap(v interface{}) (map[string]interface{}, error) {
+	// 	m, ok := v.(map[string]interface{})
+	// 	if !ok {
+	// 		return nil, fmt.Errorf("not a map")
+	// 	}
+	// 	return m, nil
+	// }
+	//
+	// resulting in:
+	// d, _ := AsMap(data)
+	//
+
+	d, _ := data.(map[string]interface{})
+	parts, _ := d["parts"].([]map[string]interface{})
+
+	for _, p := range parts {
+
+		typ, _ := p["type"].(string)
+		id, _ := p["id"].(string)
+
+		// detect the type of part and handle each case
+		switch p["type"] {
+		case "valve":
+
+			size := float32(p["size"].(float64))
+			rating := int(p["rating"].(int64))
+
+			valve := &valve{
+				Type:   typ,
+				ID:     id,
+				Size:   size,
+				Rating: rating,
+			}
+
+			o.parts = append(o.parts, valve)
+
+		case "pipe":
+
+			length := float32(p["length"].(float64))
+			diameter := int(p["diameter"].(int64))
+
+			pipe := &pipe{
+				Type:     typ,
+				ID:       id,
+				Length:   length,
+				Diameter: diameter,
+			}
+
+			o.parts = append(o.parts, pipe)
+
+		case "cable":
+
+			length := int(p["length"].(int64))
+			rating := float32(p["rating"].(float64))
+
+			cable := &cable{
+				Type:   typ,
+				ID:     id,
+				Length: length,
+				Rating: rating,
+			}
+
+			o.parts = append(o.parts, cable)
+
+		}
+	}
+
+	return nil
+}
+
+type parts []part
+
+type part interface {
+	Name() string
+}
+
+type valve struct {
+	Type   string
+	ID     string
+	Size   float32
+	Rating int
+}
+
+func (v *valve) Name() string {
+	return fmt.Sprintf("VALVE: %s", v.ID)
+}
+
+type pipe struct {
+	Type     string
+	ID       string
+	Length   float32
+	Diameter int
+}
+
+func (p *pipe) Name() string {
+	return fmt.Sprintf("PIPE: %s", p.ID)
+}
+
+type cable struct {
+	Type   string
+	ID     string
+	Length int
+	Rating float32
+}
+
+func (c *cable) Name() string {
+	return fmt.Sprintf("CABLE: %s", c.ID)
+}
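
Note (illustrative, not part of the vendored file): the value handed to an
UnmarshalTOML method mirrors the parser's raw output, i.e. nested
map[string]interface{} values, []map[string]interface{} for arrays of tables,
and float64 for TOML floats. A minimal sketch of a custom unmarshaler for a
hypothetical `item` type, under those assumptions:

type item struct {
	Price float32
}

func (it *item) UnmarshalTOML(v interface{}) error {
	m, _ := v.(map[string]interface{})
	// TOML floats arrive as float64; assert that type before converting.
	if p, ok := m["price"].(float64); ok {
		it.Price = float32(p)
	}
	return nil
}

Decode and Unmarshal call this method instead of reflecting over the struct's
exported fields, exactly as the menu/dish tests above demonstrate.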

+ 27 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/doc.go

@@ -0,0 +1,27 @@
+/*
+Package toml provides facilities for decoding and encoding TOML configuration
+files via reflection. There is also support for delaying decoding with
+the Primitive type, and querying the set of keys in a TOML document with the
+MetaData type.
+
+The specification implemented: https://github.com/mojombo/toml
+
+The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
+whether a file is a valid TOML document. It can also be used to print the
+type of each key in a TOML document.
+
+Testing
+
+There are two important types of tests used for this package. The first is
+contained inside '*_test.go' files and uses the standard Go unit testing
+framework. These tests are primarily devoted to holistically testing the
+decoder and encoder.
+
+The second type of testing is used to verify the implementation's adherence
+to the TOML specification. These tests have been factored into their own
+project: https://github.com/BurntSushi/toml-test
+
+The reason the tests are in a separate project is so that they can be used by
+any implementation of TOML. Namely, the test suite is language agnostic.
+*/
+package toml
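
To make the package documentation above concrete, here is a minimal,
self-contained sketch (illustrative only, not part of the diff) of the Decode
and MetaData API it describes:

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type config struct {
	Name string
	Port int
}

func main() {
	blob := "name = \"svc\"\nport = 8080\n"

	var c config
	md, err := toml.Decode(blob, &c)
	if err != nil {
		log.Fatal(err)
	}
	// MetaData reports which keys appeared and which were left undecoded.
	fmt.Println(c.Name, c.Port, md.IsDefined("port"), md.Undecoded())
}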

+ 496 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go

@@ -0,0 +1,496 @@
+package toml
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+	errArrayMixedElementTypes = errors.New(
+		"can't encode array with mixed element types")
+	errArrayNilElement = errors.New(
+		"can't encode array with nil element")
+	errNonString = errors.New(
+		"can't encode a map with non-string key type")
+	errAnonNonStruct = errors.New(
+		"can't encode an anonymous field that is not a struct")
+	errArrayNoTable = errors.New(
+		"TOML array element can't contain a table")
+	errNoKey = errors.New(
+		"top-level values must be a Go map or struct")
+	errAnything = errors.New("") // used in testing
+)
+
+var quotedReplacer = strings.NewReplacer(
+	"\t", "\\t",
+	"\n", "\\n",
+	"\r", "\\r",
+	"\"", "\\\"",
+	"\\", "\\\\",
+)
+
+// Encoder controls the encoding of Go values into a TOML document written
+// to some io.Writer.
+//
+// The indentation level can be controlled with the Indent field.
+type Encoder struct {
+	// A single indentation level. By default it is two spaces.
+	Indent string
+
+	// hasWritten is whether we have written any output to w yet.
+	hasWritten bool
+	w          *bufio.Writer
+}
+
+// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
+// given. By default, a single indentation level is 2 spaces.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:      bufio.NewWriter(w),
+		Indent: "  ",
+	}
+}
+
+// Encode writes a TOML representation of the Go value to the underlying
+// io.Writer. If the value given cannot be encoded to a valid TOML document,
+// then an error is returned.
+//
+// The mapping between Go values and TOML values should be precisely the same
+// as for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. (If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.)
+//
+// When encoding TOML hashes (i.e., Go maps or structs), keys without any
+// sub-hashes are encoded first.
+//
+// If a Go map is encoded, then its keys are sorted alphabetically for
+// deterministic output. More control over this behavior may be provided if
+// there is demand for it.
+//
+// Encoding Go values without a corresponding TOML representation---like map
+// types with non-string keys---will cause an error to be returned. Similarly
+// for mixed arrays/slices, arrays/slices with nil elements, embedded
+// non-struct types and nested slices containing maps or structs.
+// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
+// and so is []map[string][]string.)
+func (enc *Encoder) Encode(v interface{}) error {
+	rv := eindirect(reflect.ValueOf(v))
+	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+		return err
+	}
+	return enc.w.Flush()
+}
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if terr, ok := r.(tomlEncodeError); ok {
+				err = terr.error
+				return
+			}
+			panic(r)
+		}
+	}()
+	enc.encode(key, rv)
+	return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+	// Special case. Time needs to be in ISO8601 format.
+	// Special case. If we can marshal the type to text, then we use that.
+	// Basically, this prevents the encoder from handling these types as
+	// generic structs (or whatever the underlying type of a TextMarshaler is).
+	switch rv.Interface().(type) {
+	case time.Time, TextMarshaler:
+		enc.keyEqElement(key, rv)
+		return
+	}
+
+	k := rv.Kind()
+	switch k {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64,
+		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+		enc.keyEqElement(key, rv)
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+			enc.eArrayOfTables(key, rv)
+		} else {
+			enc.keyEqElement(key, rv)
+		}
+	case reflect.Interface:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Map:
+		if rv.IsNil() {
+			return
+		}
+		enc.eTable(key, rv)
+	case reflect.Ptr:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Struct:
+		enc.eTable(key, rv)
+	default:
+		panic(e("Unsupported type for key '%s': %s", key, k))
+	}
+}
+
+// eElement encodes any value that can be an array element (primitives and
+// arrays).
+func (enc *Encoder) eElement(rv reflect.Value) {
+	switch v := rv.Interface().(type) {
+	case time.Time:
+		// Special case time.Time as a primitive. Has to come before
+		// TextMarshaler below because time.Time implements
+		// encoding.TextMarshaler, but we need to always use UTC.
+		enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
+		return
+	case TextMarshaler:
+		// Special case. Use text marshaler if it's available for this value.
+		if s, err := v.MarshalText(); err != nil {
+			encPanic(err)
+		} else {
+			enc.writeQuoted(string(s))
+		}
+		return
+	}
+	switch rv.Kind() {
+	case reflect.Bool:
+		enc.wf(strconv.FormatBool(rv.Bool()))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64:
+		enc.wf(strconv.FormatInt(rv.Int(), 10))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16,
+		reflect.Uint32, reflect.Uint64:
+		enc.wf(strconv.FormatUint(rv.Uint(), 10))
+	case reflect.Float32:
+		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
+	case reflect.Float64:
+		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
+	case reflect.Array, reflect.Slice:
+		enc.eArrayOrSliceElement(rv)
+	case reflect.Interface:
+		enc.eElement(rv.Elem())
+	case reflect.String:
+		enc.writeQuoted(rv.String())
+	default:
+		panic(e("Unexpected primitive type: %s", rv.Kind()))
+	}
+}
+
+// By the TOML spec, all floats must have a decimal with at least one
+// number on either side.
+func floatAddDecimal(fstr string) string {
+	if !strings.Contains(fstr, ".") {
+		return fstr + ".0"
+	}
+	return fstr
+}
+
+func (enc *Encoder) writeQuoted(s string) {
+	enc.wf("\"%s\"", quotedReplacer.Replace(s))
+}
+
+func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
+	length := rv.Len()
+	enc.wf("[")
+	for i := 0; i < length; i++ {
+		elem := rv.Index(i)
+		enc.eElement(elem)
+		if i != length-1 {
+			enc.wf(", ")
+		}
+	}
+	enc.wf("]")
+}
+
+func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	for i := 0; i < rv.Len(); i++ {
+		trv := rv.Index(i)
+		if isNil(trv) {
+			continue
+		}
+		panicIfInvalidKey(key)
+		enc.newline()
+		enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
+		enc.newline()
+		enc.eMapOrStruct(key, trv)
+	}
+}
+
+func (enc *Encoder) eTable(key Key, rv reflect.Value) {
+	panicIfInvalidKey(key)
+	if len(key) == 1 {
+		// Output an extra new line between top-level tables.
+		// (The newline isn't written if nothing else has been written though.)
+		enc.newline()
+	}
+	if len(key) > 0 {
+		enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
+		enc.newline()
+	}
+	enc.eMapOrStruct(key, rv)
+}
+
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
+	switch rv := eindirect(rv); rv.Kind() {
+	case reflect.Map:
+		enc.eMap(key, rv)
+	case reflect.Struct:
+		enc.eStruct(key, rv)
+	default:
+		panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
+	}
+}
+
+func (enc *Encoder) eMap(key Key, rv reflect.Value) {
+	rt := rv.Type()
+	if rt.Key().Kind() != reflect.String {
+		encPanic(errNonString)
+	}
+
+	// Sort keys so that we have deterministic output. And write keys directly
+	// underneath this key first, before writing sub-structs or sub-maps.
+	var mapKeysDirect, mapKeysSub []string
+	for _, mapKey := range rv.MapKeys() {
+		k := mapKey.String()
+		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+			mapKeysSub = append(mapKeysSub, k)
+		} else {
+			mapKeysDirect = append(mapKeysDirect, k)
+		}
+	}
+
+	var writeMapKeys = func(mapKeys []string) {
+		sort.Strings(mapKeys)
+		for _, mapKey := range mapKeys {
+			mrv := rv.MapIndex(reflect.ValueOf(mapKey))
+			if isNil(mrv) {
+				// Don't write anything for nil fields.
+				continue
+			}
+			enc.encode(key.add(mapKey), mrv)
+		}
+	}
+	writeMapKeys(mapKeysDirect)
+	writeMapKeys(mapKeysSub)
+}
+
+func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
+	// Write keys for fields directly under this key first, because if we write
+	// a field that creates a new table, then all keys under it will be in that
+	// table (not the one we're writing here).
+	rt := rv.Type()
+	var fieldsDirect, fieldsSub [][]int
+	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
+	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+		for i := 0; i < rt.NumField(); i++ {
+			f := rt.Field(i)
+			// skip unexported fields
+			if f.PkgPath != "" {
+				continue
+			}
+			frv := rv.Field(i)
+			if f.Anonymous {
+				frv := eindirect(frv)
+				t := frv.Type()
+				if t.Kind() != reflect.Struct {
+					encPanic(errAnonNonStruct)
+				}
+				addFields(t, frv, f.Index)
+			} else if typeIsHash(tomlTypeOfGo(frv)) {
+				fieldsSub = append(fieldsSub, append(start, f.Index...))
+			} else {
+				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+			}
+		}
+	}
+	addFields(rt, rv, nil)
+
+	var writeFields = func(fields [][]int) {
+		for _, fieldIndex := range fields {
+			sft := rt.FieldByIndex(fieldIndex)
+			sf := rv.FieldByIndex(fieldIndex)
+			if isNil(sf) {
+				// Don't write anything for nil fields.
+				continue
+			}
+
+			keyName := sft.Tag.Get("toml")
+			if keyName == "-" {
+				continue
+			}
+			if keyName == "" {
+				keyName = sft.Name
+			}
+			enc.encode(key.add(keyName), sf)
+		}
+	}
+	writeFields(fieldsDirect)
+	writeFields(fieldsSub)
+}
+
+// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
+// which means no concrete TOML type could be found. Among other things, it is
+// used to determine whether the types of array elements are mixed (which is
+// forbidden).
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() {
+		return nil
+	}
+	switch rv.Kind() {
+	case reflect.Bool:
+		return tomlBool
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64:
+		return tomlInteger
+	case reflect.Float32, reflect.Float64:
+		return tomlFloat
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlHash, tomlArrayType(rv)) {
+			return tomlArrayHash
+		} else {
+			return tomlArray
+		}
+	case reflect.Ptr, reflect.Interface:
+		return tomlTypeOfGo(rv.Elem())
+	case reflect.String:
+		return tomlString
+	case reflect.Map:
+		return tomlHash
+	case reflect.Struct:
+		switch rv.Interface().(type) {
+		case time.Time:
+			return tomlDatetime
+		case TextMarshaler:
+			return tomlString
+		default:
+			return tomlHash
+		}
+	default:
+		panic("unexpected reflect.Kind: " + rv.Kind().String())
+	}
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+		return nil
+	}
+	firstType := tomlTypeOfGo(rv.Index(0))
+	if firstType == nil {
+		encPanic(errArrayNilElement)
+	}
+
+	rvlen := rv.Len()
+	for i := 1; i < rvlen; i++ {
+		elem := rv.Index(i)
+		switch elemType := tomlTypeOfGo(elem); {
+		case elemType == nil:
+			encPanic(errArrayNilElement)
+		case !typeEqual(firstType, elemType):
+			encPanic(errArrayMixedElementTypes)
+		}
+	}
+	// If we have a nested array, then we must make sure that the nested
+	// array contains ONLY primitives.
+	// This checks arbitrarily nested arrays.
+	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
+		nest := tomlArrayType(eindirect(rv.Index(0)))
+		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
+			encPanic(errArrayNoTable)
+		}
+	}
+	return firstType
+}
+
+func (enc *Encoder) newline() {
+	if enc.hasWritten {
+		enc.wf("\n")
+	}
+}
+
+func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	panicIfInvalidKey(key)
+	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+	enc.eElement(val)
+	enc.newline()
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+		encPanic(err)
+	}
+	enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+	return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+	panic(tomlEncodeError{err})
+}
+
+func eindirect(v reflect.Value) reflect.Value {
+	switch v.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		return eindirect(v.Elem())
+	default:
+		return v
+	}
+}
+
+func isNil(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return rv.IsNil()
+	default:
+		return false
+	}
+}
+
+func panicIfInvalidKey(key Key) {
+	for _, k := range key {
+		if len(k) == 0 {
+			encPanic(e("Key '%s' is not a valid table name. Key names "+
+				"cannot be empty.", key.maybeQuotedAll()))
+		}
+	}
+}
+
+func isValidKeyName(s string) bool {
+	return len(s) != 0
+}
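
As a quick illustration of the Encoder defined above (again illustrative only,
not part of the diff): simple key/value pairs are written before sub-tables
and map keys are sorted, so a small struct encodes roughly as shown in the
trailing comment.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	cfg := struct {
		Title string
		Owner map[string]string
	}{"example", map[string]string{"name": "anon"}}

	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// Title = "example"
	//
	// [Owner]
	//   name = "anon"
}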

+ 506 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go

@@ -0,0 +1,506 @@
+package toml
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"net"
+	"testing"
+	"time"
+)
+
+func TestEncodeRoundTrip(t *testing.T) {
+	type Config struct {
+		Age        int
+		Cats       []string
+		Pi         float64
+		Perfection []int
+		DOB        time.Time
+		Ipaddress  net.IP
+	}
+
+	var inputs = Config{
+		13,
+		[]string{"one", "two", "three"},
+		3.145,
+		[]int{11, 2, 3, 4},
+		time.Now(),
+		net.ParseIP("192.168.59.254"),
+	}
+
+	var firstBuffer bytes.Buffer
+	e := NewEncoder(&firstBuffer)
+	err := e.Encode(inputs)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var outputs Config
+	if _, err := Decode(firstBuffer.String(), &outputs); err != nil {
+		log.Printf("Could not decode:\n-----\n%s\n-----\n",
+			firstBuffer.String())
+		t.Fatal(err)
+	}
+
+	// could test each value individually, but I'm lazy
+	var secondBuffer bytes.Buffer
+	e2 := NewEncoder(&secondBuffer)
+	err = e2.Encode(outputs)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if firstBuffer.String() != secondBuffer.String() {
+		t.Error(
+			firstBuffer.String(),
+			"\n\n is not identical to\n\n",
+			secondBuffer.String())
+	}
+}
+
+// XXX(burntsushi)
+// I think these tests probably should be removed. They are good, but they
+// ought to be obsolete by toml-test.
+func TestEncode(t *testing.T) {
+	type Embedded struct {
+		Int int `toml:"_int"`
+	}
+	type NonStruct int
+
+	date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600))
+	dateStr := "2014-05-11T19:30:40Z"
+
+	tests := map[string]struct {
+		input      interface{}
+		wantOutput string
+		wantError  error
+	}{
+		"bool field": {
+			input: struct {
+				BoolTrue  bool
+				BoolFalse bool
+			}{true, false},
+			wantOutput: "BoolTrue = true\nBoolFalse = false\n",
+		},
+		"int fields": {
+			input: struct {
+				Int   int
+				Int8  int8
+				Int16 int16
+				Int32 int32
+				Int64 int64
+			}{1, 2, 3, 4, 5},
+			wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n",
+		},
+		"uint fields": {
+			input: struct {
+				Uint   uint
+				Uint8  uint8
+				Uint16 uint16
+				Uint32 uint32
+				Uint64 uint64
+			}{1, 2, 3, 4, 5},
+			wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" +
+				"\nUint64 = 5\n",
+		},
+		"float fields": {
+			input: struct {
+				Float32 float32
+				Float64 float64
+			}{1.5, 2.5},
+			wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n",
+		},
+		"string field": {
+			input:      struct{ String string }{"foo"},
+			wantOutput: "String = \"foo\"\n",
+		},
+		"string field and unexported field": {
+			input: struct {
+				String     string
+				unexported int
+			}{"foo", 0},
+			wantOutput: "String = \"foo\"\n",
+		},
+		"datetime field in UTC": {
+			input:      struct{ Date time.Time }{date},
+			wantOutput: fmt.Sprintf("Date = %s\n", dateStr),
+		},
+		"datetime field as primitive": {
+			// Using a map here to fail if isStructOrMap() returns true for
+			// time.Time.
+			input: map[string]interface{}{
+				"Date": date,
+				"Int":  1,
+			},
+			wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr),
+		},
+		"array fields": {
+			input: struct {
+				IntArray0 [0]int
+				IntArray3 [3]int
+			}{[0]int{}, [3]int{1, 2, 3}},
+			wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n",
+		},
+		"slice fields": {
+			input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{
+				nil, []int{}, []int{1, 2, 3},
+			},
+			wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n",
+		},
+		"datetime slices": {
+			input: struct{ DatetimeSlice []time.Time }{
+				[]time.Time{date, date},
+			},
+			wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n",
+				dateStr, dateStr),
+		},
+		"nested arrays and slices": {
+			input: struct {
+				SliceOfArrays         [][2]int
+				ArrayOfSlices         [2][]int
+				SliceOfArraysOfSlices [][2][]int
+				ArrayOfSlicesOfArrays [2][][2]int
+				SliceOfMixedArrays    [][2]interface{}
+				ArrayOfMixedSlices    [2][]interface{}
+			}{
+				[][2]int{{1, 2}, {3, 4}},
+				[2][]int{{1, 2}, {3, 4}},
+				[][2][]int{
+					{
+						{1, 2}, {3, 4},
+					},
+					{
+						{5, 6}, {7, 8},
+					},
+				},
+				[2][][2]int{
+					{
+						{1, 2}, {3, 4},
+					},
+					{
+						{5, 6}, {7, 8},
+					},
+				},
+				[][2]interface{}{
+					{1, 2}, {"a", "b"},
+				},
+				[2][]interface{}{
+					{1, 2}, {"a", "b"},
+				},
+			},
+			wantOutput: `SliceOfArrays = [[1, 2], [3, 4]]
+ArrayOfSlices = [[1, 2], [3, 4]]
+SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
+ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
+SliceOfMixedArrays = [[1, 2], ["a", "b"]]
+ArrayOfMixedSlices = [[1, 2], ["a", "b"]]
+`,
+		},
+		"empty slice": {
+			input:      struct{ Empty []interface{} }{[]interface{}{}},
+			wantOutput: "Empty = []\n",
+		},
+		"(error) slice with element type mismatch (string and integer)": {
+			input:     struct{ Mixed []interface{} }{[]interface{}{1, "a"}},
+			wantError: errArrayMixedElementTypes,
+		},
+		"(error) slice with element type mismatch (integer and float)": {
+			input:     struct{ Mixed []interface{} }{[]interface{}{1, 2.5}},
+			wantError: errArrayMixedElementTypes,
+		},
+		"slice with elems of differing Go types, same TOML types": {
+			input: struct {
+				MixedInts   []interface{}
+				MixedFloats []interface{}
+			}{
+				[]interface{}{
+					int(1), int8(2), int16(3), int32(4), int64(5),
+					uint(1), uint8(2), uint16(3), uint32(4), uint64(5),
+				},
+				[]interface{}{float32(1.5), float64(2.5)},
+			},
+			wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" +
+				"MixedFloats = [1.5, 2.5]\n",
+		},
+		"(error) slice w/ element type mismatch (one is nested array)": {
+			input: struct{ Mixed []interface{} }{
+				[]interface{}{1, []interface{}{2}},
+			},
+			wantError: errArrayMixedElementTypes,
+		},
+		"(error) slice with 1 nil element": {
+			input:     struct{ NilElement1 []interface{} }{[]interface{}{nil}},
+			wantError: errArrayNilElement,
+		},
+		"(error) slice with 1 nil element (and other non-nil elements)": {
+			input: struct{ NilElement []interface{} }{
+				[]interface{}{1, nil},
+			},
+			wantError: errArrayNilElement,
+		},
+		"simple map": {
+			input:      map[string]int{"a": 1, "b": 2},
+			wantOutput: "a = 1\nb = 2\n",
+		},
+		"map with interface{} value type": {
+			input:      map[string]interface{}{"a": 1, "b": "c"},
+			wantOutput: "a = 1\nb = \"c\"\n",
+		},
+		"map with interface{} value type, some of which are structs": {
+			input: map[string]interface{}{
+				"a": struct{ Int int }{2},
+				"b": 1,
+			},
+			wantOutput: "b = 1\n\n[a]\n  Int = 2\n",
+		},
+		"nested map": {
+			input: map[string]map[string]int{
+				"a": {"b": 1},
+				"c": {"d": 2},
+			},
+			wantOutput: "[a]\n  b = 1\n\n[c]\n  d = 2\n",
+		},
+		"nested struct": {
+			input: struct{ Struct struct{ Int int } }{
+				struct{ Int int }{1},
+			},
+			wantOutput: "[Struct]\n  Int = 1\n",
+		},
+		"nested struct and non-struct field": {
+			input: struct {
+				Struct struct{ Int int }
+				Bool   bool
+			}{struct{ Int int }{1}, true},
+			wantOutput: "Bool = true\n\n[Struct]\n  Int = 1\n",
+		},
+		"2 nested structs": {
+			input: struct{ Struct1, Struct2 struct{ Int int } }{
+				struct{ Int int }{1}, struct{ Int int }{2},
+			},
+			wantOutput: "[Struct1]\n  Int = 1\n\n[Struct2]\n  Int = 2\n",
+		},
+		"deeply nested structs": {
+			input: struct {
+				Struct1, Struct2 struct{ Struct3 *struct{ Int int } }
+			}{
+				struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}},
+				struct{ Struct3 *struct{ Int int } }{nil},
+			},
+			wantOutput: "[Struct1]\n  [Struct1.Struct3]\n    Int = 1" +
+				"\n\n[Struct2]\n",
+		},
+		"nested struct with nil struct elem": {
+			input: struct {
+				Struct struct{ Inner *struct{ Int int } }
+			}{
+				struct{ Inner *struct{ Int int } }{nil},
+			},
+			wantOutput: "[Struct]\n",
+		},
+		"nested struct with no fields": {
+			input: struct {
+				Struct struct{ Inner struct{} }
+			}{
+				struct{ Inner struct{} }{struct{}{}},
+			},
+			wantOutput: "[Struct]\n  [Struct.Inner]\n",
+		},
+		"struct with tags": {
+			input: struct {
+				Struct struct {
+					Int int `toml:"_int"`
+				} `toml:"_struct"`
+				Bool bool `toml:"_bool"`
+			}{
+				struct {
+					Int int `toml:"_int"`
+				}{1}, true,
+			},
+			wantOutput: "_bool = true\n\n[_struct]\n  _int = 1\n",
+		},
+		"embedded struct": {
+			input:      struct{ Embedded }{Embedded{1}},
+			wantOutput: "_int = 1\n",
+		},
+		"embedded *struct": {
+			input:      struct{ *Embedded }{&Embedded{1}},
+			wantOutput: "_int = 1\n",
+		},
+		"nested embedded struct": {
+			input: struct {
+				Struct struct{ Embedded } `toml:"_struct"`
+			}{struct{ Embedded }{Embedded{1}}},
+			wantOutput: "[_struct]\n  _int = 1\n",
+		},
+		"nested embedded *struct": {
+			input: struct {
+				Struct struct{ *Embedded } `toml:"_struct"`
+			}{struct{ *Embedded }{&Embedded{1}}},
+			wantOutput: "[_struct]\n  _int = 1\n",
+		},
+		"array of tables": {
+			input: struct {
+				Structs []*struct{ Int int } `toml:"struct"`
+			}{
+				[]*struct{ Int int }{{1}, {3}},
+			},
+			wantOutput: "[[struct]]\n  Int = 1\n\n[[struct]]\n  Int = 3\n",
+		},
+		"array of tables order": {
+			input: map[string]interface{}{
+				"map": map[string]interface{}{
+					"zero": 5,
+					"arr": []map[string]int{
+						map[string]int{
+							"friend": 5,
+						},
+					},
+				},
+			},
+			wantOutput: "[map]\n  zero = 5\n\n  [[map.arr]]\n    friend = 5\n",
+		},
+		"(error) top-level slice": {
+			input:     []struct{ Int int }{{1}, {2}, {3}},
+			wantError: errNoKey,
+		},
+		"(error) slice of slice": {
+			input: struct {
+				Slices [][]struct{ Int int }
+			}{
+				[][]struct{ Int int }{{{1}}, {{2}}, {{3}}},
+			},
+			wantError: errArrayNoTable,
+		},
+		"(error) map no string key": {
+			input:     map[int]string{1: ""},
+			wantError: errNonString,
+		},
+		"(error) anonymous non-struct": {
+			input:     struct{ NonStruct }{5},
+			wantError: errAnonNonStruct,
+		},
+		"(error) empty key name": {
+			input:     map[string]int{"": 1},
+			wantError: errAnything,
+		},
+		"(error) empty map name": {
+			input: map[string]interface{}{
+				"": map[string]int{"v": 1},
+			},
+			wantError: errAnything,
+		},
+	}
+	for label, test := range tests {
+		encodeExpected(t, label, test.input, test.wantOutput, test.wantError)
+	}
+}
+
+func TestEncodeNestedTableArrays(t *testing.T) {
+	type song struct {
+		Name string `toml:"name"`
+	}
+	type album struct {
+		Name  string `toml:"name"`
+		Songs []song `toml:"songs"`
+	}
+	type springsteen struct {
+		Albums []album `toml:"albums"`
+	}
+	value := springsteen{
+		[]album{
+			{"Born to Run",
+				[]song{{"Jungleland"}, {"Meeting Across the River"}}},
+			{"Born in the USA",
+				[]song{{"Glory Days"}, {"Dancing in the Dark"}}},
+		},
+	}
+	expected := `[[albums]]
+  name = "Born to Run"
+
+  [[albums.songs]]
+    name = "Jungleland"
+
+  [[albums.songs]]
+    name = "Meeting Across the River"
+
+[[albums]]
+  name = "Born in the USA"
+
+  [[albums.songs]]
+    name = "Glory Days"
+
+  [[albums.songs]]
+    name = "Dancing in the Dark"
+`
+	encodeExpected(t, "nested table arrays", value, expected, nil)
+}
+
+func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) {
+	type Alpha struct {
+		V int
+	}
+	type Beta struct {
+		V int
+	}
+	type Conf struct {
+		V int
+		A Alpha
+		B []Beta
+	}
+
+	val := Conf{
+		V: 1,
+		A: Alpha{2},
+		B: []Beta{{3}},
+	}
+	expected := "V = 1\n\n[A]\n  V = 2\n\n[[B]]\n  V = 3\n"
+	encodeExpected(t, "array hash with normal hash order", val, expected, nil)
+}
+
+func encodeExpected(
+	t *testing.T, label string, val interface{}, wantStr string, wantErr error,
+) {
+	var buf bytes.Buffer
+	enc := NewEncoder(&buf)
+	err := enc.Encode(val)
+	if err != wantErr {
+		if wantErr != nil {
+			if wantErr == errAnything && err != nil {
+				return
+			}
+			t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err)
+		} else {
+			t.Errorf("%s: Encode failed: %s", label, err)
+		}
+	}
+	if err != nil {
+		return
+	}
+	if got := buf.String(); wantStr != got {
+		t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n",
+			label, wantStr, got)
+	}
+}
+
+func ExampleEncoder_Encode() {
+	date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC")
+	var config = map[string]interface{}{
+		"date":   date,
+		"counts": []int{1, 1, 2, 3, 5, 8},
+		"hash": map[string]string{
+			"key1": "val1",
+			"key2": "val2",
+		},
+	}
+	buf := new(bytes.Buffer)
+	if err := NewEncoder(buf).Encode(config); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(buf.String())
+
+	// Output:
+	// counts = [1, 1, 2, 3, 5, 8]
+	// date = 2010-03-14T18:00:00Z
+	//
+	// [hash]
+	//   key1 = "val1"
+	//   key2 = "val2"
+}

+ 19 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go

@@ -0,0 +1,19 @@
+// +build go1.2
+
+package toml
+
+// In order to support Go 1.1, we define our own TextMarshaler and
+// TextUnmarshaler types. For Go 1.2+, we just alias them with the
+// standard library interfaces.
+
+import (
+	"encoding"
+)
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler encoding.TextMarshaler
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler encoding.TextUnmarshaler

+ 18 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go

@@ -0,0 +1,18 @@
+// +build !go1.2
+
+package toml
+
+// These interfaces were introduced in Go 1.2, so we add them manually when
+// compiling for Go 1.1.
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler interface {
+	MarshalText() (text []byte, err error)
+}
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler interface {
+	UnmarshalText(text []byte) error
+}
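
These two files only exist so that TextMarshaler and TextUnmarshaler are
available on Go 1.1 as well as Go 1.2+. Because the encoder special-cases
TextMarshaler (see eElement in encode.go above), any type with a MarshalText
method is emitted as a quoted TOML string; the round-trip test in
encode_test.go above relies on this for net.IP. A tiny sketch, illustrative
only:

package main

import (
	"log"
	"net"
	"os"

	"github.com/BurntSushi/toml"
)

type host struct {
	Addr net.IP // net.IP implements encoding.TextMarshaler
}

func main() {
	// Prints: Addr = "10.0.0.1"
	if err := toml.NewEncoder(os.Stdout).Encode(host{Addr: net.ParseIP("10.0.0.1")}); err != nil {
		log.Fatal(err)
	}
}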

+ 874 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go

@@ -0,0 +1,874 @@
+package toml
+
+import (
+	"fmt"
+	"strings"
+	"unicode/utf8"
+)
+
+type itemType int
+
+const (
+	itemError itemType = iota
+	itemNIL            // used in the parser to indicate no type
+	itemEOF
+	itemText
+	itemString
+	itemRawString
+	itemMultilineString
+	itemRawMultilineString
+	itemBool
+	itemInteger
+	itemFloat
+	itemDatetime
+	itemArray // the start of an array
+	itemArrayEnd
+	itemTableStart
+	itemTableEnd
+	itemArrayTableStart
+	itemArrayTableEnd
+	itemKeyStart
+	itemCommentStart
+)
+
+const (
+	eof             = 0
+	tableStart      = '['
+	tableEnd        = ']'
+	arrayTableStart = '['
+	arrayTableEnd   = ']'
+	tableSep        = '.'
+	keySep          = '='
+	arrayStart      = '['
+	arrayEnd        = ']'
+	arrayValTerm    = ','
+	commentStart    = '#'
+	stringStart     = '"'
+	stringEnd       = '"'
+	rawStringStart  = '\''
+	rawStringEnd    = '\''
+)
+
+type stateFn func(lx *lexer) stateFn
+
+type lexer struct {
+	input string
+	start int
+	pos   int
+	width int
+	line  int
+	state stateFn
+	items chan item
+
+	// A stack of state functions used to maintain context.
+	// The idea is to reuse parts of the state machine in various places.
+	// For example, values can appear at the top level or within arbitrarily
+	// nested arrays. The last state on the stack is used after a value has
+	// been lexed. Similarly for comments.
+	stack []stateFn
+}
+
+type item struct {
+	typ  itemType
+	val  string
+	line int
+}
+
+func (lx *lexer) nextItem() item {
+	for {
+		select {
+		case item := <-lx.items:
+			return item
+		default:
+			lx.state = lx.state(lx)
+		}
+	}
+}
+
+func lex(input string) *lexer {
+	lx := &lexer{
+		input: input + "\n",
+		state: lexTop,
+		line:  1,
+		items: make(chan item, 10),
+		stack: make([]stateFn, 0, 10),
+	}
+	return lx
+}
+
+func (lx *lexer) push(state stateFn) {
+	lx.stack = append(lx.stack, state)
+}
+
+func (lx *lexer) pop() stateFn {
+	if len(lx.stack) == 0 {
+		return lx.errorf("BUG in lexer: no states to pop.")
+	}
+	last := lx.stack[len(lx.stack)-1]
+	lx.stack = lx.stack[0 : len(lx.stack)-1]
+	return last
+}
+
+func (lx *lexer) current() string {
+	return lx.input[lx.start:lx.pos]
+}
+
+func (lx *lexer) emit(typ itemType) {
+	lx.items <- item{typ, lx.current(), lx.line}
+	lx.start = lx.pos
+}
+
+func (lx *lexer) emitTrim(typ itemType) {
+	lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+	lx.start = lx.pos
+}
+
+func (lx *lexer) next() (r rune) {
+	if lx.pos >= len(lx.input) {
+		lx.width = 0
+		return eof
+	}
+
+	if lx.input[lx.pos] == '\n' {
+		lx.line++
+	}
+	r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
+	lx.pos += lx.width
+	return r
+}
+
+// ignore skips over the pending input before this point.
+func (lx *lexer) ignore() {
+	lx.start = lx.pos
+}
+
+// backup steps back one rune. Can be called only once per call of next.
+func (lx *lexer) backup() {
+	lx.pos -= lx.width
+	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
+		lx.line--
+	}
+}
+
+// accept consumes the next rune if it's equal to `valid`.
+func (lx *lexer) accept(valid rune) bool {
+	if lx.next() == valid {
+		return true
+	}
+	lx.backup()
+	return false
+}
+
+// peek returns but does not consume the next rune in the input.
+func (lx *lexer) peek() rune {
+	r := lx.next()
+	lx.backup()
+	return r
+}
+
+// errorf stops all lexing by emitting an error and returning `nil`.
+// Note that any value that is a character is escaped if it's a special
+// character (new lines, tabs, etc.).
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+	lx.items <- item{
+		itemError,
+		fmt.Sprintf(format, values...),
+		lx.line,
+	}
+	return nil
+}
+
+// lexTop consumes elements at the top level of TOML data.
+func lexTop(lx *lexer) stateFn {
+	r := lx.next()
+	if isWhitespace(r) || isNL(r) {
+		return lexSkip(lx, lexTop)
+	}
+
+	switch r {
+	case commentStart:
+		lx.push(lexTop)
+		return lexCommentStart
+	case tableStart:
+		return lexTableStart
+	case eof:
+		if lx.pos > lx.start {
+			return lx.errorf("Unexpected EOF.")
+		}
+		lx.emit(itemEOF)
+		return nil
+	}
+
+	// At this point, the only valid item can be a key, so we back up
+	// and let the key lexer do the rest.
+	lx.backup()
+	lx.push(lexTopEnd)
+	return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a new line. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == commentStart:
+		// a comment will read to a new line for us.
+		lx.push(lexTop)
+		return lexCommentStart
+	case isWhitespace(r):
+		return lexTopEnd
+	case isNL(r):
+		lx.ignore()
+		return lexTop
+	case r == eof:
+		lx.ignore()
+		return lexTop
+	}
+	return lx.errorf("Expected a top-level item to end with a new line, "+
+		"comment or EOF, but got %q instead.", r)
+}
+
+// lexTableStart lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'.
+func lexTableStart(lx *lexer) stateFn {
+	if lx.peek() == arrayTableStart {
+		lx.next()
+		lx.emit(itemArrayTableStart)
+		lx.push(lexArrayTableEnd)
+	} else {
+		lx.emit(itemTableStart)
+		lx.push(lexTableEnd)
+	}
+	return lexTableNameStart
+}
+
+func lexTableEnd(lx *lexer) stateFn {
+	lx.emit(itemTableEnd)
+	return lexTopEnd
+}
+
+func lexArrayTableEnd(lx *lexer) stateFn {
+	if r := lx.next(); r != arrayTableEnd {
+		return lx.errorf("Expected end of table array name delimiter %q, "+
+			"but got %q instead.", arrayTableEnd, r)
+	}
+	lx.emit(itemArrayTableEnd)
+	return lexTopEnd
+}
+
+func lexTableNameStart(lx *lexer) stateFn {
+	switch r := lx.peek(); {
+	case r == tableEnd || r == eof:
+		return lx.errorf("Unexpected end of table name. (Table names cannot " +
+			"be empty.)")
+	case r == tableSep:
+		return lx.errorf("Unexpected table separator. (Table names cannot " +
+			"be empty.)")
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		lx.push(lexTableNameEnd)
+		return lexValue // reuse string lexing
+	case isWhitespace(r):
+		return lexTableNameStart
+	default:
+		return lexBareTableName
+	}
+}
+
+// lexBareTableName lexes a bare (unquoted) piece of a table name. It assumes
+// that the first character (which is not whitespace) has not yet been
+// consumed.
+func lexBareTableName(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isBareKeyChar(r):
+		return lexBareTableName
+	case r == tableSep || r == tableEnd:
+		lx.backup()
+		lx.emitTrim(itemText)
+		return lexTableNameEnd
+	default:
+		return lx.errorf("Bare keys cannot contain %q.", r)
+	}
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexTableNameEnd
+	case r == tableSep:
+		lx.ignore()
+		return lexTableNameStart
+	case r == tableEnd:
+		return lx.pop()
+	default:
+		return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
+			"instead.", r)
+	}
+}
+
+// lexKeyStart skips any whitespace before a key name, emits itemKeyStart and
+// hands off to the appropriate key lexer (bare or quoted).
+func lexKeyStart(lx *lexer) stateFn {
+	r := lx.peek()
+	switch {
+	case r == keySep:
+		return lx.errorf("Unexpected key separator %q.", keySep)
+	case isWhitespace(r) || isNL(r):
+		lx.next()
+		return lexSkip(lx, lexKeyStart)
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		lx.emit(itemKeyStart)
+		lx.push(lexKeyEnd)
+		return lexValue // reuse string lexing
+	default:
+		lx.ignore()
+		lx.emit(itemKeyStart)
+		return lexBareKey
+	}
+}
+
+// lexBareKey consumes the text of a bare key. Assumes that the first character
+// (which is not whitespace) has not yet been consumed.
+func lexBareKey(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isBareKeyChar(r):
+		return lexBareKey
+	case isWhitespace(r):
+		lx.emitTrim(itemText)
+		return lexKeyEnd
+	case r == keySep:
+		lx.backup()
+		lx.emitTrim(itemText)
+		return lexKeyEnd
+	default:
+		return lx.errorf("Bare keys cannot contain %q.", r)
+	}
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case r == keySep:
+		return lexSkip(lx, lexValue)
+	case isWhitespace(r):
+		return lexSkip(lx, lexKeyEnd)
+	default:
+		return lx.errorf("Expected key separator %q, but got %q instead.",
+			keySep, r)
+	}
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+	// We allow whitespace to precede a value, but NOT new lines.
+	// In array syntax, the array states are responsible for ignoring new
+	// lines.
+	r := lx.next()
+	if isWhitespace(r) {
+		return lexSkip(lx, lexValue)
+	}
+
+	switch {
+	case r == arrayStart:
+		lx.ignore()
+		lx.emit(itemArray)
+		return lexArrayValue
+	case r == stringStart:
+		if lx.accept(stringStart) {
+			if lx.accept(stringStart) {
+				lx.ignore() // Ignore """
+				return lexMultilineString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the '"'
+		return lexString
+	case r == rawStringStart:
+		if lx.accept(rawStringStart) {
+			if lx.accept(rawStringStart) {
+				lx.ignore() // Ignore """
+				return lexMultilineRawString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case r == 't':
+		return lexTrue
+	case r == 'f':
+		return lexFalse
+	case r == '-':
+		return lexNumberStart
+	case isDigit(r):
+		lx.backup() // avoid an extra state and use the same as above
+		return lexNumberOrDateStart
+	case r == '.': // special error case, be kind to users
+		return lx.errorf("Floats must start with a digit, not '.'.")
+	}
+	return lx.errorf("Expected value but found %q instead.", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and new lines are ignored.
+func lexArrayValue(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r) || isNL(r):
+		return lexSkip(lx, lexArrayValue)
+	case r == commentStart:
+		lx.push(lexArrayValue)
+		return lexCommentStart
+	case r == arrayValTerm:
+		return lx.errorf("Unexpected array value terminator %q.",
+			arrayValTerm)
+	case r == arrayEnd:
+		return lexArrayEnd
+	}
+
+	lx.backup()
+	lx.push(lexArrayValueEnd)
+	return lexValue
+}
+
+// lexArrayValueEnd consumes the cruft between values of an array. Namely,
+// it ignores whitespace and expects either a ',' or a ']'.
+func lexArrayValueEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r) || isNL(r):
+		return lexSkip(lx, lexArrayValueEnd)
+	case r == commentStart:
+		lx.push(lexArrayValueEnd)
+		return lexCommentStart
+	case r == arrayValTerm:
+		lx.ignore()
+		return lexArrayValue // move on to the next value
+	case r == arrayEnd:
+		return lexArrayEnd
+	}
+	return lx.errorf("Expected an array value terminator %q or an array "+
+		"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
+}
+
+// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
+// just been consumed.
+func lexArrayEnd(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemArrayEnd)
+	return lx.pop()
+}
+
+// lexString consumes the inner contents of a string. It assumes that the
+// beginning '"' has already been consumed and ignored.
+func lexString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isNL(r):
+		return lx.errorf("Strings cannot contain new lines.")
+	case r == '\\':
+		lx.push(lexString)
+		return lexStringEscape
+	case r == stringEnd:
+		lx.backup()
+		lx.emit(itemString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+	return lexString
+}
+
+// lexMultilineString consumes the inner contents of a string. It assumes that
+// the beginning '"""' has already been consumed and ignored.
+func lexMultilineString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == '\\':
+		return lexMultilineStringEscape
+	case r == stringEnd:
+		if lx.accept(stringEnd) {
+			if lx.accept(stringEnd) {
+				lx.backup()
+				lx.backup()
+				lx.backup()
+				lx.emit(itemMultilineString)
+				lx.next()
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+	}
+	return lexMultilineString
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isNL(r):
+		return lx.errorf("Strings cannot contain new lines.")
+	case r == rawStringEnd:
+		lx.backup()
+		lx.emit(itemRawString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+	return lexRawString
+}
+
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
+// a string. It assumes that the beginning "'" has already been consumed and
+// ignored.
+func lexMultilineRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == rawStringEnd:
+		if lx.accept(rawStringEnd) {
+			if lx.accept(rawStringEnd) {
+				lx.backup()
+				lx.backup()
+				lx.backup()
+				lx.emit(itemRawMultilineString)
+				lx.next()
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+	}
+	return lexMultilineRawString
+}
+
+// lexMultilineStringEscape consumes an escaped character. It assumes that the
+// preceding '\\' has already been consumed.
+func lexMultilineStringEscape(lx *lexer) stateFn {
+	// Handle the special case first:
+	if isNL(lx.next()) {
+		lx.next()
+		return lexMultilineString
+	} else {
+		lx.backup()
+		lx.push(lexMultilineString)
+		return lexStringEscape(lx)
+	}
+}
+
+func lexStringEscape(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case 'b':
+		fallthrough
+	case 't':
+		fallthrough
+	case 'n':
+		fallthrough
+	case 'f':
+		fallthrough
+	case 'r':
+		fallthrough
+	case '"':
+		fallthrough
+	case '\\':
+		return lx.pop()
+	case 'u':
+		return lexShortUnicodeEscape
+	case 'U':
+		return lexLongUnicodeEscape
+	}
+	return lx.errorf("Invalid escape character %q. Only the following "+
+		"escape characters are allowed: "+
+		"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
+		"\\uXXXX and \\UXXXXXXXX.", r)
+}
+
+func lexShortUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 4; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf("Expected four hexadecimal digits after '\\u', "+
+				"but got '%s' instead.", lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 8; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
+				"but got '%s' instead.", lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+// lexNumberOrDateStart consumes either a (positive) integer, float or
+// datetime. It assumes that NO negative sign has been consumed.
+func lexNumberOrDateStart(lx *lexer) stateFn {
+	r := lx.next()
+	if !isDigit(r) {
+		if r == '.' {
+			return lx.errorf("Floats must start with a digit, not '.'.")
+		} else {
+			return lx.errorf("Expected a digit but got %q.", r)
+		}
+	}
+	return lexNumberOrDate
+}
+
+// lexNumberOrDate consumes either a (positive) integer, float or datetime.
+func lexNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == '-':
+		if lx.pos-lx.start != 5 {
+			return lx.errorf("All ISO8601 dates must be in full Zulu form.")
+		}
+		return lexDateAfterYear
+	case isDigit(r):
+		return lexNumberOrDate
+	case r == '.':
+		return lexFloatStart
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
+// It assumes that "YYYY-" has already been consumed.
+func lexDateAfterYear(lx *lexer) stateFn {
+	formats := []rune{
+		// digits are '0'.
+		// everything else is direct equality.
+		'0', '0', '-', '0', '0',
+		'T',
+		'0', '0', ':', '0', '0', ':', '0', '0',
+		'Z',
+	}
+	for _, f := range formats {
+		r := lx.next()
+		if f == '0' {
+			if !isDigit(r) {
+				return lx.errorf("Expected digit in ISO8601 datetime, "+
+					"but found %q instead.", r)
+			}
+		} else if f != r {
+			return lx.errorf("Expected %q in ISO8601 datetime, "+
+				"but found %q instead.", f, r)
+		}
+	}
+	lx.emit(itemDatetime)
+	return lx.pop()
+}
+
+// lexNumberStart consumes either an integer or a float. It assumes that
+// a negative sign has already been read, but that *no* digits have been
+// consumed. lexNumberStart will move to the appropriate integer or float
+// states.
+func lexNumberStart(lx *lexer) stateFn {
+	// we MUST see a digit. Even floats have to start with a digit.
+	r := lx.next()
+	if !isDigit(r) {
+		if r == '.' {
+			return lx.errorf("Floats must start with a digit, not '.'.")
+		} else {
+			return lx.errorf("Expected a digit but got %q.", r)
+		}
+	}
+	return lexNumber
+}
+
+// lexNumber consumes an integer or a float after seeing the first digit.
+func lexNumber(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isDigit(r):
+		return lexNumber
+	case r == '.':
+		return lexFloatStart
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexFloatStart starts the consumption of digits of a float after a '.'.
+// Namely, at least one digit is required.
+func lexFloatStart(lx *lexer) stateFn {
+	r := lx.next()
+	if !isDigit(r) {
+		return lx.errorf("Floats must have a digit after the '.', but got "+
+			"%q instead.", r)
+	}
+	return lexFloat
+}
+
+// lexFloat consumes the digits of a float after a '.'.
+// Assumes that one digit has been consumed after a '.' already.
+func lexFloat(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemFloat)
+	return lx.pop()
+}
+
+// lexConst consumes the s[1:] in s. It assumes that s[0] has already been
+// consumed.
+func lexConst(lx *lexer, s string) stateFn {
+	for i := range s[1:] {
+		if r := lx.next(); r != rune(s[i+1]) {
+			return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
+				s[:i]+string(r))
+		}
+	}
+	return nil
+}
+
+// lexTrue consumes the "rue" in "true". It assumes that 't' has already
+// been consumed.
+func lexTrue(lx *lexer) stateFn {
+	if fn := lexConst(lx, "true"); fn != nil {
+		return fn
+	}
+	lx.emit(itemBool)
+	return lx.pop()
+}
+
+// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
+// been consumed.
+func lexFalse(lx *lexer) stateFn {
+	if fn := lexConst(lx, "false"); fn != nil {
+		return fn
+	}
+	lx.emit(itemBool)
+	return lx.pop()
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemCommentStart)
+	return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+	r := lx.peek()
+	if isNL(r) || r == eof {
+		lx.emit(itemText)
+		return lx.pop()
+	}
+	lx.next()
+	return lexComment
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+	return func(lx *lexer) stateFn {
+		lx.ignore()
+		return nextState
+	}
+}
+
+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.
+func isWhitespace(r rune) bool {
+	return r == '\t' || r == ' '
+}
+
+func isNL(r rune) bool {
+	return r == '\n' || r == '\r'
+}
+
+func isDigit(r rune) bool {
+	return r >= '0' && r <= '9'
+}
+
+func isHexadecimal(r rune) bool {
+	return (r >= '0' && r <= '9') ||
+		(r >= 'a' && r <= 'f') ||
+		(r >= 'A' && r <= 'F')
+}
+
+func isBareKeyChar(r rune) bool {
+	return (r >= 'A' && r <= 'Z') ||
+		(r >= 'a' && r <= 'z') ||
+		(r >= '0' && r <= '9') ||
+		r == '_' ||
+		r == '-'
+}
+
+func (itype itemType) String() string {
+	switch itype {
+	case itemError:
+		return "Error"
+	case itemNIL:
+		return "NIL"
+	case itemEOF:
+		return "EOF"
+	case itemText:
+		return "Text"
+	case itemString:
+		return "String"
+	case itemRawString:
+		return "String"
+	case itemMultilineString:
+		return "String"
+	case itemRawMultilineString:
+		return "String"
+	case itemBool:
+		return "Bool"
+	case itemInteger:
+		return "Integer"
+	case itemFloat:
+		return "Float"
+	case itemDatetime:
+		return "DateTime"
+	case itemTableStart:
+		return "TableStart"
+	case itemTableEnd:
+		return "TableEnd"
+	case itemKeyStart:
+		return "KeyStart"
+	case itemArray:
+		return "Array"
+	case itemArrayEnd:
+		return "ArrayEnd"
+	case itemCommentStart:
+		return "CommentStart"
+	}
+	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
+}
+
+func (item item) String() string {
+	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+}
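
For orientation, a minimal in-package sketch of how these state functions classify values, assuming the lex constructor and nextItem method defined earlier in this file; the helper name and the sample document are illustrative only:

package toml

import "fmt"

// sketchLexValues is illustrative only: it runs the lexer over three value
// literals and prints every emitted item via the item.String method above.
// 42 lexes as an Integer, 3.14 as a Float, and the timestamp as a DateTime
// (it must match the full Zulu form YYYY-MM-DDTHH:MM:SSZ exactly).
func sketchLexValues() {
	lx := lex("a = 42\nb = 3.14\nc = 1979-05-27T07:32:00Z\n")
	for {
		it := lx.nextItem()
		if it.typ == itemEOF || it.typ == itemError {
			break
		}
		fmt.Println(it) // e.g. (KeyStart, ), (Text, a), (Integer, 42), ...
	}
}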

+ 498 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go

@@ -0,0 +1,498 @@
+package toml
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+	"unicode/utf8"
+)
+
+type parser struct {
+	mapping map[string]interface{}
+	types   map[string]tomlType
+	lx      *lexer
+
+	// A list of keys in the order that they appear in the TOML data.
+	ordered []Key
+
+	// the full key for the current hash in scope
+	context Key
+
+	// the base key name for everything except hashes
+	currentKey string
+
+	// rough approximation of line number
+	approxLine int
+
+	// A map of 'key.group.names' to whether they were created implicitly.
+	implicits map[string]bool
+}
+
+type parseError string
+
+func (pe parseError) Error() string {
+	return string(pe)
+}
+
+func parse(data string) (p *parser, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			var ok bool
+			if err, ok = r.(parseError); ok {
+				return
+			}
+			panic(r)
+		}
+	}()
+
+	p = &parser{
+		mapping:   make(map[string]interface{}),
+		types:     make(map[string]tomlType),
+		lx:        lex(data),
+		ordered:   make([]Key, 0),
+		implicits: make(map[string]bool),
+	}
+	for {
+		item := p.next()
+		if item.typ == itemEOF {
+			break
+		}
+		p.topLevel(item)
+	}
+
+	return p, nil
+}
+
+func (p *parser) panicf(format string, v ...interface{}) {
+	msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+		p.approxLine, p.current(), fmt.Sprintf(format, v...))
+	panic(parseError(msg))
+}
+
+func (p *parser) next() item {
+	it := p.lx.nextItem()
+	if it.typ == itemError {
+		p.panicf("%s", it.val)
+	}
+	return it
+}
+
+func (p *parser) bug(format string, v ...interface{}) {
+	log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...))
+}
+
+func (p *parser) expect(typ itemType) item {
+	it := p.next()
+	p.assertEqual(typ, it.typ)
+	return it
+}
+
+func (p *parser) assertEqual(expected, got itemType) {
+	if expected != got {
+		p.bug("Expected '%s' but got '%s'.", expected, got)
+	}
+}
+
+func (p *parser) topLevel(item item) {
+	switch item.typ {
+	case itemCommentStart:
+		p.approxLine = item.line
+		p.expect(itemText)
+	case itemTableStart:
+		kg := p.next()
+		p.approxLine = kg.line
+
+		var key Key
+		for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
+			key = append(key, p.keyString(kg))
+		}
+		p.assertEqual(itemTableEnd, kg.typ)
+
+		p.establishContext(key, false)
+		p.setType("", tomlHash)
+		p.ordered = append(p.ordered, key)
+	case itemArrayTableStart:
+		kg := p.next()
+		p.approxLine = kg.line
+
+		var key Key
+		for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
+			key = append(key, p.keyString(kg))
+		}
+		p.assertEqual(itemArrayTableEnd, kg.typ)
+
+		p.establishContext(key, true)
+		p.setType("", tomlArrayHash)
+		p.ordered = append(p.ordered, key)
+	case itemKeyStart:
+		kname := p.next()
+		p.approxLine = kname.line
+		p.currentKey = p.keyString(kname)
+
+		val, typ := p.value(p.next())
+		p.setValue(p.currentKey, val)
+		p.setType(p.currentKey, typ)
+		p.ordered = append(p.ordered, p.context.add(p.currentKey))
+		p.currentKey = ""
+	default:
+		p.bug("Unexpected type at top level: %s", item.typ)
+	}
+}
+
+// Gets a string for a key (or part of a key in a table name).
+func (p *parser) keyString(it item) string {
+	switch it.typ {
+	case itemText:
+		return it.val
+	case itemString, itemMultilineString,
+		itemRawString, itemRawMultilineString:
+		s, _ := p.value(it)
+		return s.(string)
+	default:
+		p.bug("Unexpected key type: %s", it.typ)
+		panic("unreachable")
+	}
+}
+
+// value translates an expected value from the lexer into a Go value wrapped
+// as an empty interface.
+func (p *parser) value(it item) (interface{}, tomlType) {
+	switch it.typ {
+	case itemString:
+		return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
+	case itemMultilineString:
+		trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
+		return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
+	case itemRawString:
+		return it.val, p.typeOfPrimitive(it)
+	case itemRawMultilineString:
+		return stripFirstNewline(it.val), p.typeOfPrimitive(it)
+	case itemBool:
+		switch it.val {
+		case "true":
+			return true, p.typeOfPrimitive(it)
+		case "false":
+			return false, p.typeOfPrimitive(it)
+		}
+		p.bug("Expected boolean value, but got '%s'.", it.val)
+	case itemInteger:
+		num, err := strconv.ParseInt(it.val, 10, 64)
+		if err != nil {
+			// See comment below for floats describing why we make a
+			// distinction between a bug and a user error.
+			if e, ok := err.(*strconv.NumError); ok &&
+				e.Err == strconv.ErrRange {
+
+				p.panicf("Integer '%s' is out of the range of 64-bit "+
+					"signed integers.", it.val)
+			} else {
+				p.bug("Expected integer value, but got '%s'.", it.val)
+			}
+		}
+		return num, p.typeOfPrimitive(it)
+	case itemFloat:
+		num, err := strconv.ParseFloat(it.val, 64)
+		if err != nil {
+			// Distinguish float values. Normally, it'd be a bug if the lexer
+			// provides an invalid float, but it's possible that the float is
+			// out of range of valid values (which the lexer cannot determine).
+			// So mark the former as a bug but the latter as a legitimate user
+			// error.
+			//
+			// This is also true for integers.
+			if e, ok := err.(*strconv.NumError); ok &&
+				e.Err == strconv.ErrRange {
+
+				p.panicf("Float '%s' is out of the range of 64-bit "+
+					"IEEE-754 floating-point numbers.", it.val)
+			} else {
+				p.bug("Expected float value, but got '%s'.", it.val)
+			}
+		}
+		return num, p.typeOfPrimitive(it)
+	case itemDatetime:
+		t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
+		if err != nil {
+			p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val)
+		}
+		return t, p.typeOfPrimitive(it)
+	case itemArray:
+		array := make([]interface{}, 0)
+		types := make([]tomlType, 0)
+
+		for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
+			if it.typ == itemCommentStart {
+				p.expect(itemText)
+				continue
+			}
+
+			val, typ := p.value(it)
+			array = append(array, val)
+			types = append(types, typ)
+		}
+		return array, p.typeOfArray(types)
+	}
+	p.bug("Unexpected value type: %s", it.typ)
+	panic("unreachable")
+}
+
+// establishContext sets the current context of the parser,
+// where the context is either a hash or an array of hashes. Which one is
+// set depends on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) establishContext(key Key, array bool) {
+	var ok bool
+
+	// Always start at the top level and drill down for our context.
+	hashContext := p.mapping
+	keyContext := make(Key, 0)
+
+	// We only need implicit hashes for key[0:-1]
+	for _, k := range key[0 : len(key)-1] {
+		_, ok = hashContext[k]
+		keyContext = append(keyContext, k)
+
+		// No key? Make an implicit hash and move on.
+		if !ok {
+			p.addImplicit(keyContext)
+			hashContext[k] = make(map[string]interface{})
+		}
+
+		// If the hash context is actually an array of tables, then set
+		// the hash context to the last element in that array.
+		//
+		// Otherwise, it better be a table, since this MUST be a key group (by
+		// virtue of it not being the last element in a key).
+		switch t := hashContext[k].(type) {
+		case []map[string]interface{}:
+			hashContext = t[len(t)-1]
+		case map[string]interface{}:
+			hashContext = t
+		default:
+			p.panicf("Key '%s' was already created as a hash.", keyContext)
+		}
+	}
+
+	p.context = keyContext
+	if array {
+		// If this is the first element for this array, then allocate a new
+		// list of tables for it.
+		k := key[len(key)-1]
+		if _, ok := hashContext[k]; !ok {
+			hashContext[k] = make([]map[string]interface{}, 0, 5)
+		}
+
+		// Add a new table. But make sure the key hasn't already been used
+		// for something else.
+		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+			hashContext[k] = append(hash, make(map[string]interface{}))
+		} else {
+			p.panicf("Key '%s' was already created and cannot be used as "+
+				"an array.", keyContext)
+		}
+	} else {
+		p.setValue(key[len(key)-1], make(map[string]interface{}))
+	}
+	p.context = append(p.context, key[len(key)-1])
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, accounting for
+// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+	var tmpHash interface{}
+	var ok bool
+
+	hash := p.mapping
+	keyContext := make(Key, 0)
+	for _, k := range p.context {
+		keyContext = append(keyContext, k)
+		if tmpHash, ok = hash[k]; !ok {
+			p.bug("Context for key '%s' has not been established.", keyContext)
+		}
+		switch t := tmpHash.(type) {
+		case []map[string]interface{}:
+			// The context is a table of hashes. Pick the most recent table
+			// defined as the current hash.
+			hash = t[len(t)-1]
+		case map[string]interface{}:
+			hash = t
+		default:
+			p.bug("Expected hash to have type 'map[string]interface{}', but "+
+				"it has '%T' instead.", tmpHash)
+		}
+	}
+	keyContext = append(keyContext, key)
+
+	if _, ok := hash[key]; ok {
+		// Typically, if the given key has already been set, then we have
+		// to raise an error since duplicate keys are disallowed. However,
+		// it's possible that a key was previously defined implicitly. In this
+		// case, it is allowed to be redefined concretely. (See the
+		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+		//
+		// But we have to make sure to stop marking it as an implicit. (So that
+		// another redefinition provokes an error.)
+		//
+		// Note that since it has already been defined (as a hash), we don't
+		// want to overwrite it. So our business is done.
+		if p.isImplicit(keyContext) {
+			p.removeImplicit(keyContext)
+			return
+		}
+
+		// Otherwise, we have a concrete key trying to override a previous
+		// key, which is *always* wrong.
+		p.panicf("Key '%s' has already been defined.", keyContext)
+	}
+	hash[key] = value
+}
+
+// setType sets the type of a particular value at a given key.
+// It should be called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).
+func (p *parser) setType(key string, typ tomlType) {
+	keyContext := make(Key, 0, len(p.context)+1)
+	for _, k := range p.context {
+		keyContext = append(keyContext, k)
+	}
+	if len(key) > 0 { // allow type setting for hashes
+		keyContext = append(keyContext, key)
+	}
+	p.types[keyContext.String()] = typ
+}
+
+// addImplicit sets the given Key as having been created implicitly.
+func (p *parser) addImplicit(key Key) {
+	p.implicits[key.String()] = true
+}
+
+// removeImplicit stops tagging the given key as having been implicitly
+// created.
+func (p *parser) removeImplicit(key Key) {
+	p.implicits[key.String()] = false
+}
+
+// isImplicit returns true if the key group pointed to by the key was created
+// implicitly.
+func (p *parser) isImplicit(key Key) bool {
+	return p.implicits[key.String()]
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+	if len(p.currentKey) == 0 {
+		return p.context.String()
+	}
+	if len(p.context) == 0 {
+		return p.currentKey
+	}
+	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
+func stripFirstNewline(s string) string {
+	if len(s) == 0 || s[0] != '\n' {
+		return s
+	}
+	return s[1:]
+}
+
+func stripEscapedWhitespace(s string) string {
+	esc := strings.Split(s, "\\\n")
+	if len(esc) > 1 {
+		for i := 1; i < len(esc); i++ {
+			esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+		}
+	}
+	return strings.Join(esc, "")
+}
+
+func (p *parser) replaceEscapes(str string) string {
+	var replaced []rune
+	s := []byte(str)
+	r := 0
+	for r < len(s) {
+		if s[r] != '\\' {
+			c, size := utf8.DecodeRune(s[r:])
+			r += size
+			replaced = append(replaced, c)
+			continue
+		}
+		r += 1
+		if r >= len(s) {
+			p.bug("Escape sequence at end of string.")
+			return ""
+		}
+		switch s[r] {
+		default:
+			p.bug("Expected valid escape code after \\, but got %q.", s[r])
+			return ""
+		case 'b':
+			replaced = append(replaced, rune(0x0008))
+			r += 1
+		case 't':
+			replaced = append(replaced, rune(0x0009))
+			r += 1
+		case 'n':
+			replaced = append(replaced, rune(0x000A))
+			r += 1
+		case 'f':
+			replaced = append(replaced, rune(0x000C))
+			r += 1
+		case 'r':
+			replaced = append(replaced, rune(0x000D))
+			r += 1
+		case '"':
+			replaced = append(replaced, rune(0x0022))
+			r += 1
+		case '\\':
+			replaced = append(replaced, rune(0x005C))
+			r += 1
+		case 'u':
+			// At this point, we know we have a Unicode escape of the form
+			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+			replaced = append(replaced, escaped)
+			r += 5
+		case 'U':
+			// At this point, we know we have a Unicode escape of the form
+			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+			replaced = append(replaced, escaped)
+			r += 9
+		}
+	}
+	return string(replaced)
+}
+
+func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+	s := string(bs)
+	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+	if err != nil {
+		p.bug("Could not parse '%s' as a hexadecimal number, but the "+
+			"lexer claims it's OK: %s", s, err)
+	}
+
+	// BUG(burntsushi)
+	// I honestly don't understand how this works. I can't seem
+	// to find a way to make this fail. I figured this would fail on invalid
+	// UTF-8 characters like U+DCFF, but it doesn't.
+	if !utf8.ValidString(string(rune(hex))) {
+		p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+	}
+	return rune(hex)
+}
+
+func isStringType(ty itemType) bool {
+	return ty == itemString || ty == itemMultilineString ||
+		ty == itemRawString || ty == itemRawMultilineString
+}
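
A minimal in-package sketch of how the parser above is driven; only parse and the parser fields shown here are used, and the helper name and sample document are illustrative:

package toml

import "fmt"

// sketchParse is illustrative only: it feeds a small document to parse and
// inspects what the parser records.
func sketchParse() {
	doc := "title = \"TOML Example\"\n" +
		"[owner]\n" +
		"dob = 1979-05-27T07:32:00Z\n"

	p, err := parse(doc)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(p.mapping["title"]) // decoded tree: "TOML Example"
	fmt.Println(len(p.ordered))     // keys in document order: 3
	fmt.Println(p.types)            // inferred TOML type per key
}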

+ 1 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/session.vim

@@ -0,0 +1 @@
+au BufWritePost *.go silent!make tags > /dev/null 2>&1

+ 91 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go

@@ -0,0 +1,91 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, we seem to be moving
+// toward adding real composite types.
+type tomlType interface {
+	typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
+func typeEqual(t1, t2 tomlType) bool {
+	if t1 == nil || t2 == nil {
+		return false
+	}
+	return t1.typeString() == t2.typeString()
+}
+
+func typeIsHash(t tomlType) bool {
+	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
+}
+
+type tomlBaseType string
+
+func (btype tomlBaseType) typeString() string {
+	return string(btype)
+}
+
+func (btype tomlBaseType) String() string {
+	return btype.typeString()
+}
+
+var (
+	tomlInteger   tomlBaseType = "Integer"
+	tomlFloat     tomlBaseType = "Float"
+	tomlDatetime  tomlBaseType = "Datetime"
+	tomlString    tomlBaseType = "String"
+	tomlBool      tomlBaseType = "Bool"
+	tomlArray     tomlBaseType = "Array"
+	tomlHash      tomlBaseType = "Hash"
+	tomlArrayHash tomlBaseType = "ArrayHash"
+)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType {
+	switch lexItem.typ {
+	case itemInteger:
+		return tomlInteger
+	case itemFloat:
+		return tomlFloat
+	case itemDatetime:
+		return tomlDatetime
+	case itemString:
+		return tomlString
+	case itemMultilineString:
+		return tomlString
+	case itemRawString:
+		return tomlString
+	case itemRawMultilineString:
+		return tomlString
+	case itemBool:
+		return tomlBool
+	}
+	p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
+	panic("unreachable")
+}
+
+// typeOfArray returns a tomlType for an array given a list of types of its
+// values.
+//
+// In the current spec, if an array is homogeneous, then its type is always
+// "Array". If the array is not homogeneous, an error is generated.
+func (p *parser) typeOfArray(types []tomlType) tomlType {
+	// Empty arrays are cool.
+	if len(types) == 0 {
+		return tomlArray
+	}
+
+	theType := types[0]
+	for _, t := range types[1:] {
+		if !typeEqual(theType, t) {
+			p.panicf("Array contains values of type '%s' and '%s', but "+
+				"arrays must be homogeneous.", theType, t)
+		}
+	}
+	return tomlArray
+}
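
A short in-package sketch of the homogeneity rule typeOfArray enforces; the helper name is illustrative and p stands for any live parser:

package toml

// sketchArrayTypes is illustrative only: typeOfArray returns tomlArray for an
// empty or homogeneous list of element types and raises a parse error (via
// p.panicf) for a mixed one.
func sketchArrayTypes(p *parser) {
	_ = p.typeOfArray(nil)                                  // empty arrays are fine
	_ = p.typeOfArray([]tomlType{tomlInteger, tomlInteger}) // homogeneous
	// p.typeOfArray([]tomlType{tomlInteger, tomlString})   // would panic
}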

+ 241 - 0
libnetwork/Godeps/_workspace/src/github.com/BurntSushi/toml/type_fields.go

@@ -0,0 +1,241 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+	"reflect"
+	"sort"
+	"sync"
+)
+
+// A field represents a single field found in a struct.
+type field struct {
+	name  string       // the name of the field (`toml` tag included)
+	tag   bool         // whether field has a `toml` tag
+	index []int        // represents the depth of an anonymous field
+	typ   reflect.Type // the type of the field
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from toml tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that TOML should recognize for the given
+// type. The algorithm is breadth-first search over the set of structs to
+// include - the top struct and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+	// Anonymous fields to explore at the current level and the next.
+	current := []field{}
+	next := []field{{typ: t}}
+
+	// Count of queued names for current level and the next.
+	count := map[reflect.Type]int{}
+	nextCount := map[reflect.Type]int{}
+
+	// Types already visited at an earlier level.
+	visited := map[reflect.Type]bool{}
+
+	// Fields found.
+	var fields []field
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if visited[f.typ] {
+				continue
+			}
+			visited[f.typ] = true
+
+			// Scan f.typ for fields to include.
+			for i := 0; i < f.typ.NumField(); i++ {
+				sf := f.typ.Field(i)
+				if sf.PkgPath != "" { // unexported
+					continue
+				}
+				name := sf.Tag.Get("toml")
+				if name == "-" {
+					continue
+				}
+				index := make([]int, len(f.index)+1)
+				copy(index, f.index)
+				index[len(f.index)] = i
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					// Follow pointer.
+					ft = ft.Elem()
+				}
+
+				// Record found field and index sequence.
+				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+					tagged := name != ""
+					if name == "" {
+						name = sf.Name
+					}
+					fields = append(fields, field{name, tagged, index, ft})
+					if count[f.typ] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, fields[len(fields)-1])
+					}
+					continue
+				}
+
+				// Record new anonymous struct to explore in next round.
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					f := field{name: ft.Name(), index: index, typ: ft}
+					next = append(next, f)
+				}
+			}
+		}
+	}
+
+	sort.Sort(byName(fields))
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with TOML tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// TOML tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
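
A hedged in-package sketch of what typeFields reports for a tagged struct; the example types are illustrative:

package toml

import (
	"fmt"
	"reflect"
)

// Illustrative types only: one tagged field, one unexported field, one field
// ignored with `toml:"-"`, and an embedded struct whose field is promoted.
type exampleInner struct {
	Port int `toml:"port"`
}

type exampleOuter struct {
	Name   string `toml:"name"`
	hidden string
	Skip   string `toml:"-"`
	exampleInner
}

// sketchTypeFields prints the fields cachedTypeFields reports for the struct
// above: "name [0]" for the tagged field and "port [3 0]" for the promoted
// embedded field; hidden and Skip are dropped.
func sketchTypeFields() {
	for _, f := range cachedTypeFields(reflect.TypeOf(exampleOuter{})) {
		fmt.Println(f.name, f.index)
	}
}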

+ 23 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child.go

@@ -0,0 +1,23 @@
+package etcd
+
+// AddChildDir adds a new directory with a random etcd-generated key under the given path.
+func (c *Client) AddChildDir(key string, ttl uint64) (*Response, error) {
+	raw, err := c.post(key, "", ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// AddChild adds a new file with a random etcd-generated key under the given path.
+func (c *Client) AddChild(key string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.post(key, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
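
A hedged usage sketch for the helpers above, assuming a local etcd reachable on the default client port; the directory name and payload are illustrative, and CreateDir comes from the same client API:

package main

import (
	"fmt"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	c := etcd.NewClient([]string{"http://127.0.0.1:4001"})

	// AddChild appends a value under "queue" using a key that etcd
	// generates, so repeated calls build an ordered list of children.
	c.CreateDir("queue", 0)
	resp, err := c.AddChild("queue", "job-1", 0)
	if err != nil {
		fmt.Println("add child failed:", err)
		return
	}
	fmt.Println("created", resp.Node.Key)
}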

+ 73 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go

@@ -0,0 +1,73 @@
+package etcd
+
+import "testing"
+
+func TestAddChild(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+		c.Delete("nonexistentDir", true)
+	}()
+
+	c.CreateDir("fooDir", 5)
+
+	_, err := c.AddChild("fooDir", "v0", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = c.AddChild("fooDir", "v1", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.Get("fooDir", true, false)
+	// The child with v0 should precede the child with v1 because it was added
+	// earlier, so it should have a lower key.
+	if !(len(resp.Node.Nodes) == 2 && (resp.Node.Nodes[0].Value == "v0" && resp.Node.Nodes[1].Value == "v1")) {
+		t.Fatalf("AddChild 1 failed.  There should be two children whose values are v0 and v1, respectively."+
+			"  The response was: %#v", resp)
+	}
+
+	// Creating a child under a nonexistent directory should succeed.
+	// The directory should be created.
+	resp, err = c.AddChild("nonexistentDir", "foo", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestAddChildDir(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+		c.Delete("nonexistentDir", true)
+	}()
+
+	c.CreateDir("fooDir", 5)
+
+	_, err := c.AddChildDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = c.AddChildDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.Get("fooDir", true, false)
+	// Both children should be empty directories; the one added earlier gets
+	// the lower key.
+	if !(len(resp.Node.Nodes) == 2 && (len(resp.Node.Nodes[0].Nodes) == 0 && len(resp.Node.Nodes[1].Nodes) == 0)) {
+		t.Fatalf("AddChildDir 1 failed.  There should be two children, both of them empty directories."+
+			"  The response was: %#v", resp)
+	}
+
+	// Creating a child under a nonexistent directory should succeed.
+	// The directory should be created.
+	resp, err = c.AddChildDir("nonexistentDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+}

+ 481 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go

@@ -0,0 +1,481 @@
+package etcd
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"errors"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+	"time"
+)
+
+// See SetConsistency for how to use these constants.
+const (
+	// Using strings rather than iota because the consistency level
+	// could be persisted to disk, so it'd be better to use
+	// human-readable values.
+	STRONG_CONSISTENCY = "STRONG"
+	WEAK_CONSISTENCY   = "WEAK"
+)
+
+const (
+	defaultBufferSize = 10
+)
+
+func init() {
+	rand.Seed(int64(time.Now().Nanosecond()))
+}
+
+type Config struct {
+	CertFile    string        `json:"certFile"`
+	KeyFile     string        `json:"keyFile"`
+	CaCertFile  []string      `json:"caCertFiles"`
+	DialTimeout time.Duration `json:"timeout"`
+	Consistency string        `json:"consistency"`
+}
+
+type credentials struct {
+	username string
+	password string
+}
+
+type Client struct {
+	config      Config   `json:"config"`
+	cluster     *Cluster `json:"cluster"`
+	httpClient  *http.Client
+	credentials *credentials
+	transport   *http.Transport
+	persistence io.Writer
+	cURLch      chan string
+	// CheckRetry can be used to control the policy for failed requests
+	// and modify the cluster if needed.
+	// The client calls it before sending requests again, and
+	// stops retrying if CheckRetry returns some error. The cases this
+	// function needs to handle include no response and an unexpected HTTP
+	// status code in the response.
+	// If CheckRetry is nil, the client calls the default implementation,
+	// `DefaultCheckRetry`.
+	// Argument cluster is the etcd.Cluster object that these requests have been made on.
+	// Argument numReqs is the number of http.Requests that have been made so far.
+	// Argument lastResp is the http.Response from the last request.
+	// Argument err is the reason for the failure.
+	CheckRetry func(cluster *Cluster, numReqs int,
+		lastResp http.Response, err error) error
+}
+
+// NewClient creates a basic client that is configured to be used
+// with the given machine list.
+func NewClient(machines []string) *Client {
+	config := Config{
+		// default timeout is one second
+		DialTimeout: time.Second,
+		Consistency: WEAK_CONSISTENCY,
+	}
+
+	client := &Client{
+		cluster: NewCluster(machines),
+		config:  config,
+	}
+
+	client.initHTTPClient()
+	client.saveConfig()
+
+	return client
+}
+
+// NewTLSClient creates a basic client with TLS configuration.
+func NewTLSClient(machines []string, cert, key, caCert string) (*Client, error) {
+	// overwrite the default machine to use https
+	if len(machines) == 0 {
+		machines = []string{"https://127.0.0.1:4001"}
+	}
+
+	config := Config{
+		// default timeout is one second
+		DialTimeout: time.Second,
+		Consistency: WEAK_CONSISTENCY,
+		CertFile:    cert,
+		KeyFile:     key,
+		CaCertFile:  make([]string, 0),
+	}
+
+	client := &Client{
+		cluster: NewCluster(machines),
+		config:  config,
+	}
+
+	err := client.initHTTPSClient(cert, key)
+	if err != nil {
+		return nil, err
+	}
+
+	err = client.AddRootCA(caCert)
+
+	client.saveConfig()
+
+	return client, nil
+}
+
+// NewClientFromFile creates a client from a given file path.
+// The given file is expected to use the JSON format.
+func NewClientFromFile(fpath string) (*Client, error) {
+	fi, err := os.Open(fpath)
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err := fi.Close(); err != nil {
+			panic(err)
+		}
+	}()
+
+	return NewClientFromReader(fi)
+}
+
+// NewClientFromReader creates a Client configured from a given reader.
+// The configuration is expected to use the JSON format.
+func NewClientFromReader(reader io.Reader) (*Client, error) {
+	c := new(Client)
+
+	b, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}
+
+	err = json.Unmarshal(b, c)
+	if err != nil {
+		return nil, err
+	}
+	if c.config.CertFile == "" {
+		c.initHTTPClient()
+	} else {
+		err = c.initHTTPSClient(c.config.CertFile, c.config.KeyFile)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, caCert := range c.config.CaCertFile {
+		if err := c.AddRootCA(caCert); err != nil {
+			return nil, err
+		}
+	}
+
+	return c, nil
+}
+
+// SetTransport overrides the Client's HTTP Transport object.
+func (c *Client) SetTransport(tr *http.Transport) {
+	c.httpClient.Transport = tr
+	c.transport = tr
+}
+
+func (c *Client) SetCredentials(username, password string) {
+	c.credentials = &credentials{username, password}
+}
+
+func (c *Client) Close() {
+	c.transport.DisableKeepAlives = true
+	c.transport.CloseIdleConnections()
+}
+
+// initHTTPClient initializes an HTTP client for the etcd client
+func (c *Client) initHTTPClient() {
+	c.transport = &http.Transport{
+		Dial: c.dial,
+		TLSClientConfig: &tls.Config{
+			InsecureSkipVerify: true,
+		},
+	}
+	c.httpClient = &http.Client{Transport: c.transport}
+}
+
+// initHTTPSClient initializes an HTTPS client for the etcd client
+func (c *Client) initHTTPSClient(cert, key string) error {
+	if cert == "" || key == "" {
+		return errors.New("Require both cert and key path")
+	}
+
+	tlsCert, err := tls.LoadX509KeyPair(cert, key)
+	if err != nil {
+		return err
+	}
+
+	tlsConfig := &tls.Config{
+		Certificates:       []tls.Certificate{tlsCert},
+		InsecureSkipVerify: true,
+	}
+
+	tr := &http.Transport{
+		TLSClientConfig: tlsConfig,
+		Dial:            c.dial,
+	}
+
+	c.httpClient = &http.Client{Transport: tr}
+	return nil
+}
+
+// SetPersistence sets a writer to which the config will be
+// written every time it's changed.
+func (c *Client) SetPersistence(writer io.Writer) {
+	c.persistence = writer
+}
+
+// SetConsistency changes the consistency level of the client.
+//
+// When consistency is set to STRONG_CONSISTENCY, all requests,
+// including GET, are sent to the leader.  This means that, assuming
+// the absence of leader failures, GET requests are guaranteed to see
+// the changes made by previous requests.
+//
+// When consistency is set to WEAK_CONSISTENCY, other requests
+// are still sent to the leader, but GET requests are sent to a
+// random server from the server pool.  This reduces the read
+// load on the leader, but it's not guaranteed that the GET requests
+// will see changes made by previous requests (they might have not
+// yet been committed on non-leader servers).
+func (c *Client) SetConsistency(consistency string) error {
+	if !(consistency == STRONG_CONSISTENCY || consistency == WEAK_CONSISTENCY) {
+		return errors.New("The argument must be either STRONG_CONSISTENCY or WEAK_CONSISTENCY.")
+	}
+	c.config.Consistency = consistency
+	return nil
+}
+
+// SetDialTimeout sets the DialTimeout value
+func (c *Client) SetDialTimeout(d time.Duration) {
+	c.config.DialTimeout = d
+}
+
+// AddRootCA adds a root CA cert for the etcd client
+func (c *Client) AddRootCA(caCert string) error {
+	if c.httpClient == nil {
+		return errors.New("Client has not been initialized yet!")
+	}
+
+	certBytes, err := ioutil.ReadFile(caCert)
+	if err != nil {
+		return err
+	}
+
+	tr, ok := c.httpClient.Transport.(*http.Transport)
+
+	if !ok {
+		panic("AddRootCA(): Transport type assert should not fail")
+	}
+
+	if tr.TLSClientConfig.RootCAs == nil {
+		caCertPool := x509.NewCertPool()
+		ok = caCertPool.AppendCertsFromPEM(certBytes)
+		if ok {
+			tr.TLSClientConfig.RootCAs = caCertPool
+		}
+		tr.TLSClientConfig.InsecureSkipVerify = false
+	} else {
+		ok = tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(certBytes)
+	}
+
+	if !ok {
+		err = errors.New("Unable to load caCert")
+	}
+
+	c.config.CaCertFile = append(c.config.CaCertFile, caCert)
+	c.saveConfig()
+
+	return err
+}
+
+// SetCluster updates cluster information using the given machine list.
+func (c *Client) SetCluster(machines []string) bool {
+	success := c.internalSyncCluster(machines)
+	return success
+}
+
+func (c *Client) GetCluster() []string {
+	return c.cluster.Machines
+}
+
+// SyncCluster updates the cluster information using the internal machine list.
+func (c *Client) SyncCluster() bool {
+	return c.internalSyncCluster(c.cluster.Machines)
+}
+
+// internalSyncCluster syncs cluster information using the given machine list.
+func (c *Client) internalSyncCluster(machines []string) bool {
+	for _, machine := range machines {
+		httpPath := c.createHttpPath(machine, path.Join(version, "members"))
+		resp, err := c.httpClient.Get(httpPath)
+		if err != nil {
+			// try another machine in the cluster
+			continue
+		}
+
+		if resp.StatusCode != http.StatusOK { // fall-back to old endpoint
+			httpPath := c.createHttpPath(machine, path.Join(version, "machines"))
+			resp, err := c.httpClient.Get(httpPath)
+			if err != nil {
+				// try another machine in the cluster
+				continue
+			}
+			b, err := ioutil.ReadAll(resp.Body)
+			resp.Body.Close()
+			if err != nil {
+				// try another machine in the cluster
+				continue
+			}
+			// update Machines List
+			c.cluster.updateFromStr(string(b))
+		} else {
+			b, err := ioutil.ReadAll(resp.Body)
+			resp.Body.Close()
+			if err != nil {
+				// try another machine in the cluster
+				continue
+			}
+
+			var mCollection memberCollection
+			if err := json.Unmarshal(b, &mCollection); err != nil {
+				// try another machine
+				continue
+			}
+
+			urls := make([]string, 0)
+			for _, m := range mCollection {
+				urls = append(urls, m.ClientURLs...)
+			}
+
+			// update Machines List
+			c.cluster.updateFromStr(strings.Join(urls, ","))
+		}
+
+		logger.Debug("sync.machines ", c.cluster.Machines)
+		c.saveConfig()
+		return true
+	}
+
+	return false
+}
+
+// createHttpPath creates a complete HTTP URL.
+// serverName should contain both the host name and a port number, if any.
+func (c *Client) createHttpPath(serverName string, _path string) string {
+	u, err := url.Parse(serverName)
+	if err != nil {
+		panic(err)
+	}
+
+	u.Path = path.Join(u.Path, _path)
+
+	if u.Scheme == "" {
+		u.Scheme = "http"
+	}
+	return u.String()
+}
+
+// dial attempts to open a TCP connection to the provided address, explicitly
+// enabling keep-alives with a one-second interval.
+func (c *Client) dial(network, addr string) (net.Conn, error) {
+	conn, err := net.DialTimeout(network, addr, c.config.DialTimeout)
+	if err != nil {
+		return nil, err
+	}
+
+	tcpConn, ok := conn.(*net.TCPConn)
+	if !ok {
+		return nil, errors.New("Failed type-assertion of net.Conn as *net.TCPConn")
+	}
+
+	// Keep TCP alive to check whether or not the remote machine is down
+	if err = tcpConn.SetKeepAlive(true); err != nil {
+		return nil, err
+	}
+
+	if err = tcpConn.SetKeepAlivePeriod(time.Second); err != nil {
+		return nil, err
+	}
+
+	return tcpConn, nil
+}
+
+func (c *Client) OpenCURL() {
+	c.cURLch = make(chan string, defaultBufferSize)
+}
+
+func (c *Client) CloseCURL() {
+	c.cURLch = nil
+}
+
+func (c *Client) sendCURL(command string) {
+	go func() {
+		select {
+		case c.cURLch <- command:
+		default:
+		}
+	}()
+}
+
+func (c *Client) RecvCURL() string {
+	return <-c.cURLch
+}
+
+// saveConfig saves the current config using c.persistence.
+func (c *Client) saveConfig() error {
+	if c.persistence != nil {
+		b, err := json.Marshal(c)
+		if err != nil {
+			return err
+		}
+
+		_, err = c.persistence.Write(b)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MarshalJSON implements the Marshaller interface
+// as defined by the standard JSON package.
+func (c *Client) MarshalJSON() ([]byte, error) {
+	b, err := json.Marshal(struct {
+		Config  Config   `json:"config"`
+		Cluster *Cluster `json:"cluster"`
+	}{
+		Config:  c.config,
+		Cluster: c.cluster,
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
+// UnmarshalJSON implements the Unmarshaller interface
+// as defined by the standard JSON package.
+func (c *Client) UnmarshalJSON(b []byte) error {
+	temp := struct {
+		Config  Config   `json:"config"`
+		Cluster *Cluster `json:"cluster"`
+	}{}
+	err := json.Unmarshal(b, &temp)
+	if err != nil {
+		return err
+	}
+
+	c.cluster = temp.Cluster
+	c.config = temp.Config
+	return nil
+}
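
A hedged sketch of constructing and configuring this client; the endpoints and file path are assumptions and error handling is abbreviated:

package main

import (
	"log"
	"os"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	// Plain HTTP client against two machines; SyncCluster refreshes the
	// machine list from the cluster itself.
	c := etcd.NewClient([]string{"http://10.0.0.1:4001", "http://10.0.0.2:4001"})
	c.SyncCluster()

	// With strong consistency, GET requests also go to the leader.
	if err := c.SetConsistency(etcd.STRONG_CONSISTENCY); err != nil {
		log.Fatal(err)
	}

	// Write the config (machines, TLS files, consistency) to this file
	// whenever it changes; NewClientFromFile can restore it later.
	f, err := os.Create("/tmp/etcd-client.json")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	c.SetPersistence(f)
}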

+ 108 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go

@@ -0,0 +1,108 @@
+package etcd
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/url"
+	"os"
+	"testing"
+)
+
+// To pass this test, we need to create a cluster of 3 machines.
+// The servers should be listening on localhost:4001, 4002 and 4003.
+func TestSync(t *testing.T) {
+	fmt.Println("Make sure there are three nodes at 0.0.0.0:4001-4003")
+
+	// Explicit trailing slash to ensure this doesn't reproduce:
+	// https://github.com/coreos/go-etcd/issues/82
+	c := NewClient([]string{"http://127.0.0.1:4001/"})
+
+	success := c.SyncCluster()
+	if !success {
+		t.Fatal("cannot sync machines")
+	}
+
+	for _, m := range c.GetCluster() {
+		u, err := url.Parse(m)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if u.Scheme != "http" {
+			t.Fatal("scheme must be http")
+		}
+
+		host, _, err := net.SplitHostPort(u.Host)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if host != "localhost" {
+			t.Fatal("Host must be localhost")
+		}
+	}
+
+	badMachines := []string{"abc", "edef"}
+
+	success = c.SetCluster(badMachines)
+
+	if success {
+		t.Fatal("should not sync on bad machines")
+	}
+
+	goodMachines := []string{"127.0.0.1:4002"}
+
+	success = c.SetCluster(goodMachines)
+
+	if !success {
+		t.Fatal("cannot sync machines")
+	} else {
+		fmt.Println(c.cluster.Machines)
+	}
+
+}
+
+func TestPersistence(t *testing.T) {
+	c := NewClient(nil)
+	c.SyncCluster()
+
+	fo, err := os.Create("config.json")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := fo.Close(); err != nil {
+			panic(err)
+		}
+	}()
+
+	c.SetPersistence(fo)
+	err = c.saveConfig()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	c2, err := NewClientFromFile("config.json")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Verify that the two clients have the same config
+	b1, _ := json.Marshal(c)
+	b2, _ := json.Marshal(c2)
+
+	if string(b1) != string(b2) {
+		t.Fatalf("The two configs should be equal!")
+	}
+}
+
+func TestClientRetry(t *testing.T) {
+	c := NewClient([]string{"http://strange", "http://127.0.0.1:4001"})
+	// use first endpoint as the picked url
+	c.cluster.picked = 0
+	if _, err := c.Set("foo", "bar", 5); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := c.Delete("foo", true); err != nil {
+		t.Fatal(err)
+	}
+}

+ 37 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go

@@ -0,0 +1,37 @@
+package etcd
+
+import (
+	"math/rand"
+	"strings"
+)
+
+type Cluster struct {
+	Leader   string   `json:"leader"`
+	Machines []string `json:"machines"`
+	picked   int
+}
+
+func NewCluster(machines []string) *Cluster {
+	// if an empty slice was sent in then just assume HTTP 4001 on localhost
+	if len(machines) == 0 {
+		machines = []string{"http://127.0.0.1:4001"}
+	}
+
+	// default leader and machines
+	return &Cluster{
+		Leader:   "",
+		Machines: machines,
+		picked:   rand.Intn(len(machines)),
+	}
+}
+
+func (cl *Cluster) failure()     { cl.picked = rand.Intn(len(cl.Machines)) }
+func (cl *Cluster) pick() string { return cl.Machines[cl.picked] }
+
+func (cl *Cluster) updateFromStr(machines string) {
+	cl.Machines = strings.Split(machines, ",")
+	for i := range cl.Machines {
+		cl.Machines[i] = strings.TrimSpace(cl.Machines[i])
+	}
+	cl.picked = rand.Intn(len(cl.Machines))
+}

+ 34 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete.go

@@ -0,0 +1,34 @@
+package etcd
+
+import "fmt"
+
+func (c *Client) CompareAndDelete(key string, prevValue string, prevIndex uint64) (*Response, error) {
+	raw, err := c.RawCompareAndDelete(key, prevValue, prevIndex)
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+func (c *Client) RawCompareAndDelete(key string, prevValue string, prevIndex uint64) (*RawResponse, error) {
+	if prevValue == "" && prevIndex == 0 {
+		return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
+	}
+
+	options := Options{}
+	if prevValue != "" {
+		options["prevValue"] = prevValue
+	}
+	if prevIndex != 0 {
+		options["prevIndex"] = prevIndex
+	}
+
+	raw, err := c.delete(key, options)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw, err
+}
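
A hedged sketch of a guarded delete with CompareAndDelete; the key and value are illustrative, and at least one of prevValue/prevIndex must be given, as enforced above:

package main

import (
	"fmt"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	c := etcd.NewClient(nil) // nil falls back to http://127.0.0.1:4001

	c.Set("lock/owner", "worker-42", 30)

	// Delete only if the value is still what we wrote; prevIndex 0 means
	// the index is not checked.
	if _, err := c.CompareAndDelete("lock/owner", "worker-42", 0); err != nil {
		fmt.Println("someone else holds the lock:", err)
	}
}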

+ 46 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go

@@ -0,0 +1,46 @@
+package etcd
+
+import (
+	"testing"
+)
+
+func TestCompareAndDelete(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	c.Set("foo", "bar", 5)
+
+	// This should succeed with a correct prevValue
+	resp, err := c.CompareAndDelete("foo", "bar", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("CompareAndDelete 1 prevNode failed: %#v", resp)
+	}
+
+	resp, _ = c.Set("foo", "bar", 5)
+	// This should fail because it gives an incorrect prevValue
+	_, err = c.CompareAndDelete("foo", "xxx", 0)
+	if err == nil {
+		t.Fatalf("CompareAndDelete 2 should have failed.  The response is: %#v", resp)
+	}
+
+	// This should succeed because it gives a correct prevIndex
+	resp, err = c.CompareAndDelete("foo", "", resp.Node.ModifiedIndex)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("CompareAndDelete 3 prevNode failed: %#v", resp)
+	}
+
+	c.Set("foo", "bar", 5)
+	// This should fail because it gives an incorrect prevIndex
+	resp, err = c.CompareAndDelete("foo", "", 29817514)
+	if err == nil {
+		t.Fatalf("CompareAndDelete 4 should have failed.  The response is: %#v", resp)
+	}
+}

+ 36 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap.go

@@ -0,0 +1,36 @@
+package etcd
+
+import "fmt"
+
+func (c *Client) CompareAndSwap(key string, value string, ttl uint64,
+	prevValue string, prevIndex uint64) (*Response, error) {
+	raw, err := c.RawCompareAndSwap(key, value, ttl, prevValue, prevIndex)
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+func (c *Client) RawCompareAndSwap(key string, value string, ttl uint64,
+	prevValue string, prevIndex uint64) (*RawResponse, error) {
+	if prevValue == "" && prevIndex == 0 {
+		return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
+	}
+
+	options := Options{}
+	if prevValue != "" {
+		options["prevValue"] = prevValue
+	}
+	if prevIndex != 0 {
+		options["prevIndex"] = prevIndex
+	}
+
+	raw, err := c.put(key, value, ttl, options)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw, err
+}
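
A hedged sketch of an optimistic update with CompareAndSwap, guarding on ModifiedIndex rather than the previous value; the key, values and TTLs are illustrative:

package main

import (
	"fmt"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	c := etcd.NewClient(nil)

	resp, err := c.Set("config/mode", "blue", 0)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Swap to "green" only if the key has not been modified since our Set,
	// using ModifiedIndex as the guard instead of the previous value.
	if _, err := c.CompareAndSwap("config/mode", "green", 0, "", resp.Node.ModifiedIndex); err != nil {
		fmt.Println("lost the race:", err)
	}
}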

+ 57 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go

@@ -0,0 +1,57 @@
+package etcd
+
+import (
+	"testing"
+)
+
+func TestCompareAndSwap(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	c.Set("foo", "bar", 5)
+
+	// This should succeed
+	resp, err := c.CompareAndSwap("foo", "bar2", 5, "bar", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+		t.Fatalf("CompareAndSwap 1 failed: %#v", resp)
+	}
+
+	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("CompareAndSwap 1 prevNode failed: %#v", resp)
+	}
+
+	// This should fail because it gives an incorrect prevValue
+	resp, err = c.CompareAndSwap("foo", "bar3", 5, "xxx", 0)
+	if err == nil {
+		t.Fatalf("CompareAndSwap 2 should have failed.  The response is: %#v", resp)
+	}
+
+	resp, err = c.Set("foo", "bar", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This should succeed
+	resp, err = c.CompareAndSwap("foo", "bar2", 5, "", resp.Node.ModifiedIndex)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+		t.Fatalf("CompareAndSwap 3 failed: %#v", resp)
+	}
+
+	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
+	}
+
+	// This should fail because it gives an incorrect prevIndex
+	resp, err = c.CompareAndSwap("foo", "bar3", 5, "", 29817514)
+	if err == nil {
+		t.Fatalf("CompareAndSwap 4 should have failed.  The response is: %#v", resp)
+	}
+}

+ 55 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug.go

@@ -0,0 +1,55 @@
+package etcd
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"strings"
+)
+
+var logger *etcdLogger
+
+func SetLogger(l *log.Logger) {
+	logger = &etcdLogger{l}
+}
+
+func GetLogger() *log.Logger {
+	return logger.log
+}
+
+type etcdLogger struct {
+	log *log.Logger
+}
+
+func (p *etcdLogger) Debug(args ...interface{}) {
+	msg := "DEBUG: " + fmt.Sprint(args...)
+	p.log.Println(msg)
+}
+
+func (p *etcdLogger) Debugf(f string, args ...interface{}) {
+	msg := "DEBUG: " + fmt.Sprintf(f, args...)
+	// Append newline if necessary
+	if !strings.HasSuffix(msg, "\n") {
+		msg = msg + "\n"
+	}
+	p.log.Print(msg)
+}
+
+func (p *etcdLogger) Warning(args ...interface{}) {
+	msg := "WARNING: " + fmt.Sprint(args...)
+	p.log.Println(msg)
+}
+
+func (p *etcdLogger) Warningf(f string, args ...interface{}) {
+	msg := "WARNING: " + fmt.Sprintf(f, args...)
+	// Append newline if necessary
+	if !strings.HasSuffix(msg, "\n") {
+		msg = msg + "\n"
+	}
+	p.log.Print(msg)
+}
+
+func init() {
+	// Default logger uses the go default log.
+	SetLogger(log.New(ioutil.Discard, "go-etcd", log.LstdFlags))
+}
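
The package logger above discards everything by default; a hedged sketch of routing it to stderr instead (the prefix and flags are arbitrary choices):

package main

import (
	"log"
	"os"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	// Route go-etcd's Debug/Warning messages to stderr instead of the
	// ioutil.Discard writer installed by init above.
	etcd.SetLogger(log.New(os.Stderr, "go-etcd ", log.LstdFlags))

	c := etcd.NewClient(nil)
	c.SyncCluster() // logs a "sync.machines ..." debug line on success
}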

+ 28 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go

@@ -0,0 +1,28 @@
+package etcd
+
+import (
+	"testing"
+)
+
+type Foo struct{}
+type Bar struct {
+	one string
+	two int
+}
+
+// Tests that logs don't panic with arbitrary interfaces
+func TestDebug(t *testing.T) {
+	f := &Foo{}
+	b := &Bar{"asfd", 3}
+	for _, test := range []interface{}{
+		1234,
+		"asdf",
+		f,
+		b,
+	} {
+		logger.Debug(test)
+		logger.Debugf("something, %s", test)
+		logger.Warning(test)
+		logger.Warningf("something, %s", test)
+	}
+}

+ 40 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete.go

@@ -0,0 +1,40 @@
+package etcd
+
+// Delete deletes the given key.
+//
+// When recursive is set to false, if the key points to a
+// directory, the method will fail.
+//
+// When recursive is set to true, if the key points to a file,
+// the file will be deleted; if the key points to a directory,
+// then everything under the directory (including all child directories)
+// will be deleted.
+func (c *Client) Delete(key string, recursive bool) (*Response, error) {
+	raw, err := c.RawDelete(key, recursive, false)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// DeleteDir deletes an empty directory or a key-value pair.
+func (c *Client) DeleteDir(key string) (*Response, error) {
+	raw, err := c.RawDelete(key, false, true)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+func (c *Client) RawDelete(key string, recursive bool, dir bool) (*RawResponse, error) {
+	ops := Options{
+		"recursive": recursive,
+		"dir":       dir,
+	}
+
+	return c.delete(key, ops)
+}
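
A hedged sketch contrasting Delete and DeleteDir; the key names are illustrative:

package main

import (
	"fmt"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	c := etcd.NewClient(nil)

	c.Set("app/feature", "on", 0)
	c.CreateDir("app/cache", 0)

	// A non-recursive Delete removes a single key but refuses directories.
	if _, err := c.Delete("app/feature", false); err != nil {
		fmt.Println(err)
	}
	// DeleteDir removes an empty directory (or a key-value pair).
	if _, err := c.DeleteDir("app/cache"); err != nil {
		fmt.Println(err)
	}
	// A recursive Delete removes a directory and everything under it.
	if _, err := c.Delete("app", true); err != nil {
		fmt.Println(err)
	}
}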

+ 81 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go

@@ -0,0 +1,81 @@
+package etcd
+
+import (
+	"testing"
+)
+
+func TestDelete(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	c.Set("foo", "bar", 5)
+	resp, err := c.Delete("foo", false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Node.Value == "") {
+		t.Fatalf("Delete failed with %s", resp.Node.Value)
+	}
+
+	if !(resp.PrevNode.Value == "bar") {
+		t.Fatalf("Delete PrevNode failed with %s", resp.Node.Value)
+	}
+
+	resp, err = c.Delete("foo", false)
+	if err == nil {
+		t.Fatalf("Delete should have failed because the key foo did not exist.  "+
+			"The response was: %v", resp)
+	}
+}
+
+func TestDeleteAll(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+		c.Delete("fooDir", true)
+	}()
+
+	c.SetDir("foo", 5)
+	// test delete an empty dir
+	resp, err := c.DeleteDir("foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Node.Value == "") {
+		t.Fatalf("DeleteAll 1 failed: %#v", resp)
+	}
+
+	if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
+		t.Fatalf("DeleteAll 1 PrevNode failed: %#v", resp)
+	}
+
+	c.CreateDir("fooDir", 5)
+	c.Set("fooDir/foo", "bar", 5)
+	_, err = c.DeleteDir("fooDir")
+	if err == nil {
+		t.Fatal("should not be able to delete a non-empty dir with DeleteDir")
+	}
+
+	resp, err = c.Delete("fooDir", true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Node.Value == "") {
+		t.Fatalf("DeleteAll 2 failed: %#v", resp)
+	}
+
+	if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
+		t.Fatalf("DeleteAll 2 PrevNode failed: %#v", resp)
+	}
+
+	resp, err = c.Delete("foo", true)
+	if err == nil {
+		t.Fatalf("DeleteAll should have failed because the key foo did not exist.  "+
+			"The response was: %v", resp)
+	}
+}

+ 49 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/error.go

@@ -0,0 +1,49 @@
+package etcd
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+const (
+	ErrCodeEtcdNotReachable    = 501
+	ErrCodeUnhandledHTTPStatus = 502
+)
+
+var (
+	errorMap = map[int]string{
+		ErrCodeEtcdNotReachable: "All the given peers are not reachable",
+	}
+)
+
+type EtcdError struct {
+	ErrorCode int    `json:"errorCode"`
+	Message   string `json:"message"`
+	Cause     string `json:"cause,omitempty"`
+	Index     uint64 `json:"index"`
+}
+
+func (e EtcdError) Error() string {
+	return fmt.Sprintf("%v: %v (%v) [%v]", e.ErrorCode, e.Message, e.Cause, e.Index)
+}
+
+func newError(errorCode int, cause string, index uint64) *EtcdError {
+	return &EtcdError{
+		ErrorCode: errorCode,
+		Message:   errorMap[errorCode],
+		Cause:     cause,
+		Index:     index,
+	}
+}
+
+func handleError(b []byte) error {
+	etcdErr := new(EtcdError)
+
+	err := json.Unmarshal(b, etcdErr)
+	if err != nil {
+		logger.Warningf("cannot unmarshal etcd error: %v", err)
+		return err
+	}
+
+	return etcdErr
+}
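
etcd-level failures surface as *EtcdError values built by handleError, so callers can branch on the error code; a hedged sketch, assuming the request reaches etcd and the key is illustrative:

package main

import (
	"fmt"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	c := etcd.NewClient(nil)

	_, err := c.Get("does/not/exist", false, false)
	if etcdErr, ok := err.(*etcd.EtcdError); ok {
		// ErrorCode, Message, Cause and Index come straight from the JSON
		// error body decoded by handleError.
		fmt.Println(etcdErr.ErrorCode, etcdErr.Message, etcdErr.Cause)
	}
}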

+ 32 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get.go

@@ -0,0 +1,32 @@
+package etcd
+
+// Get gets the file or directory associated with the given key.
+// If the key points to a directory, files and directories under
+// it will be returned in sorted or unsorted order, depending on
+// the sort flag.
+// If recursive is set to false, contents under child directories
+// will not be returned.
+// If recursive is set to true, all the contents will be returned.
+func (c *Client) Get(key string, sort, recursive bool) (*Response, error) {
+	raw, err := c.RawGet(key, sort, recursive)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+func (c *Client) RawGet(key string, sort, recursive bool) (*RawResponse, error) {
+	var q bool
+	if c.config.Consistency == STRONG_CONSISTENCY {
+		q = true
+	}
+	ops := Options{
+		"recursive": recursive,
+		"sorted":    sort,
+		"quorum":    q,
+	}
+
+	return c.get(key, ops)
+}
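
A hedged sketch of reading a directory with Get; with STRONG_CONSISTENCY the quorum option is added, as RawGet shows above. The key names are illustrative:

package main

import (
	"fmt"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	c := etcd.NewClient(nil)

	c.Set("settings/a", "1", 0)
	c.Set("settings/b", "2", 0)

	// sort=true returns the children in sorted order; recursive=false stops
	// at the first level of the directory.
	resp, err := c.Get("settings", true, false)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, n := range resp.Node.Nodes {
		fmt.Println(n.Key, "=", n.Value)
	}
}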

+ 131 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go

@@ -0,0 +1,131 @@
+package etcd
+
+import (
+	"reflect"
+	"testing"
+)
+
+// cleanNode scrubs Expiration, ModifiedIndex and CreatedIndex of a node.
+func cleanNode(n *Node) {
+	n.Expiration = nil
+	n.ModifiedIndex = 0
+	n.CreatedIndex = 0
+}
+
+// cleanResult scrubs a result object two levels deep of Expiration,
+// ModifiedIndex and CreatedIndex.
+func cleanResult(result *Response) {
+	//  TODO(philips): make this recursive.
+	cleanNode(result.Node)
+	for i := range result.Node.Nodes {
+		cleanNode(result.Node.Nodes[i])
+		for j := range result.Node.Nodes[i].Nodes {
+			cleanNode(result.Node.Nodes[i].Nodes[j])
+		}
+	}
+}
+
+func TestGet(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	c.Set("foo", "bar", 5)
+
+	result, err := c.Get("foo", false, false)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if result.Node.Key != "/foo" || result.Node.Value != "bar" {
+		t.Fatalf("Get failed with %s %s %v", result.Node.Key, result.Node.Value, result.Node.TTL)
+	}
+
+	result, err = c.Get("goo", false, false)
+	if err == nil {
+		t.Fatalf("should not be able to get non-exist key")
+	}
+}
+
+func TestGetAll(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+	}()
+
+	c.CreateDir("fooDir", 5)
+	c.Set("fooDir/k0", "v0", 5)
+	c.Set("fooDir/k1", "v1", 5)
+
+	// Return kv-pairs in sorted order
+	result, err := c.Get("fooDir", true, false)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected := Nodes{
+		&Node{
+			Key:   "/fooDir/k0",
+			Value: "v0",
+			TTL:   5,
+		},
+		&Node{
+			Key:   "/fooDir/k1",
+			Value: "v1",
+			TTL:   5,
+		},
+	}
+
+	cleanResult(result)
+
+	if !reflect.DeepEqual(result.Node.Nodes, expected) {
+		t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
+	}
+
+	// Test the `recursive` option
+	c.CreateDir("fooDir/childDir", 5)
+	c.Set("fooDir/childDir/k2", "v2", 5)
+
+	// Return kv-pairs in sorted order
+	result, err = c.Get("fooDir", true, true)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cleanResult(result)
+
+	expected = Nodes{
+		&Node{
+			Key: "/fooDir/childDir",
+			Dir: true,
+			Nodes: Nodes{
+				&Node{
+					Key:   "/fooDir/childDir/k2",
+					Value: "v2",
+					TTL:   5,
+				},
+			},
+			TTL: 5,
+		},
+		&Node{
+			Key:   "/fooDir/k0",
+			Value: "v0",
+			TTL:   5,
+		},
+		&Node{
+			Key:   "/fooDir/k1",
+			Value: "v1",
+			TTL:   5,
+		},
+	}
+
+	cleanResult(result)
+
+	if !reflect.DeepEqual(result.Node.Nodes, expected) {
+		t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
+	}
+}

+ 30 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member.go

@@ -0,0 +1,30 @@
+package etcd
+
+import "encoding/json"
+
+type Member struct {
+	ID         string   `json:"id"`
+	Name       string   `json:"name"`
+	PeerURLs   []string `json:"peerURLs"`
+	ClientURLs []string `json:"clientURLs"`
+}
+
+type memberCollection []Member
+
+func (c *memberCollection) UnmarshalJSON(data []byte) error {
+	d := struct {
+		Members []Member
+	}{}
+
+	if err := json.Unmarshal(data, &d); err != nil {
+		return err
+	}
+
+	if d.Members == nil {
+		*c = make([]Member, 0)
+		return nil
+	}
+
+	*c = d.Members
+	return nil
+}

+ 71 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member_test.go

@@ -0,0 +1,71 @@
+package etcd
+
+import (
+	"encoding/json"
+	"reflect"
+	"testing"
+)
+
+func TestMemberCollectionUnmarshal(t *testing.T) {
+	tests := []struct {
+		body []byte
+		want memberCollection
+	}{
+		{
+			body: []byte(`{"members":[]}`),
+			want: memberCollection([]Member{}),
+		},
+		{
+			body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+			want: memberCollection(
+				[]Member{
+					{
+						ID:   "2745e2525fce8fe",
+						Name: "node3",
+						PeerURLs: []string{
+							"http://127.0.0.1:7003",
+						},
+						ClientURLs: []string{
+							"http://127.0.0.1:4003",
+						},
+					},
+					{
+						ID:   "42134f434382925",
+						Name: "node1",
+						PeerURLs: []string{
+							"http://127.0.0.1:2380",
+							"http://127.0.0.1:7001",
+						},
+						ClientURLs: []string{
+							"http://127.0.0.1:2379",
+							"http://127.0.0.1:4001",
+						},
+					},
+					{
+						ID:   "94088180e21eb87b",
+						Name: "node2",
+						PeerURLs: []string{
+							"http://127.0.0.1:7002",
+						},
+						ClientURLs: []string{
+							"http://127.0.0.1:4002",
+						},
+					},
+				},
+			),
+		},
+	}
+
+	for i, tt := range tests {
+		var got memberCollection
+		err := json.Unmarshal(tt.body, &got)
+		if err != nil {
+			t.Errorf("#%d: unexpected error: %v", i, err)
+			continue
+		}
+
+		if !reflect.DeepEqual(tt.want, got) {
+			t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.want, got)
+		}
+	}
+}

+ 72 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/options.go

@@ -0,0 +1,72 @@
+package etcd
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+)
+
+type Options map[string]interface{}
+
+// An internally-used data structure that represents a mapping
+// between valid options and their kinds
+type validOptions map[string]reflect.Kind
+
+// Valid options for GET, PUT, POST, DELETE
+// Using CAPITALIZED_UNDERSCORE to emphasize that these
+// values are meant to be used as constants.
+var (
+	VALID_GET_OPTIONS = validOptions{
+		"recursive": reflect.Bool,
+		"quorum":    reflect.Bool,
+		"sorted":    reflect.Bool,
+		"wait":      reflect.Bool,
+		"waitIndex": reflect.Uint64,
+	}
+
+	VALID_PUT_OPTIONS = validOptions{
+		"prevValue": reflect.String,
+		"prevIndex": reflect.Uint64,
+		"prevExist": reflect.Bool,
+		"dir":       reflect.Bool,
+	}
+
+	VALID_POST_OPTIONS = validOptions{}
+
+	VALID_DELETE_OPTIONS = validOptions{
+		"recursive": reflect.Bool,
+		"dir":       reflect.Bool,
+		"prevValue": reflect.String,
+		"prevIndex": reflect.Uint64,
+	}
+)
+
+// Convert options to a string of URL query parameters
+func (ops Options) toParameters(validOps validOptions) (string, error) {
+	p := "?"
+	values := url.Values{}
+
+	if ops == nil {
+		return "", nil
+	}
+
+	for k, v := range ops {
+		// Check if the given option is valid (that it exists)
+		kind := validOps[k]
+		if kind == reflect.Invalid {
+			return "", fmt.Errorf("Invalid option: %v", k)
+		}
+
+		// Check if the given option is of the valid type
+		t := reflect.TypeOf(v)
+		if kind != t.Kind() {
+			return "", fmt.Errorf("Option %s should be of %v kind, not of %v kind.",
+				k, kind, t.Kind())
+		}
+
+		values.Set(k, fmt.Sprintf("%v", v))
+	}
+
+	p += values.Encode()
+	return p, nil
+}
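
The helper above is unexported, so an illustrative sketch of the equivalent conversion using only the standard library: the option names become `url.Values` keys, and `Encode` sorts them alphabetically, which is why `RawGet`'s strongly consistent, non-recursive, unsorted read produces the query string shown.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Mirrors the Options that RawGet builds for a quorum read.
	values := url.Values{}
	values.Set("recursive", fmt.Sprintf("%v", false))
	values.Set("sorted", fmt.Sprintf("%v", false))
	values.Set("quorum", fmt.Sprintf("%v", true))

	fmt.Println("?" + values.Encode())
	// Output: ?quorum=true&recursive=false&sorted=false
}
```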

+ 403 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go

@@ -0,0 +1,403 @@
+package etcd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"path"
+	"strings"
+	"sync"
+	"time"
+)
+
+// Errors introduced by handling requests
+var (
+	ErrRequestCancelled = errors.New("sending request is cancelled")
+)
+
+type RawRequest struct {
+	Method       string
+	RelativePath string
+	Values       url.Values
+	Cancel       <-chan bool
+}
+
+// NewRawRequest returns a new RawRequest
+func NewRawRequest(method, relativePath string, values url.Values, cancel <-chan bool) *RawRequest {
+	return &RawRequest{
+		Method:       method,
+		RelativePath: relativePath,
+		Values:       values,
+		Cancel:       cancel,
+	}
+}
+
+// getCancelable issues a cancelable GET request
+func (c *Client) getCancelable(key string, options Options,
+	cancel <-chan bool) (*RawResponse, error) {
+	logger.Debugf("get %s [%s]", key, c.cluster.pick())
+	p := keyToPath(key)
+
+	str, err := options.toParameters(VALID_GET_OPTIONS)
+	if err != nil {
+		return nil, err
+	}
+	p += str
+
+	req := NewRawRequest("GET", p, nil, cancel)
+	resp, err := c.SendRequest(req)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+// get issues a GET request
+func (c *Client) get(key string, options Options) (*RawResponse, error) {
+	return c.getCancelable(key, options, nil)
+}
+
+// put issues a PUT request
+func (c *Client) put(key string, value string, ttl uint64,
+	options Options) (*RawResponse, error) {
+
+	logger.Debugf("put %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick())
+	p := keyToPath(key)
+
+	str, err := options.toParameters(VALID_PUT_OPTIONS)
+	if err != nil {
+		return nil, err
+	}
+	p += str
+
+	req := NewRawRequest("PUT", p, buildValues(value, ttl), nil)
+	resp, err := c.SendRequest(req)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+// post issues a POST request
+func (c *Client) post(key string, value string, ttl uint64) (*RawResponse, error) {
+	logger.Debugf("post %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick())
+	p := keyToPath(key)
+
+	req := NewRawRequest("POST", p, buildValues(value, ttl), nil)
+	resp, err := c.SendRequest(req)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+// delete issues a DELETE request
+func (c *Client) delete(key string, options Options) (*RawResponse, error) {
+	logger.Debugf("delete %s [%s]", key, c.cluster.pick())
+	p := keyToPath(key)
+
+	str, err := options.toParameters(VALID_DELETE_OPTIONS)
+	if err != nil {
+		return nil, err
+	}
+	p += str
+
+	req := NewRawRequest("DELETE", p, nil, nil)
+	resp, err := c.SendRequest(req)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+// SendRequest sends an HTTP request and returns a RawResponse as defined by etcd
+func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) {
+	var req *http.Request
+	var resp *http.Response
+	var httpPath string
+	var err error
+	var respBody []byte
+
+	var numReqs = 1
+
+	checkRetry := c.CheckRetry
+	if checkRetry == nil {
+		checkRetry = DefaultCheckRetry
+	}
+
+	cancelled := make(chan bool, 1)
+	reqLock := new(sync.Mutex)
+
+	if rr.Cancel != nil {
+		cancelRoutine := make(chan bool)
+		defer close(cancelRoutine)
+
+		go func() {
+			select {
+			case <-rr.Cancel:
+				cancelled <- true
+				logger.Debug("send.request is cancelled")
+			case <-cancelRoutine:
+				return
+			}
+
+			// Keep canceling the request until this goroutine is stopped,
+			// because we have no way of knowing whether the cancellation succeeded.
+			for {
+				reqLock.Lock()
+				c.httpClient.Transport.(*http.Transport).CancelRequest(req)
+				reqLock.Unlock()
+
+				select {
+				case <-time.After(100 * time.Millisecond):
+				case <-cancelRoutine:
+					return
+				}
+			}
+		}()
+	}
+
+	// If we connect to a follower and consistency is required, retry until
+	// we connect to a leader
+	sleep := 25 * time.Millisecond
+	maxSleep := time.Second
+
+	for attempt := 0; ; attempt++ {
+		if attempt > 0 {
+			select {
+			case <-cancelled:
+				return nil, ErrRequestCancelled
+			case <-time.After(sleep):
+				sleep = sleep * 2
+				if sleep > maxSleep {
+					sleep = maxSleep
+				}
+			}
+		}
+
+		logger.Debug("Connecting to etcd: attempt ", attempt+1, " for ", rr.RelativePath)
+
+		// get httpPath if not set
+		if httpPath == "" {
+			httpPath = c.getHttpPath(rr.RelativePath)
+		}
+
+		// Return a cURL command if curlChan is set
+		if c.cURLch != nil {
+			command := fmt.Sprintf("curl -X %s %s", rr.Method, httpPath)
+			for key, value := range rr.Values {
+				command += fmt.Sprintf(" -d %s=%s", key, value[0])
+			}
+			if c.credentials != nil {
+				command += fmt.Sprintf(" -u %s", c.credentials.username)
+			}
+			c.sendCURL(command)
+		}
+
+		logger.Debug("send.request.to ", httpPath, " | method ", rr.Method)
+
+		req, err := func() (*http.Request, error) {
+			reqLock.Lock()
+			defer reqLock.Unlock()
+
+			if rr.Values == nil {
+				if req, err = http.NewRequest(rr.Method, httpPath, nil); err != nil {
+					return nil, err
+				}
+			} else {
+				body := strings.NewReader(rr.Values.Encode())
+				if req, err = http.NewRequest(rr.Method, httpPath, body); err != nil {
+					return nil, err
+				}
+
+				req.Header.Set("Content-Type",
+					"application/x-www-form-urlencoded; param=value")
+			}
+			return req, nil
+		}()
+
+		if err != nil {
+			return nil, err
+		}
+
+		if c.credentials != nil {
+			req.SetBasicAuth(c.credentials.username, c.credentials.password)
+		}
+
+		resp, err = c.httpClient.Do(req)
+		// clear previous httpPath
+		httpPath = ""
+		defer func() {
+			if resp != nil {
+				resp.Body.Close()
+			}
+		}()
+
+		// If the request was cancelled, return ErrRequestCancelled directly
+		select {
+		case <-cancelled:
+			return nil, ErrRequestCancelled
+		default:
+		}
+
+		numReqs++
+
+		// network error, change a machine!
+		if err != nil {
+			logger.Debug("network error: ", err.Error())
+			lastResp := http.Response{}
+			if checkErr := checkRetry(c.cluster, numReqs, lastResp, err); checkErr != nil {
+				return nil, checkErr
+			}
+
+			c.cluster.failure()
+			continue
+		}
+
+		// if there was no error, we should have received a response
+		logger.Debug("recv.response.from ", httpPath)
+
+		if validHttpStatusCode[resp.StatusCode] {
+			// try to read byte code and break the loop
+			respBody, err = ioutil.ReadAll(resp.Body)
+			if err == nil {
+				logger.Debug("recv.success ", httpPath)
+				break
+			}
+			// a ReadAll error may be caused by a cancelled request
+			select {
+			case <-cancelled:
+				return nil, ErrRequestCancelled
+			default:
+			}
+
+			if err == io.ErrUnexpectedEOF {
+				// underlying connection was closed prematurely, probably by timeout
+				// TODO: empty body or unexpectedEOF can cause http.Transport to get hosed;
+				// this allows the client to detect that and take evasive action. Need
+				// to revisit once code.google.com/p/go/issues/detail?id=8648 gets fixed.
+				respBody = []byte{}
+				break
+			}
+		}
+
+		if resp.StatusCode == http.StatusTemporaryRedirect {
+			u, err := resp.Location()
+
+			if err != nil {
+				logger.Warning(err)
+			} else {
+				// set httpPath for following redirection
+				httpPath = u.String()
+			}
+			resp.Body.Close()
+			continue
+		}
+
+		if checkErr := checkRetry(c.cluster, numReqs, *resp,
+			errors.New("Unexpected HTTP status code")); checkErr != nil {
+			return nil, checkErr
+		}
+		resp.Body.Close()
+	}
+
+	r := &RawResponse{
+		StatusCode: resp.StatusCode,
+		Body:       respBody,
+		Header:     resp.Header,
+	}
+
+	return r, nil
+}
+
+// DefaultCheckRetry defines the retrying behaviour for bad HTTP requests.
+// If we have retried 2 * the number of machines, stop retrying.
+// If status code is InternalServerError, sleep for 200ms.
+func DefaultCheckRetry(cluster *Cluster, numReqs int, lastResp http.Response,
+	err error) error {
+
+	if numReqs > 2*len(cluster.Machines) {
+		errStr := fmt.Sprintf("failed to propose on members %v twice [last error: %v]", cluster.Machines, err)
+		return newError(ErrCodeEtcdNotReachable, errStr, 0)
+	}
+
+	if isEmptyResponse(lastResp) {
+		// always retry if it failed to get response from one machine
+		return nil
+	}
+	if !shouldRetry(lastResp) {
+		body := []byte("nil")
+		if lastResp.Body != nil {
+			if b, err := ioutil.ReadAll(lastResp.Body); err == nil {
+				body = b
+			}
+		}
+		errStr := fmt.Sprintf("unhandled http status [%s] with body [%s]", http.StatusText(lastResp.StatusCode), body)
+		return newError(ErrCodeUnhandledHTTPStatus, errStr, 0)
+	}
+	// sleep for a while and expect the leader election to finish
+	time.Sleep(time.Millisecond * 200)
+	logger.Warning("bad response status code", lastResp.StatusCode)
+	return nil
+}
+
+func isEmptyResponse(r http.Response) bool { return r.StatusCode == 0 }
+
+// shouldRetry returns whether the response deserves a retry.
+func shouldRetry(r http.Response) bool {
+	// TODO: only retry when the cluster is in leader election
+	// We cannot do it exactly because etcd doesn't support it well.
+	return r.StatusCode == http.StatusInternalServerError
+}
+
+func (c *Client) getHttpPath(s ...string) string {
+	fullPath := c.cluster.pick() + "/" + version
+	for _, seg := range s {
+		fullPath = fullPath + "/" + seg
+	}
+	return fullPath
+}
+
+// buildValues builds a url.Values map according to the given value and ttl
+func buildValues(value string, ttl uint64) url.Values {
+	v := url.Values{}
+
+	if value != "" {
+		v.Set("value", value)
+	}
+
+	if ttl > 0 {
+		v.Set("ttl", fmt.Sprintf("%v", ttl))
+	}
+
+	return v
+}
+
+// keyToPath converts a key string to an HTTP path (excluding the version), including URL escaping.
+// For example: key[foo] -> path[keys/foo]
+// key[/%z] -> path[keys/%25z]
+// key[/] -> path[keys/]
+func keyToPath(key string) string {
+	// URL-escape our key, except for slashes
+	p := strings.Replace(url.QueryEscape(path.Join("keys", key)), "%2F", "/", -1)
+
+	// corner case: if key is "/" or "//" etc.,
+	// path.Join will clear the trailing "/";
+	// we need to add it back
+	if p == "keys" {
+		p = "keys/"
+	}
+
+	return p
+}
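
SendRequest defers its give-up decision to `Client.CheckRetry` and falls back to `DefaultCheckRetry` when that field is nil. A hedged sketch of installing a stricter policy, assuming the field shares `DefaultCheckRetry`'s signature (which is how SendRequest invokes it above):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	c := etcd.NewClient(nil)

	// Give up after a single pass over the cluster instead of two,
	// delegating everything else to the default behaviour.
	c.CheckRetry = func(cluster *etcd.Cluster, numReqs int,
		lastResp http.Response, err error) error {
		if numReqs > len(cluster.Machines) {
			return fmt.Errorf("giving up after %d requests: %v", numReqs, err)
		}
		return etcd.DefaultCheckRetry(cluster, numReqs, lastResp, err)
	}

	_, _ = c.Get("foo", false, false)
}
```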

+ 22 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests_test.go

@@ -0,0 +1,22 @@
+package etcd
+
+import "testing"
+
+func TestKeyToPath(t *testing.T) {
+	tests := []struct {
+		key   string
+		wpath string
+	}{
+		{"", "keys/"},
+		{"foo", "keys/foo"},
+		{"foo/bar", "keys/foo/bar"},
+		{"%z", "keys/%25z"},
+		{"/", "keys/"},
+	}
+	for i, tt := range tests {
+		path := keyToPath(tt.key)
+		if path != tt.wpath {
+			t.Errorf("#%d: path = %s, want %s", i, path, tt.wpath)
+		}
+	}
+}

+ 89 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.go

@@ -0,0 +1,89 @@
+package etcd
+
+import (
+	"encoding/json"
+	"net/http"
+	"strconv"
+	"time"
+)
+
+const (
+	rawResponse = iota
+	normalResponse
+)
+
+type responseType int
+
+type RawResponse struct {
+	StatusCode int
+	Body       []byte
+	Header     http.Header
+}
+
+var (
+	validHttpStatusCode = map[int]bool{
+		http.StatusCreated:            true,
+		http.StatusOK:                 true,
+		http.StatusBadRequest:         true,
+		http.StatusNotFound:           true,
+		http.StatusPreconditionFailed: true,
+		http.StatusForbidden:          true,
+	}
+)
+
+// Unmarshal parses the RawResponse body and returns the resulting Response
+func (rr *RawResponse) Unmarshal() (*Response, error) {
+	if rr.StatusCode != http.StatusOK && rr.StatusCode != http.StatusCreated {
+		return nil, handleError(rr.Body)
+	}
+
+	resp := new(Response)
+
+	err := json.Unmarshal(rr.Body, resp)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// attach index and term to response
+	resp.EtcdIndex, _ = strconv.ParseUint(rr.Header.Get("X-Etcd-Index"), 10, 64)
+	resp.RaftIndex, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Index"), 10, 64)
+	resp.RaftTerm, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Term"), 10, 64)
+
+	return resp, nil
+}
+
+type Response struct {
+	Action    string `json:"action"`
+	Node      *Node  `json:"node"`
+	PrevNode  *Node  `json:"prevNode,omitempty"`
+	EtcdIndex uint64 `json:"etcdIndex"`
+	RaftIndex uint64 `json:"raftIndex"`
+	RaftTerm  uint64 `json:"raftTerm"`
+}
+
+type Node struct {
+	Key           string     `json:"key,omitempty"`
+	Value         string     `json:"value,omitempty"`
+	Dir           bool       `json:"dir,omitempty"`
+	Expiration    *time.Time `json:"expiration,omitempty"`
+	TTL           int64      `json:"ttl,omitempty"`
+	Nodes         Nodes      `json:"nodes,omitempty"`
+	ModifiedIndex uint64     `json:"modifiedIndex,omitempty"`
+	CreatedIndex  uint64     `json:"createdIndex,omitempty"`
+}
+
+type Nodes []*Node
+
+// interfaces for sorting
+func (ns Nodes) Len() int {
+	return len(ns)
+}
+
+func (ns Nodes) Less(i, j int) bool {
+	return ns[i].Key < ns[j].Key
+}
+
+func (ns Nodes) Swap(i, j int) {
+	ns[i], ns[j] = ns[j], ns[i]
+}
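
Because `Nodes` implements `sort.Interface` keyed on `Node.Key` (the Len/Less/Swap methods above), a response's children can be re-sorted client-side when a request was made with `sort=false`. A minimal sketch with illustrative values:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	nodes := etcd.Nodes{
		&etcd.Node{Key: "/dir/b", Value: "2"},
		&etcd.Node{Key: "/dir/a", Value: "1"},
	}

	// Orders nodes by key using the methods defined on Nodes.
	sort.Sort(nodes)

	for _, n := range nodes {
		fmt.Println(n.Key, "=", n.Value) // /dir/a first, then /dir/b
	}
}
```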

+ 42 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go

@@ -0,0 +1,42 @@
+package etcd
+
+import (
+	"fmt"
+	"testing"
+)
+
+func TestSetCurlChan(t *testing.T) {
+	c := NewClient(nil)
+	c.OpenCURL()
+
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	_, err := c.Set("foo", "bar", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected := fmt.Sprintf("curl -X PUT %s/v2/keys/foo -d value=bar -d ttl=5",
+		c.cluster.pick())
+	actual := c.RecvCURL()
+	if expected != actual {
+		t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
+			actual, expected)
+	}
+
+	c.SetConsistency(STRONG_CONSISTENCY)
+	_, err = c.Get("foo", false, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected = fmt.Sprintf("curl -X GET %s/v2/keys/foo?quorum=true&recursive=false&sorted=false",
+		c.cluster.pick())
+	actual = c.RecvCURL()
+	if expected != actual {
+		t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
+			actual, expected)
+	}
+}

+ 137 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create.go

@@ -0,0 +1,137 @@
+package etcd
+
+// Set sets the given key to the given value.
+// It will create a new key value pair or replace the old one.
+// It will not replace an existing directory.
+func (c *Client) Set(key string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.RawSet(key, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// SetDir sets the given key to a directory.
+// It will create a new directory or replace the old key-value pair with a directory.
+// It will not replace an existing directory.
+func (c *Client) SetDir(key string, ttl uint64) (*Response, error) {
+	raw, err := c.RawSetDir(key, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// CreateDir creates a directory. It succeeds only if
+// the given key does not yet exist.
+func (c *Client) CreateDir(key string, ttl uint64) (*Response, error) {
+	raw, err := c.RawCreateDir(key, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// UpdateDir updates the given directory. It succeeds only if the
+// given key already exists.
+func (c *Client) UpdateDir(key string, ttl uint64) (*Response, error) {
+	raw, err := c.RawUpdateDir(key, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// Create creates a file with the given value under the given key.  It succeeds
+// only if the given key does not yet exist.
+func (c *Client) Create(key string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.RawCreate(key, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// CreateInOrder creates a file with a key that's guaranteed to be higher than other
+// keys in the given directory. It is useful for creating queues.
+func (c *Client) CreateInOrder(dir string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.RawCreateInOrder(dir, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// Update updates the given key to the given value.  It succeeds only if the
+// given key already exists.
+func (c *Client) Update(key string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.RawUpdate(key, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+func (c *Client) RawUpdateDir(key string, ttl uint64) (*RawResponse, error) {
+	ops := Options{
+		"prevExist": true,
+		"dir":       true,
+	}
+
+	return c.put(key, "", ttl, ops)
+}
+
+func (c *Client) RawCreateDir(key string, ttl uint64) (*RawResponse, error) {
+	ops := Options{
+		"prevExist": false,
+		"dir":       true,
+	}
+
+	return c.put(key, "", ttl, ops)
+}
+
+func (c *Client) RawSet(key string, value string, ttl uint64) (*RawResponse, error) {
+	return c.put(key, value, ttl, nil)
+}
+
+func (c *Client) RawSetDir(key string, ttl uint64) (*RawResponse, error) {
+	ops := Options{
+		"dir": true,
+	}
+
+	return c.put(key, "", ttl, ops)
+}
+
+func (c *Client) RawUpdate(key string, value string, ttl uint64) (*RawResponse, error) {
+	ops := Options{
+		"prevExist": true,
+	}
+
+	return c.put(key, value, ttl, ops)
+}
+
+func (c *Client) RawCreate(key string, value string, ttl uint64) (*RawResponse, error) {
+	ops := Options{
+		"prevExist": false,
+	}
+
+	return c.put(key, value, ttl, ops)
+}
+
+func (c *Client) RawCreateInOrder(dir string, value string, ttl uint64) (*RawResponse, error) {
+	return c.post(dir, value, ttl)
+}

+ 241 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go

@@ -0,0 +1,241 @@
+package etcd
+
+import (
+	"testing"
+)
+
+func TestSet(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	resp, err := c.Set("foo", "bar", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Node.Key != "/foo" || resp.Node.Value != "bar" || resp.Node.TTL != 5 {
+		t.Fatalf("Set 1 failed: %#v", resp)
+	}
+	if resp.PrevNode != nil {
+		t.Fatalf("Set 1 PrevNode failed: %#v", resp)
+	}
+
+	resp, err = c.Set("foo", "bar2", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/foo" && resp.Node.Value == "bar2" && resp.Node.TTL == 5) {
+		t.Fatalf("Set 2 failed: %#v", resp)
+	}
+	if resp.PrevNode.Key != "/foo" || resp.PrevNode.Value != "bar" || resp.Node.TTL != 5 {
+		t.Fatalf("Set 2 PrevNode failed: %#v", resp)
+	}
+}
+
+func TestUpdate(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+		c.Delete("nonexistent", true)
+	}()
+
+	resp, err := c.Set("foo", "bar", 5)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This should succeed.
+	resp, err = c.Update("foo", "wakawaka", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "update" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+		t.Fatalf("Update 1 failed: %#v", resp)
+	}
+	if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.Node.TTL == 5) {
+		t.Fatalf("Update 1 prevValue failed: %#v", resp)
+	}
+
+	// This should fail because the key does not exist.
+	resp, err = c.Update("nonexistent", "whatever", 5)
+	if err == nil {
+		t.Fatalf("The key %v did not exist, so the update should have failed."+
+			"The response was: %#v", resp.Node.Key, resp)
+	}
+}
+
+func TestCreate(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("newKey", true)
+	}()
+
+	newKey := "/newKey"
+	newValue := "/newValue"
+
+	// This should succeed
+	resp, err := c.Create(newKey, newValue, 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "create" && resp.Node.Key == newKey &&
+		resp.Node.Value == newValue && resp.Node.TTL == 5) {
+		t.Fatalf("Create 1 failed: %#v", resp)
+	}
+	if resp.PrevNode != nil {
+		t.Fatalf("Create 1 PrevNode failed: %#v", resp)
+	}
+
+	// This should fail, because the key is already there
+	resp, err = c.Create(newKey, newValue, 5)
+	if err == nil {
+		t.Fatalf("The key %v did exist, so the creation should have failed."+
+			"The response was: %#v", resp.Node.Key, resp)
+	}
+}
+
+func TestCreateInOrder(t *testing.T) {
+	c := NewClient(nil)
+	dir := "/queue"
+	defer func() {
+		c.DeleteDir(dir)
+	}()
+
+	var firstKey, secondKey string
+
+	resp, err := c.CreateInOrder(dir, "1", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "create" && resp.Node.Value == "1" && resp.Node.TTL == 5) {
+		t.Fatalf("Create 1 failed: %#v", resp)
+	}
+
+	firstKey = resp.Node.Key
+
+	resp, err = c.CreateInOrder(dir, "2", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "create" && resp.Node.Value == "2" && resp.Node.TTL == 5) {
+		t.Fatalf("Create 2 failed: %#v", resp)
+	}
+
+	secondKey = resp.Node.Key
+
+	if firstKey >= secondKey {
+		t.Fatalf("Expected first key to be greater than second key, but %s is not greater than %s",
+			firstKey, secondKey)
+	}
+}
+
+func TestSetDir(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+		c.Delete("fooDir", true)
+	}()
+
+	resp, err := c.CreateDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/fooDir" && resp.Node.Value == "" && resp.Node.TTL == 5) {
+		t.Fatalf("SetDir 1 failed: %#v", resp)
+	}
+	if resp.PrevNode != nil {
+		t.Fatalf("SetDir 1 PrevNode failed: %#v", resp)
+	}
+
+	// This should fail because /fooDir already points to a directory
+	resp, err = c.CreateDir("/fooDir", 5)
+	if err == nil {
+		t.Fatalf("fooDir already points to a directory, so SetDir should have failed."+
+			"The response was: %#v", resp)
+	}
+
+	_, err = c.Set("foo", "bar", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This should succeed
+	// It should replace the key
+	resp, err = c.SetDir("foo", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/foo" && resp.Node.Value == "" && resp.Node.TTL == 5) {
+		t.Fatalf("SetDir 2 failed: %#v", resp)
+	}
+	if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("SetDir 2 failed: %#v", resp)
+	}
+}
+
+func TestUpdateDir(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+	}()
+
+	resp, err := c.CreateDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This should succeed.
+	resp, err = c.UpdateDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "update" && resp.Node.Key == "/fooDir" &&
+		resp.Node.Value == "" && resp.Node.TTL == 5) {
+		t.Fatalf("UpdateDir 1 failed: %#v", resp)
+	}
+	if !(resp.PrevNode.Key == "/fooDir" && resp.PrevNode.Dir == true && resp.PrevNode.TTL == 5) {
+		t.Fatalf("UpdateDir 1 PrevNode failed: %#v", resp)
+	}
+
+	// This should fail because the key does not exist.
+	resp, err = c.UpdateDir("nonexistentDir", 5)
+	if err == nil {
+		t.Fatalf("The key %v did not exist, so the update should have failed."+
+			"The response was: %#v", resp.Node.Key, resp)
+	}
+}
+
+func TestCreateDir(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+	}()
+
+	// This should succeed
+	resp, err := c.CreateDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "create" && resp.Node.Key == "/fooDir" &&
+		resp.Node.Value == "" && resp.Node.TTL == 5) {
+		t.Fatalf("CreateDir 1 failed: %#v", resp)
+	}
+	if resp.PrevNode != nil {
+		t.Fatalf("CreateDir 1 PrevNode failed: %#v", resp)
+	}
+
+	// This should fail, because the key is already there
+	resp, err = c.CreateDir("fooDir", 5)
+	if err == nil {
+		t.Fatalf("The key %v did exist, so the creation should have failed."+
+			"The response was: %#v", resp.Node.Key, resp)
+	}
+}

+ 6 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go

@@ -0,0 +1,6 @@
+package etcd
+
+const (
+	version        = "v2"
+	packageVersion = "v2.0.0+git"
+)

+ 103 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch.go

@@ -0,0 +1,103 @@
+package etcd
+
+import (
+	"errors"
+)
+
+// Errors introduced by the Watch command.
+var (
+	ErrWatchStoppedByUser = errors.New("Watch stopped by the user via stop channel")
+)
+
+// If recursive is set to true the watch returns the first change under the given
+// prefix since the given index.
+//
+// If recursive is set to false the watch returns the first change to the given key
+// since the given index.
+//
+// To watch for the latest change, set waitIndex = 0.
+//
+// If a receiver channel is given, it will be a long-term watch. Watch will block on the
+// channel. After someone receives from the channel, it will go on to watch the
+// prefix. If a stop channel is given, the client can close the long-term watch using
+// the stop channel.
+func (c *Client) Watch(prefix string, waitIndex uint64, recursive bool,
+	receiver chan *Response, stop chan bool) (*Response, error) {
+	logger.Debugf("watch %s [%s]", prefix, c.cluster.Leader)
+	if receiver == nil {
+		raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
+
+		if err != nil {
+			return nil, err
+		}
+
+		return raw.Unmarshal()
+	}
+	defer close(receiver)
+
+	for {
+		raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
+
+		if err != nil {
+			return nil, err
+		}
+
+		resp, err := raw.Unmarshal()
+
+		if err != nil {
+			return nil, err
+		}
+
+		waitIndex = resp.Node.ModifiedIndex + 1
+		receiver <- resp
+	}
+}
+
+func (c *Client) RawWatch(prefix string, waitIndex uint64, recursive bool,
+	receiver chan *RawResponse, stop chan bool) (*RawResponse, error) {
+
+	logger.Debugf("rawWatch %s [%s]", prefix, c.cluster.Leader)
+	if receiver == nil {
+		return c.watchOnce(prefix, waitIndex, recursive, stop)
+	}
+
+	for {
+		raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
+
+		if err != nil {
+			return nil, err
+		}
+
+		resp, err := raw.Unmarshal()
+
+		if err != nil {
+			return nil, err
+		}
+
+		waitIndex = resp.Node.ModifiedIndex + 1
+		receiver <- raw
+	}
+}
+
+// watchOnce is a helper that returns when there is a change
+// under the given prefix.
+func (c *Client) watchOnce(key string, waitIndex uint64, recursive bool, stop chan bool) (*RawResponse, error) {
+
+	options := Options{
+		"wait": true,
+	}
+	if waitIndex > 0 {
+		options["waitIndex"] = waitIndex
+	}
+	if recursive {
+		options["recursive"] = true
+	}
+
+	resp, err := c.getCancelable(key, options, stop)
+
+	if err == ErrRequestCancelled {
+		return nil, ErrWatchStoppedByUser
+	}
+
+	return resp, err
+}
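
A minimal sketch of the long-term watch described above: a receiver channel turns `Watch` into a loop that resumes from `ModifiedIndex+1` after each change, and sending on the stop channel ends it with `ErrWatchStoppedByUser`. The endpoint, key, and timeout are illustrative.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/go-etcd/etcd"
)

func main() {
	c := etcd.NewClient(nil)

	receiver := make(chan *etcd.Response, 10)
	stop := make(chan bool, 1)

	go func() {
		// Watch closes the receiver when it returns, ending this range.
		for resp := range receiver {
			fmt.Printf("change %s on %s: %s\n", resp.Action, resp.Node.Key, resp.Node.Value)
		}
	}()

	go func() {
		time.Sleep(5 * time.Second)
		stop <- true // end the long-term watch
	}()

	// Blocks, feeding every change under "config" to the receiver,
	// until something sends on the stop channel.
	if _, err := c.Watch("config", 0, true, receiver, stop); err != etcd.ErrWatchStoppedByUser {
		log.Fatal(err)
	}
}
```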

+ 119 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go

@@ -0,0 +1,119 @@
+package etcd
+
+import (
+	"fmt"
+	"runtime"
+	"testing"
+	"time"
+)
+
+func TestWatch(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("watch_foo", true)
+	}()
+
+	go setHelper("watch_foo", "bar", c)
+
+	resp, err := c.Watch("watch_foo", 0, false, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
+		t.Fatalf("Watch 1 failed: %#v", resp)
+	}
+
+	go setHelper("watch_foo", "bar", c)
+
+	resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, false, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
+		t.Fatalf("Watch 2 failed: %#v", resp)
+	}
+
+	routineNum := runtime.NumGoroutine()
+
+	ch := make(chan *Response, 10)
+	stop := make(chan bool, 1)
+
+	go setLoop("watch_foo", "bar", c)
+
+	go receiver(ch, stop)
+
+	_, err = c.Watch("watch_foo", 0, false, ch, stop)
+	if err != ErrWatchStoppedByUser {
+		t.Fatalf("Watch returned a non-user stop error")
+	}
+
+	if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
+		t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
+	}
+}
+
+func TestWatchAll(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("watch_foo", true)
+	}()
+
+	go setHelper("watch_foo/foo", "bar", c)
+
+	resp, err := c.Watch("watch_foo", 0, true, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
+		t.Fatalf("WatchAll 1 failed: %#v", resp)
+	}
+
+	go setHelper("watch_foo/foo", "bar", c)
+
+	resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, true, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
+		t.Fatalf("WatchAll 2 failed: %#v", resp)
+	}
+
+	ch := make(chan *Response, 10)
+	stop := make(chan bool, 1)
+
+	routineNum := runtime.NumGoroutine()
+
+	go setLoop("watch_foo/foo", "bar", c)
+
+	go receiver(ch, stop)
+
+	_, err = c.Watch("watch_foo", 0, true, ch, stop)
+	if err != ErrWatchStoppedByUser {
+		t.Fatalf("Watch returned a non-user stop error")
+	}
+
+	if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
+		t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
+	}
+}
+
+func setHelper(key, value string, c *Client) {
+	time.Sleep(time.Second)
+	c.Set(key, value, 100)
+}
+
+func setLoop(key, value string, c *Client) {
+	time.Sleep(time.Second)
+	for i := 0; i < 10; i++ {
+		newValue := fmt.Sprintf("%s_%v", value, i)
+		c.Set(key, newValue, 100)
+		time.Sleep(time.Second / 10)
+	}
+}
+
+func receiver(c chan *Response, stop chan bool) {
+	for i := 0; i < 10; i++ {
+		<-c
+	}
+	stop <- true
+}

+ 22 - 0
libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/.gitignore

@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe

+ 9 - 0
libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/.travis.yml

@@ -0,0 +1,9 @@
+language: go
+
+go:
+    - 1.2
+
+script:
+    - go test ./...
+    #- go test -race ./...
+

+ 22 - 0
libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/LICENSE

@@ -0,0 +1,22 @@
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 94 - 0
libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/README.md

@@ -0,0 +1,94 @@
+[![Build Status](https://travis-ci.org/deckarep/golang-set.png?branch=master)](https://travis-ci.org/deckarep/golang-set)
+[![GoDoc](https://godoc.org/github.com/deckarep/golang-set?status.png)](http://godoc.org/github.com/deckarep/golang-set)
+
+## golang-set
+
+
+The missing set collection for the Go language.  Until Go has sets built-in...use this.
+
+Coming from Python, one of the things I miss is the superbly wonderful set collection.  This is my attempt to mimic the primary features of the set from Python.
+You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library.  To those I say: simply ignore this repository
+and carry on. To the rest who find this useful, please help me make it better by:
+
+* Helping to make more idiomatic improvements to the code.
+* Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~
+* Helping to make the unit-tests more robust and kick-ass.
+* Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set)
+* Simply offering feedback and suggestions.  (Positive, constructive feedback is appreciated.)
+
+I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang)
+
+*Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework.  This framework allows you to use the golang-set in a completely generic and type-safe way by allowing you to generate a supporting .go file based on your custom types.
+
+## Features (as of 9/22/2014)
+
+* a CartesianProduct() method has been added with unit-tests: [Read more about the Cartesian product](http://en.wikipedia.org/wiki/Cartesian_product)
+
+## Features (as of 9/15/2014)
+
+* a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set)
+
+## Features (as of 4/22/2014)
+
+* One common interface to both implementations
+* Two set implementations to choose from
+  * a thread-safe implementation designed for concurrent use
+  * a non-thread-safe implementation designed for performance
+* 75 benchmarks for both implementations
+* 35 unit tests for both implementations
+* 14 concurrent tests for the thread-safe implementation
+
+
+
+Please see the unit test file for additional usage examples.  The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html)    Please keep in mind
+however that the Python set is a built-in type and supports additional features and syntax that make it awesome.
+
+## Examples (non-exhaustive):
+
+```go
+requiredClasses := mapset.NewSet()
+requiredClasses.Add("Cooking")
+requiredClasses.Add("English")
+requiredClasses.Add("Math")
+requiredClasses.Add("Biology")
+
+scienceSlice := []interface{}{"Biology", "Chemistry"}
+scienceClasses := mapset.NewSetFromSlice(scienceSlice)
+
+electiveClasses := mapset.NewSet()
+electiveClasses.Add("Welding")
+electiveClasses.Add("Music")
+electiveClasses.Add("Automotive")
+
+bonusClasses := mapset.NewSet()
+bonusClasses.Add("Go Programming")
+bonusClasses.Add("Python Programming")
+
+//Show me all the available classes I can take
+allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses)
+fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming}
+
+
+//Is cooking considered a science class?
+fmt.Println(scienceClasses.Contains("Cooking")) //false
+
+//Show me all classes that are not science classes, since I hate science.
+fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding}
+
+//Which science classes are also required classes?
+fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology}
+
+//How many bonus classes do you offer?
+fmt.Println(bonusClasses.Cardinality()) //2
+
+//Do you have the following classes? Welding, Automotive and English?
+fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true
+```
+
+Thanks!
+
+-Ralph
+
+[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/deckarep/golang-set/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
+
+[![Analytics](https://ga-beacon.appspot.com/UA-42584447-2/deckarep/golang-set)](https://github.com/igrigorik/ga-beacon)

+ 523 - 0
libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/bench_test.go

@@ -0,0 +1,523 @@
+package mapset
+
+import (
+	"math/rand"
+	"testing"
+)
+
+func nrand(n int) []int {
+	i := make([]int, n)
+	for ind := range i {
+		i[ind] = rand.Int()
+	}
+	return i
+}
+
+func toInterfaces(i []int) []interface{} {
+	ifs := make([]interface{}, len(i))
+	for ind, v := range i {
+		ifs[ind] = v
+	}
+	return ifs
+}
+
+func benchAdd(b *testing.B, s Set) {
+	nums := nrand(b.N)
+	b.ResetTimer()
+	for _, v := range nums {
+		s.Add(v)
+	}
+}
+
+func BenchmarkAddSafe(b *testing.B) {
+	benchAdd(b, NewSet())
+}
+
+func BenchmarkAddUnsafe(b *testing.B) {
+	benchAdd(b, NewThreadUnsafeSet())
+}
+
+func benchRemove(b *testing.B, s Set) {
+	nums := nrand(b.N)
+	for _, v := range nums {
+		s.Add(v)
+	}
+
+	b.ResetTimer()
+	for _, v := range nums {
+		s.Remove(v)
+	}
+}
+
+func BenchmarkRemoveSafe(b *testing.B) {
+	benchRemove(b, NewSet())
+}
+
+func BenchmarkRemoveUnsafe(b *testing.B) {
+	benchRemove(b, NewThreadUnsafeSet())
+}
+
+func benchCardinality(b *testing.B, s Set) {
+	for i := 0; i < b.N; i++ {
+		s.Cardinality()
+	}
+}
+
+func BenchmarkCardinalitySafe(b *testing.B) {
+	benchCardinality(b, NewSet())
+}
+
+func BenchmarkCardinalityUnsafe(b *testing.B) {
+	benchCardinality(b, NewThreadUnsafeSet())
+}
+
+func benchClear(b *testing.B, s Set) {
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.Clear()
+	}
+}
+
+func BenchmarkClearSafe(b *testing.B) {
+	benchClear(b, NewSet())
+}
+
+func BenchmarkClearUnsafe(b *testing.B) {
+	benchClear(b, NewThreadUnsafeSet())
+}
+
+func benchClone(b *testing.B, n int, s Set) {
+	nums := toInterfaces(nrand(n))
+	for _, v := range nums {
+		s.Add(v)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.Clone()
+	}
+}
+
+func BenchmarkClone1Safe(b *testing.B) {
+	benchClone(b, 1, NewSet())
+}
+
+func BenchmarkClone1Unsafe(b *testing.B) {
+	benchClone(b, 1, NewThreadUnsafeSet())
+}
+
+func BenchmarkClone10Safe(b *testing.B) {
+	benchClone(b, 10, NewSet())
+}
+
+func BenchmarkClone10Unsafe(b *testing.B) {
+	benchClone(b, 10, NewThreadUnsafeSet())
+}
+
+func BenchmarkClone100Safe(b *testing.B) {
+	benchClone(b, 100, NewSet())
+}
+
+func BenchmarkClone100Unsafe(b *testing.B) {
+	benchClone(b, 100, NewThreadUnsafeSet())
+}
+
+func benchContains(b *testing.B, n int, s Set) {
+	nums := toInterfaces(nrand(n))
+	for _, v := range nums {
+		s.Add(v)
+	}
+
+	nums[n-1] = -1 // Definitely not in s
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.Contains(nums...)
+	}
+}
+
+func BenchmarkContains1Safe(b *testing.B) {
+	benchContains(b, 1, NewSet())
+}
+
+func BenchmarkContains1Unsafe(b *testing.B) {
+	benchContains(b, 1, NewThreadUnsafeSet())
+}
+
+func BenchmarkContains10Safe(b *testing.B) {
+	benchContains(b, 10, NewSet())
+}
+
+func BenchmarkContains10Unsafe(b *testing.B) {
+	benchContains(b, 10, NewThreadUnsafeSet())
+}
+
+func BenchmarkContains100Safe(b *testing.B) {
+	benchContains(b, 100, NewSet())
+}
+
+func BenchmarkContains100Unsafe(b *testing.B) {
+	benchContains(b, 100, NewThreadUnsafeSet())
+}
+
+func benchEqual(b *testing.B, n int, s, t Set) {
+	nums := nrand(n)
+	for _, v := range nums {
+		s.Add(v)
+		t.Add(v)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.Equal(t)
+	}
+}
+
+func BenchmarkEqual1Safe(b *testing.B) {
+	benchEqual(b, 1, NewSet(), NewSet())
+}
+
+func BenchmarkEqual1Unsafe(b *testing.B) {
+	benchEqual(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkEqual10Safe(b *testing.B) {
+	benchEqual(b, 10, NewSet(), NewSet())
+}
+
+func BenchmarkEqual10Unsafe(b *testing.B) {
+	benchEqual(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkEqual100Safe(b *testing.B) {
+	benchEqual(b, 100, NewSet(), NewSet())
+}
+
+func BenchmarkEqual100Unsafe(b *testing.B) {
+	benchEqual(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func benchDifference(b *testing.B, n int, s, t Set) {
+	nums := nrand(n)
+	for _, v := range nums {
+		s.Add(v)
+	}
+	for _, v := range nums[:n/2] {
+		t.Add(v)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.Difference(t)
+	}
+}
+
+func benchIsSubset(b *testing.B, n int, s, t Set) {
+	nums := nrand(n)
+	for _, v := range nums {
+		s.Add(v)
+		t.Add(v)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.IsSubset(t)
+	}
+}
+
+func BenchmarkIsSubset1Safe(b *testing.B) {
+	benchIsSubset(b, 1, NewSet(), NewSet())
+}
+
+func BenchmarkIsSubset1Unsafe(b *testing.B) {
+	benchIsSubset(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkIsSubset10Safe(b *testing.B) {
+	benchIsSubset(b, 10, NewSet(), NewSet())
+}
+
+func BenchmarkIsSubset10Unsafe(b *testing.B) {
+	benchIsSubset(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkIsSubset100Safe(b *testing.B) {
+	benchIsSubset(b, 100, NewSet(), NewSet())
+}
+
+func BenchmarkIsSubset100Unsafe(b *testing.B) {
+	benchIsSubset(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func benchIsSuperset(b *testing.B, n int, s, t Set) {
+	nums := nrand(n)
+	for _, v := range nums {
+		s.Add(v)
+		t.Add(v)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.IsSuperset(t)
+	}
+}
+
+func BenchmarkIsSuperset1Safe(b *testing.B) {
+	benchIsSuperset(b, 1, NewSet(), NewSet())
+}
+
+func BenchmarkIsSuperset1Unsafe(b *testing.B) {
+	benchIsSuperset(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkIsSuperset10Safe(b *testing.B) {
+	benchIsSuperset(b, 10, NewSet(), NewSet())
+}
+
+func BenchmarkIsSuperset10Unsafe(b *testing.B) {
+	benchIsSuperset(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkIsSuperset100Safe(b *testing.B) {
+	benchIsSuperset(b, 100, NewSet(), NewSet())
+}
+
+func BenchmarkIsSuperset100Unsafe(b *testing.B) {
+	benchIsSuperset(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkDifference1Safe(b *testing.B) {
+	benchDifference(b, 1, NewSet(), NewSet())
+}
+
+func BenchmarkDifference1Unsafe(b *testing.B) {
+	benchDifference(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkDifference10Safe(b *testing.B) {
+	benchDifference(b, 10, NewSet(), NewSet())
+}
+
+func BenchmarkDifference10Unsafe(b *testing.B) {
+	benchDifference(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkDifference100Safe(b *testing.B) {
+	benchDifference(b, 100, NewSet(), NewSet())
+}
+
+func BenchmarkDifference100Unsafe(b *testing.B) {
+	benchDifference(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func benchIntersect(b *testing.B, n int, s, t Set) {
+	nums := nrand(int(float64(n) * float64(1.5)))
+	for _, v := range nums[:n] {
+		s.Add(v)
+	}
+	for _, v := range nums[n/2:] {
+		t.Add(v)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.Intersect(t)
+	}
+}
+
+func BenchmarkIntersect1Safe(b *testing.B) {
+	benchIntersect(b, 1, NewSet(), NewSet())
+}
+
+func BenchmarkIntersect1Unsafe(b *testing.B) {
+	benchIntersect(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkIntersect10Safe(b *testing.B) {
+	benchIntersect(b, 10, NewSet(), NewSet())
+}
+
+func BenchmarkIntersect10Unsafe(b *testing.B) {
+	benchIntersect(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkIntersect100Safe(b *testing.B) {
+	benchIntersect(b, 100, NewSet(), NewSet())
+}
+
+func BenchmarkIntersect100Unsafe(b *testing.B) {
+	benchIntersect(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func benchSymmetricDifference(b *testing.B, n int, s, t Set) {
+	nums := nrand(int(float64(n) * float64(1.5)))
+	for _, v := range nums[:n] {
+		s.Add(v)
+	}
+	for _, v := range nums[n/2:] {
+		t.Add(v)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.SymmetricDifference(t)
+	}
+}
+
+func BenchmarkSymmetricDifference1Safe(b *testing.B) {
+	benchSymmetricDifference(b, 1, NewSet(), NewSet())
+}
+
+func BenchmarkSymmetricDifference1Unsafe(b *testing.B) {
+	benchSymmetricDifference(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkSymmetricDifference10Safe(b *testing.B) {
+	benchSymmetricDifference(b, 10, NewSet(), NewSet())
+}
+
+func BenchmarkSymmetricDifference10Unsafe(b *testing.B) {
+	benchSymmetricDifference(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkSymmetricDifference100Safe(b *testing.B) {
+	benchSymmetricDifference(b, 100, NewSet(), NewSet())
+}
+
+func BenchmarkSymmetricDifference100Unsafe(b *testing.B) {
+	benchSymmetricDifference(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func benchUnion(b *testing.B, n int, s, t Set) {
+	nums := nrand(n)
+	for _, v := range nums[:n/2] {
+		s.Add(v)
+	}
+	for _, v := range nums[n/2:] {
+		t.Add(v)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.Union(t)
+	}
+}
+
+func BenchmarkUnion1Safe(b *testing.B) {
+	benchUnion(b, 1, NewSet(), NewSet())
+}
+
+func BenchmarkUnion1Unsafe(b *testing.B) {
+	benchUnion(b, 1, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkUnion10Safe(b *testing.B) {
+	benchUnion(b, 10, NewSet(), NewSet())
+}
+
+func BenchmarkUnion10Unsafe(b *testing.B) {
+	benchUnion(b, 10, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func BenchmarkUnion100Safe(b *testing.B) {
+	benchUnion(b, 100, NewSet(), NewSet())
+}
+
+func BenchmarkUnion100Unsafe(b *testing.B) {
+	benchUnion(b, 100, NewThreadUnsafeSet(), NewThreadUnsafeSet())
+}
+
+func benchIter(b *testing.B, n int, s Set) {
+	nums := nrand(n)
+	for _, v := range nums {
+		s.Add(v)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		c := s.Iter()
+		for _ = range c {
+
+		}
+	}
+}
+
+func BenchmarkIter1Safe(b *testing.B) {
+	benchIter(b, 1, NewSet())
+}
+
+func BenchmarkIter1Unsafe(b *testing.B) {
+	benchIter(b, 1, NewThreadUnsafeSet())
+}
+
+func BenchmarkIter10Safe(b *testing.B) {
+	benchIter(b, 10, NewSet())
+}
+
+func BenchmarkIter10Unsafe(b *testing.B) {
+	benchIter(b, 10, NewThreadUnsafeSet())
+}
+
+func BenchmarkIter100Safe(b *testing.B) {
+	benchIter(b, 100, NewSet())
+}
+
+func BenchmarkIter100Unsafe(b *testing.B) {
+	benchIter(b, 100, NewThreadUnsafeSet())
+}
+
+func benchString(b *testing.B, n int, s Set) {
+	nums := nrand(n)
+	for _, v := range nums {
+		s.Add(v)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.String()
+	}
+}
+
+func BenchmarkString1Safe(b *testing.B) {
+	benchString(b, 1, NewSet())
+}
+
+func BenchmarkString1Unsafe(b *testing.B) {
+	benchString(b, 1, NewThreadUnsafeSet())
+}
+
+func BenchmarkString10Safe(b *testing.B) {
+	benchString(b, 10, NewSet())
+}
+
+func BenchmarkString10Unsafe(b *testing.B) {
+	benchString(b, 10, NewThreadUnsafeSet())
+}
+
+func BenchmarkString100Safe(b *testing.B) {
+	benchString(b, 100, NewSet())
+}
+
+func BenchmarkString100Unsafe(b *testing.B) {
+	benchString(b, 100, NewThreadUnsafeSet())
+}
+
+func benchToSlice(b *testing.B, s Set) {
+	nums := nrand(b.N)
+	for _, v := range nums {
+		s.Add(v)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		s.ToSlice()
+	}
+}
+
+func BenchmarkToSliceSafe(b *testing.B) {
+	benchToSlice(b, NewSet())
+}
+
+func BenchmarkToSliceUnsafe(b *testing.B) {
+	benchToSlice(b, NewThreadUnsafeSet())
+}

+ 168 - 0
libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/set.go

@@ -0,0 +1,168 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+// Package mapset implements a simple and generic set collection.
+// Items stored within it are unordered and unique. It supports
+// typical set operations: membership testing, intersection, union,
+// difference, symmetric difference and cloning.
+//
+// Package mapset provides two implementations. The default
+// implementation is safe for concurrent access. There is a non-threadsafe
+// implementation which is slightly more performant.
+package mapset
+
+type Set interface {
+	// Adds an element to the set. Returns whether
+	// the item was added.
+	Add(i interface{}) bool
+
+	// Returns the number of elements in the set.
+	Cardinality() int
+
+	// Removes all elements from the set, leaving
+	// the empty set.
+	Clear()
+
+	// Returns a clone of the set using the same
+	// implementation, duplicating all keys.
+	Clone() Set
+
+	// Returns whether the given items
+	// are all in the set.
+	Contains(i ...interface{}) bool
+
+	// Returns the difference between this set
+	// and other. The returned set will contain
+	// all elements of this set that are not also
+	// elements of other.
+	//
+	// Note that the argument to Difference
+	// must be of the same type as the receiver
+	// of the method. Otherwise, Difference will
+	// panic.
+	Difference(other Set) Set
+
+	// Determines if two sets are equal to each
+	// other. If they have the same cardinality
+	// and contain the same elements, they are
+	// considered equal. The order in which
+	// the elements were added is irrelevant.
+	//
+	// Note that the argument to Equal must be
+	// of the same type as the receiver of the
+	// method. Otherwise, Equal will panic.
+	Equal(other Set) bool
+
+	// Returns a new set containing only the elements
+	// that exist in both sets.
+	//
+	// Note that the argument to Intersect
+	// must be of the same type as the receiver
+	// of the method. Otherwise, Intersect will
+	// panic.
+	Intersect(other Set) Set
+
+	// Determines if every element in this set
+	// is in the other set.
+	//
+	// Note that the argument to IsSubset
+	// must be of the same type as the receiver
+	// of the method. Otherwise, IsSubset will
+	// panic.
+	IsSubset(other Set) bool
+
+	// Determines if every element in this set is in
+	// the other set.
+	//
+	// Note that the argument to IsSuperset
+	// must be of the same type as the receiver
+	// of the method. Otherwise, IsSuperset will
+	// panic.
+	IsSuperset(other Set) bool
+
+	// Returns a channel of elements that you can
+	// range over.
+	Iter() <-chan interface{}
+
+	// Remove a single element from the set.
+	Remove(i interface{})
+
+	// Provides a convenient string representation
+	// of the current state of the set.
+	String() string
+
+	// Returns a new set with all elements which are
+	// in either this set or the other set but not in both.
+	//
+	// Note that the argument to SymmetricDifference
+	// must be of the same type as the receiver
+	// of the method. Otherwise, SymmetricDifference
+	// will panic.
+	SymmetricDifference(other Set) Set
+
+	// Returns a new set with all elements in both sets.
+	//
+	// Note that the argument to Union must be of the
+	// same type as the receiver of the method.
+	// Otherwise, Union will panic.
+	Union(other Set) Set
+
+	// Returns all subsets of a given set (Power Set).
+	PowerSet() Set
+
+	// Returns the Cartesian Product of two sets.
+	CartesianProduct(other Set) Set
+
+	// Returns the members of the set as a slice.
+	ToSlice() []interface{}
+}
+
+// Creates and returns a reference to an empty set.
+func NewSet() Set {
+	set := newThreadSafeSet()
+	return &set
+}
+
+// Creates and returns a reference to a set from an existing slice
+func NewSetFromSlice(s []interface{}) Set {
+	a := NewSet()
+	for _, item := range s {
+		a.Add(item)
+	}
+	return a
+}
+
+func NewThreadUnsafeSet() Set {
+	set := newThreadUnsafeSet()
+	return &set
+}
+
+func NewThreadUnsafeSetFromSlice(s []interface{}) Set {
+	a := NewThreadUnsafeSet()
+	for _, item := range s {
+		a.Add(item)
+	}
+	return a
+}
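
To see how the constructors and the `Set` interface above fit together, here is a minimal usage sketch (assuming the upstream import path `github.com/deckarep/golang-set` for this vendored package; both operands of a binary operation such as `Union` must use the same implementation):

```go
package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	// Default, thread-safe implementation built from a slice.
	required := mapset.NewSetFromSlice([]interface{}{"Cooking", "English", "Math"})

	electives := mapset.NewSet()
	electives.Add("Welding")
	electives.Add("Music")
	electives.Add("Math") // duplicates are ignored

	all := required.Union(electives)
	fmt.Println(all.Cardinality())             // 5
	fmt.Println(all.Contains("Math", "Music")) // true
	fmt.Println(required.Intersect(electives)) // Set{Math}

	// Iter returns a channel that can be ranged over.
	for item := range all.Iter() {
		fmt.Println(item)
	}
}
```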

+ 910 - 0
libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/set_test.go

@@ -0,0 +1,910 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+import "testing"
+
+func makeSet(ints []int) Set {
+	set := NewSet()
+	for _, i := range ints {
+		set.Add(i)
+	}
+	return set
+}
+
+func makeUnsafeSet(ints []int) Set {
+	set := NewThreadUnsafeSet()
+	for _, i := range ints {
+		set.Add(i)
+	}
+	return set
+}
+
+func Test_NewSet(t *testing.T) {
+	a := NewSet()
+
+	if a.Cardinality() != 0 {
+		t.Error("NewSet should start out as an empty set")
+	}
+}
+
+func Test_NewUnsafeSet(t *testing.T) {
+	a := NewThreadUnsafeSet()
+
+	if a.Cardinality() != 0 {
+		t.Error("NewThreadUnsafeSet should start out as an empty set")
+	}
+}
+
+func Test_AddSet(t *testing.T) {
+	a := makeSet([]int{1, 2, 3})
+
+	if a.Cardinality() != 3 {
+		t.Error("AddSet does not have a size of 3 even though 3 items were added to a new set")
+	}
+}
+
+func Test_AddUnsafeSet(t *testing.T) {
+	a := makeUnsafeSet([]int{1, 2, 3})
+
+	if a.Cardinality() != 3 {
+		t.Error("AddSet does not have a size of 3 even though 3 items were added to a new set")
+	}
+}
+
+func Test_AddSetNoDuplicate(t *testing.T) {
+	a := makeSet([]int{7, 5, 3, 7})
+
+	if a.Cardinality() != 3 {
+		t.Error("AddSetNoDuplicate set should have 3 elements since 7 is a duplicate")
+	}
+
+	if !(a.Contains(7) && a.Contains(5) && a.Contains(3)) {
+		t.Error("AddSetNoDuplicate set should have a 7, 5, and 3 in it.")
+	}
+}
+
+func Test_AddUnsafeSetNoDuplicate(t *testing.T) {
+	a := makeUnsafeSet([]int{7, 5, 3, 7})
+
+	if a.Cardinality() != 3 {
+		t.Error("AddSetNoDuplicate set should have 3 elements since 7 is a duplicate")
+	}
+
+	if !(a.Contains(7) && a.Contains(5) && a.Contains(3)) {
+		t.Error("AddSetNoDuplicate set should have a 7, 5, and 3 in it.")
+	}
+}
+
+func Test_RemoveSet(t *testing.T) {
+	a := makeSet([]int{6, 3, 1})
+
+	a.Remove(3)
+
+	if a.Cardinality() != 2 {
+		t.Error("RemoveSet should only have 2 items in the set")
+	}
+
+	if !(a.Contains(6) && a.Contains(1)) {
+		t.Error("RemoveSet should have only items 6 and 1 in the set")
+	}
+
+	a.Remove(6)
+	a.Remove(1)
+
+	if a.Cardinality() != 0 {
+		t.Error("RemoveSet should be an empty set after removing 6 and 1")
+	}
+}
+
+func Test_RemoveUnsafeSet(t *testing.T) {
+	a := makeUnsafeSet([]int{6, 3, 1})
+
+	a.Remove(3)
+
+	if a.Cardinality() != 2 {
+		t.Error("RemoveSet should only have 2 items in the set")
+	}
+
+	if !(a.Contains(6) && a.Contains(1)) {
+		t.Error("RemoveSet should have only items 6 and 1 in the set")
+	}
+
+	a.Remove(6)
+	a.Remove(1)
+
+	if a.Cardinality() != 0 {
+		t.Error("RemoveSet should be an empty set after removing 6 and 1")
+	}
+}
+
+func Test_ContainsSet(t *testing.T) {
+	a := NewSet()
+
+	a.Add(71)
+
+	if !a.Contains(71) {
+		t.Error("ContainsSet should contain 71")
+	}
+
+	a.Remove(71)
+
+	if a.Contains(71) {
+		t.Error("ContainsSet should not contain 71")
+	}
+
+	a.Add(13)
+	a.Add(7)
+	a.Add(1)
+
+	if !(a.Contains(13) && a.Contains(7) && a.Contains(1)) {
+		t.Error("ContainsSet should contain 13, 7, 1")
+	}
+}
+
+func Test_ContainsUnsafeSet(t *testing.T) {
+	a := NewThreadUnsafeSet()
+
+	a.Add(71)
+
+	if !a.Contains(71) {
+		t.Error("ContainsSet should contain 71")
+	}
+
+	a.Remove(71)
+
+	if a.Contains(71) {
+		t.Error("ContainsSet should not contain 71")
+	}
+
+	a.Add(13)
+	a.Add(7)
+	a.Add(1)
+
+	if !(a.Contains(13) && a.Contains(7) && a.Contains(1)) {
+		t.Error("ContainsSet should contain 13, 7, 1")
+	}
+}
+
+func Test_ContainsMultipleSet(t *testing.T) {
+	a := makeSet([]int{8, 6, 7, 5, 3, 0, 9})
+
+	if !a.Contains(8, 6, 7, 5, 3, 0, 9) {
+		t.Error("ContainsAll should contain Jenny's phone number")
+	}
+
+	if a.Contains(8, 6, 11, 5, 3, 0, 9) {
+		t.Error("ContainsAll should not have all of these numbers")
+	}
+}
+
+func Test_ContainsMultipleUnsafeSet(t *testing.T) {
+	a := makeUnsafeSet([]int{8, 6, 7, 5, 3, 0, 9})
+
+	if !a.Contains(8, 6, 7, 5, 3, 0, 9) {
+		t.Error("ContainsAll should contain Jenny's phone number")
+	}
+
+	if a.Contains(8, 6, 11, 5, 3, 0, 9) {
+		t.Error("ContainsAll should not have all of these numbers")
+	}
+}
+
+func Test_ClearSet(t *testing.T) {
+	a := makeSet([]int{2, 5, 9, 10})
+
+	a.Clear()
+
+	if a.Cardinality() != 0 {
+		t.Error("ClearSet should be an empty set")
+	}
+}
+
+func Test_ClearUnsafeSet(t *testing.T) {
+	a := makeUnsafeSet([]int{2, 5, 9, 10})
+
+	a.Clear()
+
+	if a.Cardinality() != 0 {
+		t.Error("ClearSet should be an empty set")
+	}
+}
+
+func Test_CardinalitySet(t *testing.T) {
+	a := NewSet()
+
+	if a.Cardinality() != 0 {
+		t.Error("set should be an empty set")
+	}
+
+	a.Add(1)
+
+	if a.Cardinality() != 1 {
+		t.Error("set should have a size of 1")
+	}
+
+	a.Remove(1)
+
+	if a.Cardinality() != 0 {
+		t.Error("set should be an empty set")
+	}
+
+	a.Add(9)
+
+	if a.Cardinality() != 1 {
+		t.Error("set should have a size of 1")
+	}
+
+	a.Clear()
+
+	if a.Cardinality() != 0 {
+		t.Error("set should be an empty set after Clear")
+	}
+}
+
+func Test_CardinalityUnsafeSet(t *testing.T) {
+	a := NewThreadUnsafeSet()
+
+	if a.Cardinality() != 0 {
+		t.Error("set should be an empty set")
+	}
+
+	a.Add(1)
+
+	if a.Cardinality() != 1 {
+		t.Error("set should have a size of 1")
+	}
+
+	a.Remove(1)
+
+	if a.Cardinality() != 0 {
+		t.Error("set should be an empty set")
+	}
+
+	a.Add(9)
+
+	if a.Cardinality() != 1 {
+		t.Error("set should have a size of 1")
+	}
+
+	a.Clear()
+
+	if a.Cardinality() != 0 {
+		t.Error("set should be an empty set after Clear")
+	}
+}
+
+func Test_SetIsSubset(t *testing.T) {
+	a := makeSet([]int{1, 2, 3, 5, 7})
+
+	b := NewSet()
+	b.Add(3)
+	b.Add(5)
+	b.Add(7)
+
+	if !b.IsSubset(a) {
+		t.Error("set b should be a subset of set a")
+	}
+
+	b.Add(72)
+
+	if b.IsSubset(a) {
+		t.Error("set b should not be a subset of set a because it contains 72 which is not in the set of a")
+	}
+}
+
+func Test_UnsafeSetIsSubset(t *testing.T) {
+	a := makeUnsafeSet([]int{1, 2, 3, 5, 7})
+
+	b := NewThreadUnsafeSet()
+	b.Add(3)
+	b.Add(5)
+	b.Add(7)
+
+	if !b.IsSubset(a) {
+		t.Error("set b should be a subset of set a")
+	}
+
+	b.Add(72)
+
+	if b.IsSubset(a) {
+		t.Error("set b should not be a subset of set a because it contains 72 which is not in the set of a")
+	}
+}
+
+func Test_SetIsSuperSet(t *testing.T) {
+	a := NewSet()
+	a.Add(9)
+	a.Add(5)
+	a.Add(2)
+	a.Add(1)
+	a.Add(11)
+
+	b := NewSet()
+	b.Add(5)
+	b.Add(2)
+	b.Add(11)
+
+	if !a.IsSuperset(b) {
+		t.Error("set a should be a superset of set b")
+	}
+
+	b.Add(42)
+
+	if a.IsSuperset(b) {
+		t.Error("set a should not be a superset of set b because set b has a 42 that is not in set a")
+	}
+}
+
+func Test_UnsafeSetIsSuperSet(t *testing.T) {
+	a := NewThreadUnsafeSet()
+	a.Add(9)
+	a.Add(5)
+	a.Add(2)
+	a.Add(1)
+	a.Add(11)
+
+	b := NewThreadUnsafeSet()
+	b.Add(5)
+	b.Add(2)
+	b.Add(11)
+
+	if !a.IsSuperset(b) {
+		t.Error("set a should be a superset of set b")
+	}
+
+	b.Add(42)
+
+	if a.IsSuperset(b) {
+		t.Error("set a should not be a superset of set b because set b has a 42 that is not in set a")
+	}
+}
+
+func Test_SetUnion(t *testing.T) {
+	a := NewSet()
+
+	b := NewSet()
+	b.Add(1)
+	b.Add(2)
+	b.Add(3)
+	b.Add(4)
+	b.Add(5)
+
+	c := a.Union(b)
+
+	if c.Cardinality() != 5 {
+		t.Error("set c is unioned with an empty set and therefore should have 5 elements in it")
+	}
+
+	d := NewSet()
+	d.Add(10)
+	d.Add(14)
+	d.Add(0)
+
+	e := c.Union(d)
+	if e.Cardinality() != 8 {
+		t.Error("set e should have 8 elements in it after unioning set c with set d")
+	}
+
+	f := NewSet()
+	f.Add(14)
+	f.Add(3)
+
+	g := f.Union(e)
+	if g.Cardinality() != 8 {
+		t.Error("set g should still have 8 elements in it after being unioned with set f, which has duplicates")
+	}
+}
+
+func Test_UnsafeSetUnion(t *testing.T) {
+	a := NewThreadUnsafeSet()
+
+	b := NewThreadUnsafeSet()
+	b.Add(1)
+	b.Add(2)
+	b.Add(3)
+	b.Add(4)
+	b.Add(5)
+
+	c := a.Union(b)
+
+	if c.Cardinality() != 5 {
+		t.Error("set c is unioned with an empty set and therefore should have 5 elements in it")
+	}
+
+	d := NewThreadUnsafeSet()
+	d.Add(10)
+	d.Add(14)
+	d.Add(0)
+
+	e := c.Union(d)
+	if e.Cardinality() != 8 {
+		t.Error("set e should have 8 elements in it after unioning set c with set d")
+	}
+
+	f := NewThreadUnsafeSet()
+	f.Add(14)
+	f.Add(3)
+
+	g := f.Union(e)
+	if g.Cardinality() != 8 {
+		t.Error("set g should still have 8 elements in it after being unioned with set f, which has duplicates")
+	}
+}
+
+func Test_SetIntersect(t *testing.T) {
+	a := NewSet()
+	a.Add(1)
+	a.Add(3)
+	a.Add(5)
+
+	b := NewSet()
+	b.Add(2)
+	b.Add(4)
+	b.Add(6)
+
+	c := a.Intersect(b)
+
+	if c.Cardinality() != 0 {
+		t.Error("set c should be the empty set because there are no common items to intersect")
+	}
+
+	a.Add(10)
+	b.Add(10)
+
+	d := a.Intersect(b)
+
+	if !(d.Cardinality() == 1 && d.Contains(10)) {
+		t.Error("set d should have a size of 1 and contain the item 10")
+	}
+}
+
+func Test_UnsafeSetIntersect(t *testing.T) {
+	a := NewThreadUnsafeSet()
+	a.Add(1)
+	a.Add(3)
+	a.Add(5)
+
+	b := NewThreadUnsafeSet()
+	b.Add(2)
+	b.Add(4)
+	b.Add(6)
+
+	c := a.Intersect(b)
+
+	if c.Cardinality() != 0 {
+		t.Error("set c should be the empty set because there are no common items to intersect")
+	}
+
+	a.Add(10)
+	b.Add(10)
+
+	d := a.Intersect(b)
+
+	if !(d.Cardinality() == 1 && d.Contains(10)) {
+		t.Error("set d should have a size of 1 and contain the item 10")
+	}
+}
+
+func Test_SetDifference(t *testing.T) {
+	a := NewSet()
+	a.Add(1)
+	a.Add(2)
+	a.Add(3)
+
+	b := NewSet()
+	b.Add(1)
+	b.Add(3)
+	b.Add(4)
+	b.Add(5)
+	b.Add(6)
+	b.Add(99)
+
+	c := a.Difference(b)
+
+	if !(c.Cardinality() == 1 && c.Contains(2)) {
+		t.Error("the difference of set a to b is the set of 1 item: 2")
+	}
+}
+
+func Test_UnsafeSetDifference(t *testing.T) {
+	a := NewThreadUnsafeSet()
+	a.Add(1)
+	a.Add(2)
+	a.Add(3)
+
+	b := NewThreadUnsafeSet()
+	b.Add(1)
+	b.Add(3)
+	b.Add(4)
+	b.Add(5)
+	b.Add(6)
+	b.Add(99)
+
+	c := a.Difference(b)
+
+	if !(c.Cardinality() == 1 && c.Contains(2)) {
+		t.Error("the difference of set a to b is the set of 1 item: 2")
+	}
+}
+
+func Test_SetSymmetricDifference(t *testing.T) {
+	a := NewSet()
+	a.Add(1)
+	a.Add(2)
+	a.Add(3)
+	a.Add(45)
+
+	b := NewSet()
+	b.Add(1)
+	b.Add(3)
+	b.Add(4)
+	b.Add(5)
+	b.Add(6)
+	b.Add(99)
+
+	c := a.SymmetricDifference(b)
+
+	if !(c.Cardinality() == 6 && c.Contains(2) && c.Contains(45) && c.Contains(4) && c.Contains(5) && c.Contains(6) && c.Contains(99)) {
+		t.Error("the symmetric difference of set a to b is the set of 6 items: 2, 45, 4, 5, 6, 99")
+	}
+}
+
+func Test_UnsafeSetSymmetricDifference(t *testing.T) {
+	a := NewThreadUnsafeSet()
+	a.Add(1)
+	a.Add(2)
+	a.Add(3)
+	a.Add(45)
+
+	b := NewThreadUnsafeSet()
+	b.Add(1)
+	b.Add(3)
+	b.Add(4)
+	b.Add(5)
+	b.Add(6)
+	b.Add(99)
+
+	c := a.SymmetricDifference(b)
+
+	if !(c.Cardinality() == 6 && c.Contains(2) && c.Contains(45) && c.Contains(4) && c.Contains(5) && c.Contains(6) && c.Contains(99)) {
+		t.Error("the symmetric difference of set a to b is the set of 6 items: 2, 45, 4, 5, 6, 99")
+	}
+}
+
+func Test_SetEqual(t *testing.T) {
+	a := NewSet()
+	b := NewSet()
+
+	if !a.Equal(b) {
+		t.Error("Both a and b are empty sets, and should be equal")
+	}
+
+	a.Add(10)
+
+	if a.Equal(b) {
+		t.Error("a should not be equal to b because b is empty and a has item 10 in it")
+	}
+
+	b.Add(10)
+
+	if !a.Equal(b) {
+		t.Error("a is now equal again to b because both have the item 10 in them")
+	}
+
+	b.Add(8)
+	b.Add(3)
+	b.Add(47)
+
+	if a.Equal(b) {
+		t.Error("b has 3 more elements in it so therefore should not be equal to a")
+	}
+
+	a.Add(8)
+	a.Add(3)
+	a.Add(47)
+
+	if !a.Equal(b) {
+		t.Error("a and b should be equal with the same number of elements")
+	}
+}
+
+func Test_UnsafeSetEqual(t *testing.T) {
+	a := NewThreadUnsafeSet()
+	b := NewThreadUnsafeSet()
+
+	if !a.Equal(b) {
+		t.Error("Both a and b are empty sets, and should be equal")
+	}
+
+	a.Add(10)
+
+	if a.Equal(b) {
+		t.Error("a should not be equal to b because b is empty and a has item 10 in it")
+	}
+
+	b.Add(10)
+
+	if !a.Equal(b) {
+		t.Error("a is now equal again to b because both have the item 10 in them")
+	}
+
+	b.Add(8)
+	b.Add(3)
+	b.Add(47)
+
+	if a.Equal(b) {
+		t.Error("b has 3 more elements in it so therefore should not be equal to a")
+	}
+
+	a.Add(8)
+	a.Add(3)
+	a.Add(47)
+
+	if !a.Equal(b) {
+		t.Error("a and b should be equal with the same number of elements")
+	}
+}
+
+func Test_SetClone(t *testing.T) {
+	a := NewSet()
+	a.Add(1)
+	a.Add(2)
+
+	b := a.Clone()
+
+	if !a.Equal(b) {
+		t.Error("Clones should be equal")
+	}
+
+	a.Add(3)
+	if a.Equal(b) {
+		t.Error("a contains one more element, they should not be equal")
+	}
+
+	c := a.Clone()
+	c.Remove(1)
+
+	if a.Equal(c) {
+		t.Error("C contains one element less, they should not be equal")
+	}
+}
+
+func Test_UnsafeSetClone(t *testing.T) {
+	a := NewThreadUnsafeSet()
+	a.Add(1)
+	a.Add(2)
+
+	b := a.Clone()
+
+	if !a.Equal(b) {
+		t.Error("Clones should be equal")
+	}
+
+	a.Add(3)
+	if a.Equal(b) {
+		t.Error("a contains one more element, they should not be equal")
+	}
+
+	c := a.Clone()
+	c.Remove(1)
+
+	if a.Equal(c) {
+		t.Error("C contains one element less, they should not be equal")
+	}
+}
+
+func Test_Iterator(t *testing.T) {
+	a := NewSet()
+
+	a.Add("Z")
+	a.Add("Y")
+	a.Add("X")
+	a.Add("W")
+
+	b := NewSet()
+	for val := range a.Iter() {
+		b.Add(val)
+	}
+
+	if !a.Equal(b) {
+		t.Error("The sets are not equal after iterating through the first set")
+	}
+}
+
+func Test_UnsafeIterator(t *testing.T) {
+	a := NewThreadUnsafeSet()
+
+	a.Add("Z")
+	a.Add("Y")
+	a.Add("X")
+	a.Add("W")
+
+	b := NewThreadUnsafeSet()
+	for val := range a.Iter() {
+		b.Add(val)
+	}
+
+	if !a.Equal(b) {
+		t.Error("The sets are not equal after iterating through the first set")
+	}
+}
+
+func Test_PowerSet(t *testing.T) {
+	a := NewThreadUnsafeSet()
+
+	a.Add(1)
+	a.Add("delta")
+	a.Add("chi")
+	a.Add(4)
+
+	b := a.PowerSet()
+	if b.Cardinality() != 16 {
+		t.Error("unexpected PowerSet cardinality")
+	}
+}
+
+func Test_EmptySetProperties(t *testing.T) {
+	empty := NewSet()
+
+	a := NewSet()
+	a.Add(1)
+	a.Add("foo")
+	a.Add("bar")
+
+	b := NewSet()
+	b.Add("one")
+	b.Add("two")
+	b.Add(3)
+	b.Add(4)
+
+	c := NewSet()
+
+	if !empty.IsSubset(a) || !empty.IsSubset(b) {
+		t.Error("The empty set is supposed to be a subset of all sets")
+	}
+
+	if !a.IsSuperset(empty) || !b.IsSuperset(empty) {
+		t.Error("All sets are supposed to be a superset of the empty set")
+	}
+
+	if !empty.IsSubset(empty) || !empty.IsSuperset(empty) {
+		t.Error("The empty set is supposed to be a subset and a superset of itself")
+	}
+
+	c = a.Union(empty)
+	if !c.Equal(a) {
+		t.Error("The union of any set with the empty set is supposed to be equal to itself")
+	}
+
+	c = a.Intersect(empty)
+	if !c.Equal(empty) {
+		t.Error("The intersection of any set with the empty set is supposed to be the empty set")
+	}
+
+	c = a.CartesianProduct(empty)
+	if c.Cardinality() != 0 {
+		t.Error("Cartesian product of any set and the empty set must be the empty set")
+	}
+
+	if empty.Cardinality() != 0 {
+		t.Error("Cardinality of the empty set is supposed to be zero")
+	}
+
+	c = empty.PowerSet()
+	if c.Cardinality() != 1 {
+		t.Error("Cardinality of the power set of the empty set is supposed to be one { {} }")
+	}
+}
+
+func Test_CartesianProduct(t *testing.T) {
+	a := NewThreadUnsafeSet()
+	b := NewThreadUnsafeSet()
+	empty := NewThreadUnsafeSet()
+
+	a.Add(1)
+	a.Add(2)
+	a.Add(3)
+
+	b.Add("one")
+	b.Add("two")
+	b.Add("three")
+	b.Add("alpha")
+	b.Add("gamma")
+
+	c := a.CartesianProduct(b)
+	d := b.CartesianProduct(a)
+
+	if c.Cardinality() != d.Cardinality() {
+		t.Error("Cardinality of AxB must be equal to BxA")
+	}
+
+	if c.Cardinality() != (a.Cardinality() * b.Cardinality()) {
+		t.Error("Unexpected cardinality for cartesian product set")
+	}
+
+	c = a.CartesianProduct(empty)
+	d = empty.CartesianProduct(b)
+
+	if c.Cardinality() != 0 || d.Cardinality() != 0 {
+		t.Error("Cartesian product of any set and the empty set Ax0 || 0xA must be the empty set")
+	}
+}
+
+func Test_ToSliceUnthreadsafe(t *testing.T) {
+	s := makeUnsafeSet([]int{1, 2, 3})
+	setAsSlice := s.ToSlice()
+	if len(setAsSlice) != s.Cardinality() {
+		t.Errorf("Set length is incorrect: %v", len(setAsSlice))
+	}
+
+	for _, i := range setAsSlice {
+		if !s.Contains(i) {
+			t.Errorf("Set is missing element: %v", i)
+		}
+	}
+}
+
+func Test_Example(t *testing.T) {
+	/*
+	   requiredClasses := NewSet()
+	   requiredClasses.Add("Cooking")
+	   requiredClasses.Add("English")
+	   requiredClasses.Add("Math")
+	   requiredClasses.Add("Biology")
+
+	   scienceSlice := []interface{}{"Biology", "Chemistry"}
+	   scienceClasses := NewSetFromSlice(scienceSlice)
+
+	   electiveClasses := NewSet()
+	   electiveClasses.Add("Welding")
+	   electiveClasses.Add("Music")
+	   electiveClasses.Add("Automotive")
+
+	   bonusClasses := NewSet()
+	   bonusClasses.Add("Go Programming")
+	   bonusClasses.Add("Python Programming")
+
+	   //Show me all the available classes I can take
+	   allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses)
+	   fmt.Println(allClasses) //Set{English, Chemistry, Automotive, Cooking, Math, Biology, Welding, Music, Go Programming}
+
+	   //Is cooking considered a science class?
+	   fmt.Println(scienceClasses.Contains("Cooking")) //false
+
+	   //Show me all classes that are not science classes, since I hate science.
+	   fmt.Println(allClasses.Difference(scienceClasses)) //Set{English, Automotive, Cooking, Math, Welding, Music, Go Programming}
+
+	   //Which science classes are also required classes?
+	   fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology}
+
+	   //How many bonus classes do you offer?
+	   fmt.Println(bonusClasses.Cardinality()) //2
+
+	   //Do you have the following classes? Welding, Automotive and English?
+	   fmt.Println(allClasses.Contains("Welding", "Automotive", "English"))
+	*/
+}

+ 204 - 0
libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/threadsafe.go

@@ -0,0 +1,204 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+import "sync"
+
+type threadSafeSet struct {
+	s threadUnsafeSet
+	sync.RWMutex
+}
+
+func newThreadSafeSet() threadSafeSet {
+	return threadSafeSet{s: newThreadUnsafeSet()}
+}
+
+func (set *threadSafeSet) Add(i interface{}) bool {
+	set.Lock()
+	ret := set.s.Add(i)
+	set.Unlock()
+	return ret
+}
+
+func (set *threadSafeSet) Contains(i ...interface{}) bool {
+	set.RLock()
+	ret := set.s.Contains(i...)
+	set.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) IsSubset(other Set) bool {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	ret := set.s.IsSubset(&o.s)
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) IsSuperset(other Set) bool {
+	return other.IsSubset(set)
+}
+
+func (set *threadSafeSet) Union(other Set) Set {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet)
+	ret := &threadSafeSet{s: *unsafeUnion}
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) Intersect(other Set) Set {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet)
+	ret := &threadSafeSet{s: *unsafeIntersection}
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) Difference(other Set) Set {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet)
+	ret := &threadSafeSet{s: *unsafeDifference}
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) SymmetricDifference(other Set) Set {
+	o := other.(*threadSafeSet)
+
+	unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet)
+	return &threadSafeSet{s: *unsafeDifference}
+}
+
+func (set *threadSafeSet) Clear() {
+	set.Lock()
+	set.s = newThreadUnsafeSet()
+	set.Unlock()
+}
+
+func (set *threadSafeSet) Remove(i interface{}) {
+	set.Lock()
+	delete(set.s, i)
+	set.Unlock()
+}
+
+func (set *threadSafeSet) Cardinality() int {
+	set.RLock()
+	defer set.RUnlock()
+	return len(set.s)
+}
+
+func (set *threadSafeSet) Iter() <-chan interface{} {
+	ch := make(chan interface{})
+	go func() {
+		set.RLock()
+
+		for elem := range set.s {
+			ch <- elem
+		}
+		close(ch)
+		set.RUnlock()
+	}()
+
+	return ch
+}
+
+func (set *threadSafeSet) Equal(other Set) bool {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	ret := set.s.Equal(&o.s)
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) Clone() Set {
+	set.RLock()
+
+	unsafeClone := set.s.Clone().(*threadUnsafeSet)
+	ret := &threadSafeSet{s: *unsafeClone}
+	set.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) String() string {
+	set.RLock()
+	ret := set.s.String()
+	set.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) PowerSet() Set {
+	set.RLock()
+	ret := set.s.PowerSet()
+	set.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) CartesianProduct(other Set) Set {
+	o := other.(*threadSafeSet)
+
+	set.RLock()
+	o.RLock()
+
+	unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet)
+	ret := &threadSafeSet{s: *unsafeCartProduct}
+	set.RUnlock()
+	o.RUnlock()
+	return ret
+}
+
+func (set *threadSafeSet) ToSlice() []interface{} {
+	set.RLock()
+	keys := make([]interface{}, 0, set.Cardinality())
+	for elem := range set.s {
+		keys = append(keys, elem)
+	}
+	set.RUnlock()
+	return keys
+}

+ 376 - 0
libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/threadsafe_test.go

@@ -0,0 +1,376 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+import (
+	"math/rand"
+	"runtime"
+	"sync"
+	"testing"
+)
+
+const N = 1000
+
+func Test_AddConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s := NewSet()
+	ints := rand.Perm(N)
+
+	var wg sync.WaitGroup
+	wg.Add(len(ints))
+	for i := 0; i < len(ints); i++ {
+		go func(i int) {
+			s.Add(i)
+			wg.Done()
+		}(i)
+	}
+
+	wg.Wait()
+	for _, i := range ints {
+		if !s.Contains(i) {
+			t.Errorf("Set is missing element: %v", i)
+		}
+	}
+}
+
+func Test_CardinalityConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s := NewSet()
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		elems := s.Cardinality()
+		for i := 0; i < N; i++ {
+			newElems := s.Cardinality()
+			if newElems < elems {
+				t.Errorf("Cardinality shrunk from %v to %v", elems, newElems)
+			}
+		}
+		wg.Done()
+	}()
+
+	for i := 0; i < N; i++ {
+		s.Add(rand.Int())
+	}
+	wg.Wait()
+}
+
+func Test_ClearConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s := NewSet()
+	ints := rand.Perm(N)
+
+	var wg sync.WaitGroup
+	wg.Add(len(ints))
+	for i := 0; i < len(ints); i++ {
+		go func() {
+			s.Clear()
+			wg.Done()
+		}()
+		go func(i int) {
+			s.Add(i)
+		}(i)
+	}
+
+	wg.Wait()
+}
+
+func Test_CloneConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s := NewSet()
+	ints := rand.Perm(N)
+
+	for _, v := range ints {
+		s.Add(v)
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(len(ints))
+	for i := range ints {
+		go func(i int) {
+			s.Remove(i)
+			wg.Done()
+		}(i)
+	}
+
+	s.Clone()
+}
+
+func Test_ContainsConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s := NewSet()
+	ints := rand.Perm(N)
+	interfaces := make([]interface{}, 0)
+	for _, v := range ints {
+		s.Add(v)
+		interfaces = append(interfaces, v)
+	}
+
+	var wg sync.WaitGroup
+	for _ = range ints {
+		go func() {
+			s.Contains(interfaces...)
+		}()
+	}
+	wg.Wait()
+}
+
+func Test_DifferenceConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s, ss := NewSet(), NewSet()
+	ints := rand.Perm(N)
+	interfaces := make([]interface{}, 0)
+	for _, v := range ints {
+		s.Add(v)
+		ss.Add(v)
+		interfaces = append(interfaces, v)
+	}
+
+	var wg sync.WaitGroup
+	for _ = range ints {
+		go func() {
+			s.Difference(ss)
+		}()
+	}
+	wg.Wait()
+}
+
+func Test_EqualConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s, ss := NewSet(), NewSet()
+	ints := rand.Perm(N)
+	interfaces := make([]interface{}, 0)
+	for _, v := range ints {
+		s.Add(v)
+		ss.Add(v)
+		interfaces = append(interfaces, v)
+	}
+
+	var wg sync.WaitGroup
+	for _ = range ints {
+		go func() {
+			s.Equal(ss)
+		}()
+	}
+	wg.Wait()
+}
+
+func Test_IntersectConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s, ss := NewSet(), NewSet()
+	ints := rand.Perm(N)
+	interfaces := make([]interface{}, 0)
+	for _, v := range ints {
+		s.Add(v)
+		ss.Add(v)
+		interfaces = append(interfaces, v)
+	}
+
+	var wg sync.WaitGroup
+	for _ = range ints {
+		go func() {
+			s.Intersect(ss)
+		}()
+	}
+	wg.Wait()
+}
+
+func Test_IsSubsetConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s, ss := NewSet(), NewSet()
+	ints := rand.Perm(N)
+	interfaces := make([]interface{}, 0)
+	for _, v := range ints {
+		s.Add(v)
+		ss.Add(v)
+		interfaces = append(interfaces, v)
+	}
+
+	var wg sync.WaitGroup
+	for _ = range ints {
+		go func() {
+			s.IsSubset(ss)
+		}()
+	}
+	wg.Wait()
+}
+
+func Test_IsSupersetConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s, ss := NewSet(), NewSet()
+	ints := rand.Perm(N)
+	interfaces := make([]interface{}, 0)
+	for _, v := range ints {
+		s.Add(v)
+		ss.Add(v)
+		interfaces = append(interfaces, v)
+	}
+
+	var wg sync.WaitGroup
+	for _ = range ints {
+		go func() {
+			s.IsSuperset(ss)
+		}()
+	}
+	wg.Wait()
+}
+
+func Test_IterConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s := NewSet()
+	ints := rand.Perm(N)
+	for _, v := range ints {
+		s.Add(v)
+	}
+
+	cs := make([]<-chan interface{}, 0)
+	for _ = range ints {
+		cs = append(cs, s.Iter())
+	}
+
+	c := make(chan interface{})
+	go func() {
+		for n := 0; n < len(ints)*N; {
+			for _, d := range cs {
+				select {
+				case <-d:
+					n++
+					c <- nil
+				default:
+				}
+			}
+		}
+		close(c)
+	}()
+
+	for _ = range c {
+	}
+}
+
+func Test_RemoveConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s := NewSet()
+	ints := rand.Perm(N)
+	for _, v := range ints {
+		s.Add(v)
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(len(ints))
+	for _, v := range ints {
+		go func(i int) {
+			s.Remove(i)
+			wg.Done()
+		}(v)
+	}
+	wg.Wait()
+
+	if s.Cardinality() != 0 {
+		t.Errorf("Expected cardinality 0; got %v", s.Cardinality())
+	}
+}
+
+func Test_StringConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s := NewSet()
+	ints := rand.Perm(N)
+	for _, v := range ints {
+		s.Add(v)
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(len(ints))
+	for _ = range ints {
+		go func() {
+			s.String()
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+}
+
+func Test_SymmetricDifferenceConcurrent(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s, ss := NewSet(), NewSet()
+	ints := rand.Perm(N)
+	interfaces := make([]interface{}, 0)
+	for _, v := range ints {
+		s.Add(v)
+		ss.Add(v)
+		interfaces = append(interfaces, v)
+	}
+
+	var wg sync.WaitGroup
+	for _ = range ints {
+		go func() {
+			s.SymmetricDifference(ss)
+		}()
+	}
+	wg.Wait()
+}
+
+func Test_ToSlice(t *testing.T) {
+	runtime.GOMAXPROCS(2)
+
+	s := NewSet()
+	ints := rand.Perm(N)
+
+	var wg sync.WaitGroup
+	wg.Add(len(ints))
+	for i := 0; i < len(ints); i++ {
+		go func(i int) {
+			s.Add(i)
+			wg.Done()
+		}(i)
+	}
+
+	wg.Wait()
+	setAsSlice := s.ToSlice()
+	if len(setAsSlice) != s.Cardinality() {
+		t.Errorf("Set length is incorrect: %v", len(setAsSlice))
+	}
+
+	for _, i := range setAsSlice {
+		if !s.Contains(i) {
+			t.Errorf("Set is missing element: %v", i)
+		}
+	}
+}

+ 246 - 0
libnetwork/Godeps/_workspace/src/github.com/deckarep/golang-set/threadunsafe.go

@@ -0,0 +1,246 @@
+/*
+Open Source Initiative OSI - The MIT License (MIT):Licensing
+
+The MIT License (MIT)
+Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package mapset
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+type threadUnsafeSet map[interface{}]struct{}
+
+type orderedPair struct {
+	first  interface{}
+	second interface{}
+}
+
+func newThreadUnsafeSet() threadUnsafeSet {
+	return make(threadUnsafeSet)
+}
+
+func (pair *orderedPair) Equal(other orderedPair) bool {
+	if pair.first == other.first &&
+		pair.second == other.second {
+		return true
+	}
+
+	return false
+}
+
+func (set *threadUnsafeSet) Add(i interface{}) bool {
+	_, found := (*set)[i]
+	(*set)[i] = struct{}{}
+	return !found //False if it existed already
+}
+
+func (set *threadUnsafeSet) Contains(i ...interface{}) bool {
+	for _, val := range i {
+		if _, ok := (*set)[val]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+func (set *threadUnsafeSet) IsSubset(other Set) bool {
+	_ = other.(*threadUnsafeSet)
+	for elem := range *set {
+		if !other.Contains(elem) {
+			return false
+		}
+	}
+	return true
+}
+
+func (set *threadUnsafeSet) IsSuperset(other Set) bool {
+	return other.IsSubset(set)
+}
+
+func (set *threadUnsafeSet) Union(other Set) Set {
+	o := other.(*threadUnsafeSet)
+
+	unionedSet := newThreadUnsafeSet()
+
+	for elem := range *set {
+		unionedSet.Add(elem)
+	}
+	for elem := range *o {
+		unionedSet.Add(elem)
+	}
+	return &unionedSet
+}
+
+func (set *threadUnsafeSet) Intersect(other Set) Set {
+	o := other.(*threadUnsafeSet)
+
+	intersection := newThreadUnsafeSet()
+	// loop over smaller set
+	if set.Cardinality() < other.Cardinality() {
+		for elem := range *set {
+			if other.Contains(elem) {
+				intersection.Add(elem)
+			}
+		}
+	} else {
+		for elem := range *o {
+			if set.Contains(elem) {
+				intersection.Add(elem)
+			}
+		}
+	}
+	return &intersection
+}
+
+func (set *threadUnsafeSet) Difference(other Set) Set {
+	_ = other.(*threadUnsafeSet)
+
+	difference := newThreadUnsafeSet()
+	for elem := range *set {
+		if !other.Contains(elem) {
+			difference.Add(elem)
+		}
+	}
+	return &difference
+}
+
+func (set *threadUnsafeSet) SymmetricDifference(other Set) Set {
+	_ = other.(*threadUnsafeSet)
+
+	aDiff := set.Difference(other)
+	bDiff := other.Difference(set)
+	return aDiff.Union(bDiff)
+}
+
+func (set *threadUnsafeSet) Clear() {
+	*set = newThreadUnsafeSet()
+}
+
+func (set *threadUnsafeSet) Remove(i interface{}) {
+	delete(*set, i)
+}
+
+func (set *threadUnsafeSet) Cardinality() int {
+	return len(*set)
+}
+
+func (set *threadUnsafeSet) Iter() <-chan interface{} {
+	ch := make(chan interface{})
+	go func() {
+		for elem := range *set {
+			ch <- elem
+		}
+		close(ch)
+	}()
+
+	return ch
+}
+
+func (set *threadUnsafeSet) Equal(other Set) bool {
+	_ = other.(*threadUnsafeSet)
+
+	if set.Cardinality() != other.Cardinality() {
+		return false
+	}
+	for elem := range *set {
+		if !other.Contains(elem) {
+			return false
+		}
+	}
+	return true
+}
+
+func (set *threadUnsafeSet) Clone() Set {
+	clonedSet := newThreadUnsafeSet()
+	for elem := range *set {
+		clonedSet.Add(elem)
+	}
+	return &clonedSet
+}
+
+func (set *threadUnsafeSet) String() string {
+	items := make([]string, 0, len(*set))
+
+	for elem := range *set {
+		items = append(items, fmt.Sprintf("%v", elem))
+	}
+	return fmt.Sprintf("Set{%s}", strings.Join(items, ", "))
+}
+
+func (pair orderedPair) String() string {
+	return fmt.Sprintf("(%v, %v)", pair.first, pair.second)
+}
+
+func (set *threadUnsafeSet) PowerSet() Set {
+	powSet := NewThreadUnsafeSet()
+	nullset := newThreadUnsafeSet()
+	powSet.Add(&nullset)
+
+	for es := range *set {
+		u := newThreadUnsafeSet()
+		j := powSet.Iter()
+		for er := range j {
+			p := newThreadUnsafeSet()
+			if reflect.TypeOf(er).Name() == "" {
+				k := er.(*threadUnsafeSet)
+				for ek := range *(k) {
+					p.Add(ek)
+				}
+			} else {
+				p.Add(er)
+			}
+			p.Add(es)
+			u.Add(&p)
+		}
+
+		powSet = powSet.Union(&u)
+	}
+
+	return powSet
+}
+
+func (set *threadUnsafeSet) CartesianProduct(other Set) Set {
+	o := other.(*threadUnsafeSet)
+	cartProduct := NewThreadUnsafeSet()
+
+	for i := range *set {
+		for j := range *o {
+			elem := orderedPair{first: i, second: j}
+			cartProduct.Add(elem)
+		}
+	}
+
+	return cartProduct
+}
+
+func (set *threadUnsafeSet) ToSlice() []interface{} {
+	keys := make([]interface{}, 0, set.Cardinality())
+	for elem := range *set {
+		keys = append(keys, elem)
+	}
+
+	return keys
+}

+ 274 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/README.md

@@ -0,0 +1,274 @@
+---
+page_title: Docker Swarm discovery
+page_description: Swarm discovery
+page_keywords: docker, swarm, clustering, discovery
+---
+
+# Discovery
+
+Docker Swarm comes with multiple Discovery backends.
+
+## Backends
+
+### Hosted Discovery with Docker Hub
+
+First we create a cluster.
+
+```bash
+# create a cluster
+$ swarm create
+6856663cdefdec325839a4b7e1de38e8 # <- this is your unique <cluster_id>
+```
+
+Then we create each node and join them to the cluster.
+
+```bash
+# on each of your nodes, start the swarm agent
+#  <node_ip> doesn't have to be public (e.g. 192.168.0.X),
+#  as long as the swarm manager can access it.
+$ swarm join --addr=<node_ip:2375> token://<cluster_id>
+```
+
+Finally, we start the Swarm manager. This can be on any machine or even
+your laptop.
+
+```bash
+$ swarm manage -H tcp://<swarm_ip:swarm_port> token://<cluster_id>
+```
+
+You can then use regular Docker commands to interact with your swarm.
+
+```bash
+docker -H tcp://<swarm_ip:swarm_port> info
+docker -H tcp://<swarm_ip:swarm_port> run ...
+docker -H tcp://<swarm_ip:swarm_port> ps
+docker -H tcp://<swarm_ip:swarm_port> logs ...
+...
+```
+
+You can also list the nodes in your cluster.
+
+```bash
+swarm list token://<cluster_id>
+<node_ip:2375>
+```
+
+### Using a static file describing the cluster
+
+For each of your nodes, add a line to a file. The node IP address
+doesn't need to be public as long as the Swarm manager can access it.
+
+```bash
+echo <node_ip1:2375> >> /tmp/my_cluster
+echo <node_ip2:2375> >> /tmp/my_cluster
+echo <node_ip3:2375> >> /tmp/my_cluster
+```
+
+Then start the Swarm manager on any machine.
+
+```bash
+swarm manage -H tcp://<swarm_ip:swarm_port> file:///tmp/my_cluster
+```
+
+And then use the regular Docker commands.
+
+```bash
+docker -H tcp://<swarm_ip:swarm_port> info
+docker -H tcp://<swarm_ip:swarm_port> run ...
+docker -H tcp://<swarm_ip:swarm_port> ps
+docker -H tcp://<swarm_ip:swarm_port> logs ...
+...
+```
+
+You can list the nodes in your cluster.
+
+```bash
+$ swarm list file:///tmp/my_cluster
+<node_ip1:2375>
+<node_ip2:2375>
+<node_ip3:2375>
+```
+
+### Using etcd
+
+On each of your nodes, start the Swarm agent. The node IP address
+doesn't have to be public as long as the swarm manager can access it.
+
+```bash
+swarm join --addr=<node_ip:2375> etcd://<etcd_ip>/<path>
+```
+
+Start the manager on any machine or your laptop.
+
+```bash
+swarm manage -H tcp://<swarm_ip:swarm_port> etcd://<etcd_ip>/<path>
+```
+
+And then use the regular Docker commands.
+
+```bash
+docker -H tcp://<swarm_ip:swarm_port> info
+docker -H tcp://<swarm_ip:swarm_port> run ...
+docker -H tcp://<swarm_ip:swarm_port> ps
+docker -H tcp://<swarm_ip:swarm_port> logs ...
+...
+```
+
+You can list the nodes in your cluster.
+
+```bash
+swarm list etcd://<etcd_ip>/<path>
+<node_ip:2375>
+```
+
+### Using consul
+
+On each of your nodes, start the Swarm agent. The node IP address
+doesn't need to be public as long as the Swarm manager can access it.
+
+```bash
+swarm join --addr=<node_ip:2375> consul://<consul_addr>/<path>
+```
+
+Start the manager on any machine or your laptop.
+
+```bash
+swarm manage -H tcp://<swarm_ip:swarm_port> consul://<consul_addr>/<path>
+```
+
+And then use the regular Docker commands.
+
+```bash
+docker -H tcp://<swarm_ip:swarm_port> info
+docker -H tcp://<swarm_ip:swarm_port> run ...
+docker -H tcp://<swarm_ip:swarm_port> ps
+docker -H tcp://<swarm_ip:swarm_port> logs ...
+...
+```
+
+You can list the nodes in your cluster.
+
+```bash
+swarm list consul://<consul_addr>/<path>
+<node_ip:2375>
+```
+
+### Using zookeeper
+
+On each of your nodes, start the Swarm agent. The node IP doesn't have
+to be public as long as the swarm manager can access it.
+
+```bash
+swarm join --addr=<node_ip:2375> zk://<zookeeper_addr1>,<zookeeper_addr2>/<path>
+```
+
+Start the manager on any machine or your laptop.
+
+```bash
+swarm manage -H tcp://<swarm_ip:swarm_port> zk://<zookeeper_addr1>,<zookeeper_addr2>/<path>
+```
+
+You can then use the regular Docker commands.
+
+```bash
+docker -H tcp://<swarm_ip:swarm_port> info
+docker -H tcp://<swarm_ip:swarm_port> run ...
+docker -H tcp://<swarm_ip:swarm_port> ps
+docker -H tcp://<swarm_ip:swarm_port> logs ...
+...
+```
+
+You can list the nodes in the cluster.
+
+```bash
+swarm list zk://<zookeeper_addr1>,<zookeeper_addr2>/<path>
+<node_ip:2375>
+```
+
+### Using a static list of IP addresses
+
+Start the manager on any machine or your laptop
+
+```bash
+swarm manage -H <swarm_ip:swarm_port> nodes://<node_ip1:2375>,<node_ip2:2375>
+```
+
+Or
+
+```bash
+swarm manage -H <swarm_ip:swarm_port> <node_ip1:2375>,<node_ip2:2375>
+```
+
+Then use the regular Docker commands.
+
+```bash
+docker -H <swarm_ip:swarm_port> info
+docker -H <swarm_ip:swarm_port> run ...
+docker -H <swarm_ip:swarm_port> ps
+docker -H <swarm_ip:swarm_port> logs ...
+...
+```
+
+### Range pattern for IP addresses
+
+The `file` and `nodes` discoveries support a range pattern to specify IP
+addresses; e.g., `10.0.0.[10:200]` expands to the list of nodes from
+`10.0.0.10` to `10.0.0.200`.
+
+For example, with the `file` discovery method:
+
+```bash
+$ echo "10.0.0.[11:100]:2375"   >> /tmp/my_cluster
+$ echo "10.0.1.[15:20]:2375"    >> /tmp/my_cluster
+$ echo "192.168.1.2:[2:20]375"  >> /tmp/my_cluster
+```
+
+Then start the manager.
+
+```bash
+swarm manage -H tcp://<swarm_ip:swarm_port> file:///tmp/my_cluster
+```
+
+And with the `nodes` discovery method:
+
+```bash
+swarm manage -H <swarm_ip:swarm_port> "nodes://10.0.0.[10:200]:2375,10.0.1.[2:250]:2375"
+```
+
+## Contributing a new discovery backend
+
+Contributing a new discovery backend is easy: simply implement this
+interface:
+
+```go
+type Discovery interface {
+     Initialize(string, int) error
+     Fetch() ([]string, error)
+     Watch(WatchCallback)
+     Register(string) error
+}
+```
+
+### Initialize
+
+The parameters are the discovery location (without the scheme) and a heartbeat interval (in seconds).
+
+### Fetch
+
+Returns the list of all the nodes from the discovery.
+
+### Watch
+
+Triggers an update (`Fetch`). This can happen either via a timer (as with
+`token`) or by using backend-specific features (as with `etcd`).
+
+### Register
+
+Add a new node to the discovery service.
+
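For concreteness, below is a hypothetical minimal backend written against the Go interface as vendored in this change (`Initialize(string, uint64) error`, `Fetch() ([]*discovery.Entry, error)`, and friends, which differ slightly from the simplified signatures listed above). The `static` scheme name and package are illustrative only:

```go
// Package static is a hypothetical discovery backend returning a fixed list.
package static

import (
	"strings"

	"github.com/docker/swarm/discovery"
)

// Discovery holds the comma-separated address list given at Initialize time.
type Discovery struct {
	heartbeat uint64
	uris      string
}

func init() {
	// Make the backend selectable as "static://addr1,addr2".
	discovery.Register("static", &Discovery{})
}

// Initialize stores the location (scheme already stripped) and the heartbeat.
func (s *Discovery) Initialize(uris string, heartbeat uint64) error {
	s.uris = uris
	s.heartbeat = heartbeat
	return nil
}

// Fetch returns the nodes parsed from the stored address list.
func (s *Discovery) Fetch() ([]*discovery.Entry, error) {
	return discovery.CreateEntries(strings.Split(s.uris, ","))
}

// Watch is a no-op for a static list.
func (s *Discovery) Watch(callback discovery.WatchCallback) {}

// Register is not supported by a static backend.
func (s *Discovery) Register(addr string) error {
	return discovery.ErrNotImplemented
}
```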
+## Docker Swarm documentation index
+
+- [User guide](./index.md)
+- [Scheduler strategies](./scheduler/strategy.md)
+- [Scheduler filters](./scheduler/filter.md)
+- [Swarm API](./API.md)

+ 106 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery.go

@@ -0,0 +1,106 @@
+package discovery
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+
+	log "github.com/Sirupsen/logrus"
+)
+
+// Entry is exported
+type Entry struct {
+	Host string
+	Port string
+}
+
+// NewEntry is exported
+func NewEntry(url string) (*Entry, error) {
+	host, port, err := net.SplitHostPort(url)
+	if err != nil {
+		return nil, err
+	}
+	return &Entry{host, port}, nil
+}
+
+func (m Entry) String() string {
+	return fmt.Sprintf("%s:%s", m.Host, m.Port)
+}
+
+// WatchCallback is exported
+type WatchCallback func(entries []*Entry)
+
+// Discovery is exported
+type Discovery interface {
+	Initialize(string, uint64) error
+	Fetch() ([]*Entry, error)
+	Watch(WatchCallback)
+	Register(string) error
+}
+
+var (
+	discoveries map[string]Discovery
+	// ErrNotSupported is exported
+	ErrNotSupported = errors.New("discovery service not supported")
+	// ErrNotImplemented is exported
+	ErrNotImplemented = errors.New("not implemented in this discovery service")
+)
+
+func init() {
+	discoveries = make(map[string]Discovery)
+}
+
+// Register is exported
+func Register(scheme string, d Discovery) error {
+	if _, exists := discoveries[scheme]; exists {
+		return fmt.Errorf("scheme already registered %s", scheme)
+	}
+	log.WithField("name", scheme).Debug("Registering discovery service")
+	discoveries[scheme] = d
+
+	return nil
+}
+
+func parse(rawurl string) (string, string) {
+	parts := strings.SplitN(rawurl, "://", 2)
+
+	// node1:port,node2:port => nodes://node1:port,node2:port
+	if len(parts) == 1 {
+		return "nodes", parts[0]
+	}
+	return parts[0], parts[1]
+}
+
+// New is exported
+func New(rawurl string, heartbeat uint64) (Discovery, error) {
+	scheme, uri := parse(rawurl)
+
+	if discovery, exists := discoveries[scheme]; exists {
+		log.WithFields(log.Fields{"name": scheme, "uri": uri}).Debug("Initializing discovery service")
+		err := discovery.Initialize(uri, heartbeat)
+		return discovery, err
+	}
+
+	return nil, ErrNotSupported
+}
+
+// CreateEntries is exported
+func CreateEntries(addrs []string) ([]*Entry, error) {
+	entries := []*Entry{}
+	if addrs == nil {
+		return entries, nil
+	}
+
+	for _, addr := range addrs {
+		if len(addr) == 0 {
+			continue
+		}
+		entry, err := NewEntry(addr)
+		if err != nil {
+			return nil, err
+		}
+		entries = append(entries, entry)
+	}
+	return entries, nil
+}
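
As a small usage sketch of `parse` and `New`: a raw URL without a `scheme://` prefix falls back to the `nodes` backend (vendored later in this change and registered through its package `init`), and `Fetch` then returns the parsed entries. The import paths assumed here are the vendored ones shown in this pull request:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/swarm/discovery"
	// Imported for its init() side effect: registers the "nodes" scheme.
	_ "github.com/docker/swarm/discovery/nodes"
)

func main() {
	// No "scheme://" prefix, so parse() falls back to the "nodes" backend.
	d, err := discovery.New("1.1.1.1:1111,2.2.2.[2:4]:2222", 0)
	if err != nil {
		log.Fatal(err)
	}

	entries, err := d.Fetch()
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.String()) // 1.1.1.1:1111, 2.2.2.2:2222, 2.2.2.3:2222, 2.2.2.4:2222
	}
}
```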

+ 54 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery_test.go

@@ -0,0 +1,54 @@
+package discovery
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNewEntry(t *testing.T) {
+	entry, err := NewEntry("127.0.0.1:2375")
+	assert.Equal(t, entry.Host, "127.0.0.1")
+	assert.Equal(t, entry.Port, "2375")
+	assert.NoError(t, err)
+
+	_, err = NewEntry("127.0.0.1")
+	assert.Error(t, err)
+}
+
+func TestParse(t *testing.T) {
+	scheme, uri := parse("127.0.0.1:2375")
+	assert.Equal(t, scheme, "nodes")
+	assert.Equal(t, uri, "127.0.0.1:2375")
+
+	scheme, uri = parse("localhost:2375")
+	assert.Equal(t, scheme, "nodes")
+	assert.Equal(t, uri, "localhost:2375")
+
+	scheme, uri = parse("scheme://127.0.0.1:2375")
+	assert.Equal(t, scheme, "scheme")
+	assert.Equal(t, uri, "127.0.0.1:2375")
+
+	scheme, uri = parse("scheme://localhost:2375")
+	assert.Equal(t, scheme, "scheme")
+	assert.Equal(t, uri, "localhost:2375")
+
+	scheme, uri = parse("")
+	assert.Equal(t, scheme, "nodes")
+	assert.Equal(t, uri, "")
+}
+
+func TestCreateEntries(t *testing.T) {
+	entries, err := CreateEntries(nil)
+	assert.Equal(t, entries, []*Entry{})
+	assert.NoError(t, err)
+
+	entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""})
+	assert.Equal(t, len(entries), 2)
+	assert.Equal(t, entries[0].String(), "127.0.0.1:2375")
+	assert.Equal(t, entries[1].String(), "127.0.0.2:2375")
+	assert.NoError(t, err)
+
+	_, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"})
+	assert.Error(t, err)
+}

+ 71 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file.go

@@ -0,0 +1,71 @@
+package file
+
+import (
+	"io/ioutil"
+	"strings"
+	"time"
+
+	"github.com/docker/swarm/discovery"
+)
+
+// Discovery is exported
+type Discovery struct {
+	heartbeat uint64
+	path      string
+}
+
+func init() {
+	discovery.Register("file", &Discovery{})
+}
+
+// Initialize is exported
+func (s *Discovery) Initialize(path string, heartbeat uint64) error {
+	s.path = path
+	s.heartbeat = heartbeat
+	return nil
+}
+
+func parseFileContent(content []byte) []string {
+	var result []string
+	for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") {
+		line = strings.TrimSpace(line)
+		// Ignore lines that start with #
+		if strings.HasPrefix(line, "#") {
+			continue
+		}
+		// Inline # comments are also ignored.
+		if strings.Contains(line, "#") {
+			line = line[0:strings.Index(line, "#")]
+			// Trim additional spaces caused by above stripping.
+			line = strings.TrimSpace(line)
+		}
+		for _, ip := range discovery.Generate(line) {
+			result = append(result, ip)
+		}
+	}
+	return result
+}
+
+// Fetch is exported
+func (s *Discovery) Fetch() ([]*discovery.Entry, error) {
+	fileContent, err := ioutil.ReadFile(s.path)
+	if err != nil {
+		return nil, err
+	}
+	return discovery.CreateEntries(parseFileContent(fileContent))
+}
+
+// Watch is exported
+func (s *Discovery) Watch(callback discovery.WatchCallback) {
+	for _ = range time.Tick(time.Duration(s.heartbeat) * time.Second) {
+		entries, err := s.Fetch()
+		if err == nil {
+			callback(entries)
+		}
+	}
+}
+
+// Register is exported
+func (s *Discovery) Register(addr string) error {
+	return discovery.ErrNotImplemented
+}

+ 46 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file_test.go

@@ -0,0 +1,46 @@
+package file
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInitialize(t *testing.T) {
+	discovery := &Discovery{}
+	discovery.Initialize("/path/to/file", 0)
+	assert.Equal(t, discovery.path, "/path/to/file")
+}
+
+func TestContent(t *testing.T) {
+	data := `
+1.1.1.[1:2]:1111
+2.2.2.[2:4]:2222
+`
+	ips := parseFileContent([]byte(data))
+	assert.Equal(t, ips[0], "1.1.1.1:1111")
+	assert.Equal(t, ips[1], "1.1.1.2:1111")
+	assert.Equal(t, ips[2], "2.2.2.2:2222")
+	assert.Equal(t, ips[3], "2.2.2.3:2222")
+	assert.Equal(t, ips[4], "2.2.2.4:2222")
+}
+
+func TestRegister(t *testing.T) {
+	discovery := &Discovery{path: "/path/to/file"}
+	assert.Error(t, discovery.Register("0.0.0.0"))
+}
+
+func TestParsingContentsWithComments(t *testing.T) {
+	data := `
+### test ###
+1.1.1.1:1111 # inline comment
+# 2.2.2.2:2222
+      ### empty line with comment
+    3.3.3.3:3333
+### test ###
+`
+	ips := parseFileContent([]byte(data))
+	assert.Equal(t, 2, len(ips))
+	assert.Equal(t, "1.1.1.1:1111", ips[0])
+	assert.Equal(t, "3.3.3.3:3333", ips[1])
+}

+ 35 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator.go

@@ -0,0 +1,35 @@
+package discovery
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+)
+
+// Generate takes care of IP generation
+func Generate(pattern string) []string {
+	re, _ := regexp.Compile(`\[(.+):(.+)\]`)
+	submatch := re.FindStringSubmatch(pattern)
+	if submatch == nil {
+		return []string{pattern}
+	}
+
+	from, err := strconv.Atoi(submatch[1])
+	if err != nil {
+		return []string{pattern}
+	}
+	to, err := strconv.Atoi(submatch[2])
+	if err != nil {
+		return []string{pattern}
+	}
+
+	template := re.ReplaceAllString(pattern, "%d")
+
+	var result []string
+	for val := from; val <= to; val++ {
+		entry := fmt.Sprintf(template, val)
+		result = append(result, entry)
+	}
+
+	return result
+}
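
A quick illustration of the range expansion performed by `Generate` (a sketch using only the function defined above; expected output shown in comments):

```go
package main

import (
	"fmt"

	"github.com/docker/swarm/discovery"
)

func main() {
	// A pattern without a [from:to] range is returned unchanged.
	fmt.Println(discovery.Generate("10.0.0.1:2375"))
	// => [10.0.0.1:2375]

	// A [from:to] range is expanded into one entry per value.
	for _, addr := range discovery.Generate("10.0.0.[10:12]:2375") {
		fmt.Println(addr)
	}
	// => 10.0.0.10:2375
	//    10.0.0.11:2375
	//    10.0.0.12:2375
}
```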

+ 55 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator_test.go

@@ -0,0 +1,55 @@
+package discovery
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGeneratorNotGenerate(t *testing.T) {
+	ips := Generate("127.0.0.1")
+	assert.Equal(t, len(ips), 1)
+	assert.Equal(t, ips[0], "127.0.0.1")
+}
+
+func TestGeneratorWithPortNotGenerate(t *testing.T) {
+	ips := Generate("127.0.0.1:8080")
+	assert.Equal(t, len(ips), 1)
+	assert.Equal(t, ips[0], "127.0.0.1:8080")
+}
+
+func TestGeneratorMatchFailedNotGenerate(t *testing.T) {
+	ips := Generate("127.0.0.[1]")
+	assert.Equal(t, len(ips), 1)
+	assert.Equal(t, ips[0], "127.0.0.[1]")
+}
+
+func TestGeneratorWithPort(t *testing.T) {
+	ips := Generate("127.0.0.[1:11]:2375")
+	assert.Equal(t, len(ips), 11)
+	assert.Equal(t, ips[0], "127.0.0.1:2375")
+	assert.Equal(t, ips[1], "127.0.0.2:2375")
+	assert.Equal(t, ips[2], "127.0.0.3:2375")
+	assert.Equal(t, ips[3], "127.0.0.4:2375")
+	assert.Equal(t, ips[4], "127.0.0.5:2375")
+	assert.Equal(t, ips[5], "127.0.0.6:2375")
+	assert.Equal(t, ips[6], "127.0.0.7:2375")
+	assert.Equal(t, ips[7], "127.0.0.8:2375")
+	assert.Equal(t, ips[8], "127.0.0.9:2375")
+	assert.Equal(t, ips[9], "127.0.0.10:2375")
+	assert.Equal(t, ips[10], "127.0.0.11:2375")
+}
+
+func TestGenerateWithMalformedInputAtRangeStart(t *testing.T) {
+	malformedInput := "127.0.0.[x:11]:2375"
+	ips := Generate(malformedInput)
+	assert.Equal(t, len(ips), 1)
+	assert.Equal(t, ips[0], malformedInput)
+}
+
+func TestGenerateWithMalformedInputAtRangeEnd(t *testing.T) {
+	malformedInput := "127.0.0.[1:x]:2375"
+	ips := Generate(malformedInput)
+	assert.Equal(t, len(ips), 1)
+	assert.Equal(t, ips[0], malformedInput)
+}

+ 92 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv.go

@@ -0,0 +1,92 @@
+package kv
+
+import (
+	"fmt"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/docker/swarm/discovery"
+	"github.com/docker/swarm/pkg/store"
+)
+
+// Discovery is exported
+type Discovery struct {
+	store     store.Store
+	name      string
+	heartbeat time.Duration
+	prefix    string
+}
+
+func init() {
+	discovery.Register("zk", &Discovery{name: "zk"})
+	discovery.Register("consul", &Discovery{name: "consul"})
+	discovery.Register("etcd", &Discovery{name: "etcd"})
+}
+
+// Initialize is exported
+func (s *Discovery) Initialize(uris string, heartbeat uint64) error {
+	var (
+		parts = strings.SplitN(uris, "/", 2)
+		ips   = strings.Split(parts[0], ",")
+		addrs []string
+		err   error
+	)
+
+	if len(parts) != 2 {
+		return fmt.Errorf("invalid format %q, missing <path>", uris)
+	}
+
+	for _, ip := range ips {
+		addrs = append(addrs, ip)
+	}
+
+	s.heartbeat = time.Duration(heartbeat) * time.Second
+	s.prefix = parts[1]
+
+	// Creates a new store; options that are not supported
+	// by the chosen store are ignored
+	s.store, err = store.CreateStore(
+		s.name, // name of the store
+		addrs,
+		store.Config{
+			Timeout: s.heartbeat,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Fetch is exported
+func (s *Discovery) Fetch() ([]*discovery.Entry, error) {
+	addrs, err := s.store.GetRange(s.prefix)
+	if err != nil {
+		return nil, err
+	}
+	return discovery.CreateEntries(convertToStringArray(addrs))
+}
+
+// Watch is exported
+func (s *Discovery) Watch(callback discovery.WatchCallback) {
+	s.store.WatchRange(s.prefix, "", s.heartbeat, func(kvalues []store.KVEntry) {
+		// Translate byte array entries to discovery.Entry
+		entries, _ := discovery.CreateEntries(convertToStringArray(kvalues))
+		callback(entries)
+	})
+}
+
+// Register is exported
+func (s *Discovery) Register(addr string) error {
+	err := s.store.Put(path.Join(s.prefix, addr), []byte(addr))
+	return err
+}
+
+func convertToStringArray(entries []store.KVEntry) (addrs []string) {
+	for _, entry := range entries {
+		addrs = append(addrs, string(entry.Value()))
+	}
+	return addrs
+}

+ 20 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv_test.go

@@ -0,0 +1,20 @@
+package kv
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInitialize(t *testing.T) {
+	discoveryService := &Discovery{}
+
+	assert.Equal(t, discoveryService.Initialize("127.0.0.1", 0).Error(), "invalid format \"127.0.0.1\", missing <path>")
+
+	assert.Error(t, discoveryService.Initialize("127.0.0.1/path", 0))
+	assert.Equal(t, discoveryService.prefix, "path")
+
+	assert.Error(t, discoveryService.Initialize("127.0.0.1,127.0.0.2,127.0.0.3/path", 0))
+	assert.Equal(t, discoveryService.prefix, "path")
+
+}

+ 45 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes.go

@@ -0,0 +1,45 @@
+package nodes
+
+import (
+	"strings"
+
+	"github.com/docker/swarm/discovery"
+)
+
+// Discovery is exported
+type Discovery struct {
+	entries []*discovery.Entry
+}
+
+func init() {
+	discovery.Register("nodes", &Discovery{})
+}
+
+// Initialize is exported
+func (s *Discovery) Initialize(uris string, _ uint64) error {
+	for _, input := range strings.Split(uris, ",") {
+		for _, ip := range discovery.Generate(input) {
+			entry, err := discovery.NewEntry(ip)
+			if err != nil {
+				return err
+			}
+			s.entries = append(s.entries, entry)
+		}
+	}
+
+	return nil
+}
+
+// Fetch is exported
+func (s *Discovery) Fetch() ([]*discovery.Entry, error) {
+	return s.entries, nil
+}
+
+// Watch is exported
+func (s *Discovery) Watch(callback discovery.WatchCallback) {
+}
+
+// Register is exported
+func (s *Discovery) Register(addr string) error {
+	return discovery.ErrNotImplemented
+}

+ 31 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes_test.go

@@ -0,0 +1,31 @@
+package nodes
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInitialise(t *testing.T) {
+	discovery := &Discovery{}
+	discovery.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0)
+	assert.Equal(t, len(discovery.entries), 2)
+	assert.Equal(t, discovery.entries[0].String(), "1.1.1.1:1111")
+	assert.Equal(t, discovery.entries[1].String(), "2.2.2.2:2222")
+}
+
+func TestInitialiseWithPattern(t *testing.T) {
+	discovery := &Discovery{}
+	discovery.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0)
+	assert.Equal(t, len(discovery.entries), 5)
+	assert.Equal(t, discovery.entries[0].String(), "1.1.1.1:1111")
+	assert.Equal(t, discovery.entries[1].String(), "1.1.1.2:1111")
+	assert.Equal(t, discovery.entries[2].String(), "2.2.2.2:2222")
+	assert.Equal(t, discovery.entries[3].String(), "2.2.2.3:2222")
+	assert.Equal(t, discovery.entries[4].String(), "2.2.2.4:2222")
+}
+
+func TestRegister(t *testing.T) {
+	discovery := &Discovery{}
+	assert.Error(t, discovery.Register("0.0.0.0"))
+}

+ 31 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/README.md

@@ -0,0 +1,31 @@
+# discovery-stage.hub.docker.com
+
+Docker Swarm comes with a simple discovery service built into [Docker Hub](http://hub.docker.com).
+
+The discovery service is still in alpha stage and is currently hosted at `https://discovery-stage.hub.docker.com`.
+
+##### Create a new cluster
+`-> POST https://discovery-stage.hub.docker.com/v1/clusters`
+
+`<- <token>`
+
+##### Add new nodes to a cluster
+`-> POST https://discovery-stage.hub.docker.com/v1/clusters/<token> Request body: "<ip>:<port1>"`
+
+`<- OK`
+
+`-> POST https://discovery-stage.hub.docker.com/v1/clusters/<token> Request body: "<ip>:<port2>"`
+
+`<- OK`
+
+
+##### List nodes in a cluster
+`-> GET https://discovery-stage.hub.docker.com/v1/clusters/<token>`
+
+`<- ["<ip>:<port1>", "<ip>:<port2>"]`
+
+
+##### Delete a cluster (all the nodes in a cluster)
+`-> DELETE https://discovery-stage.hub.docker.com/v1/clusters/<token>`
+
+`<- OK`
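
For orientation, here is a rough Go sketch (not part of the commit itself) of driving these endpoints through the `token` discovery package vendored below; the token and the node address are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/swarm/discovery/token"
)

func main() {
	d := &token.Discovery{}

	// "0123456789abcdef" stands in for a token previously obtained from
	// POST /v1/clusters; a heartbeat of 0 is fine when Watch is not used.
	if err := d.Initialize("0123456789abcdef", 0); err != nil {
		log.Fatal(err)
	}

	// Register this node, then read the full member list back.
	if err := d.Register("192.168.0.42:2375"); err != nil {
		log.Fatal(err)
	}
	entries, err := d.Fetch()
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.String())
	}
}
```

`CreateCluster` on the same type wraps the initial `POST /v1/clusters` call used to obtain a fresh token.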

+ 104 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token.go

@@ -0,0 +1,104 @@
+package token
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/docker/swarm/discovery"
+)
+
+// DiscoveryURL is exported
+const DiscoveryURL = "https://discovery-stage.hub.docker.com/v1"
+
+// Discovery is exported
+type Discovery struct {
+	heartbeat uint64
+	url       string
+	token     string
+}
+
+func init() {
+	discovery.Register("token", &Discovery{})
+}
+
+// Initialize is exported
+func (s *Discovery) Initialize(urltoken string, heartbeat uint64) error {
+	if i := strings.LastIndex(urltoken, "/"); i != -1 {
+		s.url = "https://" + urltoken[:i]
+		s.token = urltoken[i+1:]
+	} else {
+		s.url = DiscoveryURL
+		s.token = urltoken
+	}
+
+	if s.token == "" {
+		return errors.New("token is empty")
+	}
+	s.heartbeat = heartbeat
+
+	return nil
+}
+
+// Fetch returns the list of entries for the discovery service at the specified endpoint
+func (s *Discovery) Fetch() ([]*discovery.Entry, error) {
+
+	resp, err := http.Get(fmt.Sprintf("%s/%s/%s", s.url, "clusters", s.token))
+	if err != nil {
+		return nil, err
+	}
+
+	defer resp.Body.Close()
+
+	var addrs []string
+	if resp.StatusCode == http.StatusOK {
+		if err := json.NewDecoder(resp.Body).Decode(&addrs); err != nil {
+			return nil, err
+		}
+	} else {
+		return nil, fmt.Errorf("Failed to fetch entries, Discovery service returned %d HTTP status code", resp.StatusCode)
+	}
+
+	return discovery.CreateEntries(addrs)
+}
+
+// Watch is exported
+func (s *Discovery) Watch(callback discovery.WatchCallback) {
+	for _ = range time.Tick(time.Duration(s.heartbeat) * time.Second) {
+		entries, err := s.Fetch()
+		if err == nil {
+			callback(entries)
+		}
+	}
+}
+
+// Register adds a new entry identified by addr to the discovery service
+func (s *Discovery) Register(addr string) error {
+	buf := strings.NewReader(addr)
+
+	resp, err := http.Post(fmt.Sprintf("%s/%s/%s", s.url,
+		"clusters", s.token), "application/json", buf)
+
+	if err != nil {
+		return err
+	}
+
+	resp.Body.Close()
+	return nil
+}
+
+// CreateCluster returns a unique cluster token
+func (s *Discovery) CreateCluster() (string, error) {
+	resp, err := http.Post(fmt.Sprintf("%s/%s", s.url, "clusters"), "", nil)
+	if err != nil {
+		return "", err
+	}
+
+	defer resp.Body.Close()
+	token, err := ioutil.ReadAll(resp.Body)
+	return string(token), err
+}

+ 36 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token_test.go

@@ -0,0 +1,36 @@
+package token
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInitialize(t *testing.T) {
+	discovery := &Discovery{}
+	err := discovery.Initialize("token", 0)
+	assert.NoError(t, err)
+	assert.Equal(t, discovery.token, "token")
+	assert.Equal(t, discovery.url, DiscoveryURL)
+
+	err = discovery.Initialize("custom/path/token", 0)
+	assert.NoError(t, err)
+	assert.Equal(t, discovery.token, "token")
+	assert.Equal(t, discovery.url, "https://custom/path")
+
+	err = discovery.Initialize("", 0)
+	assert.Error(t, err)
+}
+
+func TestRegister(t *testing.T) {
+	discovery := &Discovery{token: "TEST_TOKEN", url: DiscoveryURL}
+	expected := "127.0.0.1:2675"
+	assert.NoError(t, discovery.Register(expected))
+
+	addrs, err := discovery.Fetch()
+	assert.NoError(t, err)
+	assert.Equal(t, len(addrs), 1)
+	assert.Equal(t, addrs[0].String(), expected)
+
+	assert.NoError(t, discovery.Register(expected))
+}

+ 79 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/README.md

@@ -0,0 +1,79 @@
+# Storage
+
+This package is used by the discovery service to register machines inside the cluster. It is also used to store the cluster's metadata.
+
+## Example of usage
+
+### Create a new store and use Put/Get
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/swarm/pkg/store"
+)
+
+func main() {
+	var (
+		client = "localhost:8500"
+	)
+
+	// Initialize a new store with consul
+	kv, err := store.CreateStore(
+		string(store.CONSUL), // backend name, i.e. "consul"
+		[]string{client},
+		store.Config{
+			Timeout: 10 * time.Second,
+		},
+	)
+	if err != nil {
+		log.Error("Cannot create store consul")
+	}
+
+	key := "foo"
+	err = kv.Put(key, []byte("bar"))
+	if err != nil {
+		log.Error("Error trying to put value at key `", key, "`")
+	}
+
+	value, _, err := kv.Get(key)
+	if err != nil {
+		log.Error("Error trying to access value at key `", key, "`")
+	}
+
+	log.Info("value: ", string(value))
+}
+```
+
+
+
+## Contributing to a new storage backend
+
+A new **storage backend** should implement these calls:
+
+```go
+type Store interface {
+	Put(key string, value []byte) error
+	Get(key string) (value []byte, lastIndex uint64, err error)
+	Delete(key string) error
+	Exists(key string) (bool, error)
+	Watch(key string, heartbeat time.Duration, callback WatchCallback) error
+	CancelWatch(key string) error
+	Acquire(key string, value []byte) (string, error)
+	Release(session string) error
+	GetRange(prefix string) ([]KVEntry, error)
+	DeleteRange(prefix string) error
+	WatchRange(prefix string, filter string, heartbeat time.Duration, callback WatchCallback) error
+	CancelWatchRange(prefix string) error
+	AtomicPut(key string, oldValue []byte, newValue []byte, index uint64) (bool, error)
+	AtomicDelete(key string, oldValue []byte, index uint64) (bool, error)
+}
+```
+
+To be eligible as a **discovery backend** only, a K/V store implementation should offer at least `Get`, `Put`, `GetRange`, and `WatchRange`.
+
+You can take inspiration from the existing backends when creating a new one; a minimal example is sketched below. This interface may change over time to improve the experience of using the library and of contributing a new backend.
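
For illustration, a rough in-memory backend sketch (not part of the commit) that satisfies the interface above; only the calls needed for discovery do real work, everything else returns `ErrNotImplemented`:

```go
package store

import (
	"strings"
	"sync"
	"time"
)

// InMemory is a toy, non-persistent backend.
type InMemory struct {
	mu   sync.RWMutex
	data map[string][]byte
}

// InitializeInMemory matches the Initialize signature expected by CreateStore.
func InitializeInMemory(addrs []string, options Config) (Store, error) {
	return &InMemory{data: make(map[string][]byte)}, nil
}

func (s *InMemory) Put(key string, value []byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.data[key] = value
	return nil
}

// Get returns the value at "key"; the modification index is always 0 here.
func (s *InMemory) Get(key string) ([]byte, uint64, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if v, ok := s.data[key]; ok {
		return v, 0, nil
	}
	return nil, 0, ErrKeyNotFound
}

func (s *InMemory) Delete(key string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.data, key)
	return nil
}

func (s *InMemory) Exists(key string) (bool, error) {
	_, _, err := s.Get(key)
	return err == nil, nil
}

// GetRange returns every entry whose key starts with "prefix".
func (s *InMemory) GetRange(prefix string) ([]KVEntry, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	var kvi []KVEntry
	for k, v := range s.data {
		if strings.HasPrefix(k, prefix) {
			kvi = append(kvi, &kviTuple{k, v, 0})
		}
	}
	return kvi, nil
}

// WatchRange naively polls GetRange on every heartbeat (heartbeat must be > 0).
func (s *InMemory) WatchRange(prefix string, filter string, heartbeat time.Duration, callback WatchCallback) error {
	for _ = range time.Tick(heartbeat) {
		kvi, err := s.GetRange(prefix)
		if err != nil {
			return err
		}
		callback(kvi)
	}
	return nil
}

// The remaining calls are not needed for discovery and are left unimplemented.
func (s *InMemory) CancelWatch(key string) error         { return ErrNotImplemented }
func (s *InMemory) CancelWatchRange(prefix string) error { return ErrNotImplemented }
func (s *InMemory) DeleteRange(prefix string) error      { return ErrNotImplemented }
func (s *InMemory) Release(session string) error         { return ErrNotImplemented }
func (s *InMemory) Watch(key string, heartbeat time.Duration, callback WatchCallback) error {
	return ErrNotImplemented
}
func (s *InMemory) Acquire(key string, value []byte) (string, error) {
	return "", ErrNotImplemented
}
func (s *InMemory) AtomicPut(key string, oldValue []byte, newValue []byte, index uint64) (bool, error) {
	return false, ErrNotImplemented
}
func (s *InMemory) AtomicDelete(key string, oldValue []byte, index uint64) (bool, error) {
	return false, ErrNotImplemented
}
```

Wiring it up would only require an extra entry in the `stores` map that `store.go` fills in its `init()`, e.g. `stores["inmemory"] = InitializeInMemory`.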

+ 301 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/consul.go

@@ -0,0 +1,301 @@
+package store
+
+import (
+	"crypto/tls"
+	"errors"
+	"net/http"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	api "github.com/hashicorp/consul/api"
+)
+
+var (
+	// ErrSessionUndefined is exported
+	ErrSessionUndefined = errors.New("Session does not exist")
+)
+
+// Consul embeds the client and watches/lock sessions
+type Consul struct {
+	config   *api.Config
+	client   *api.Client
+	sessions map[string]*api.Session
+	watches  map[string]*Watch
+}
+
+// Watch embeds the event channel and the
+// refresh interval
+type Watch struct {
+	LastIndex uint64
+	Interval  time.Duration
+}
+
+// InitializeConsul creates a new Consul client given
+// a list of endpoints and optional tls config
+func InitializeConsul(endpoints []string, options Config) (Store, error) {
+	s := &Consul{}
+	s.sessions = make(map[string]*api.Session)
+	s.watches = make(map[string]*Watch)
+
+	// Create Consul client
+	config := api.DefaultConfig()
+	s.config = config
+	config.HttpClient = http.DefaultClient
+	config.Address = endpoints[0]
+	config.Scheme = "http"
+
+	if options.TLS != nil {
+		s.setTLS(options.TLS)
+	}
+
+	if options.Timeout != 0 {
+		s.setTimeout(options.Timeout)
+	}
+
+	// Creates a new client
+	client, err := api.NewClient(config)
+	if err != nil {
+		log.Errorf("Couldn't initialize consul client..")
+		return nil, err
+	}
+	s.client = client
+
+	return s, nil
+}
+
+// setTLS sets Consul TLS options
+func (s *Consul) setTLS(tls *tls.Config) {
+	s.config.HttpClient.Transport = &http.Transport{
+		TLSClientConfig: tls,
+	}
+	s.config.Scheme = "https"
+}
+
+// setTimeout sets the timeout for connecting to Consul
+func (s *Consul) setTimeout(time time.Duration) {
+	s.config.WaitTime = time
+}
+
+// Get the value at "key"; returns the last modified index
+// to use in conjunction with CAS calls
+func (s *Consul) Get(key string) (value []byte, lastIndex uint64, err error) {
+	pair, meta, err := s.client.KV().Get(partialFormat(key), nil)
+	if err != nil {
+		return nil, 0, err
+	}
+	if pair == nil {
+		return nil, 0, ErrKeyNotFound
+	}
+	return pair.Value, meta.LastIndex, nil
+}
+
+// Put a value at "key"
+func (s *Consul) Put(key string, value []byte) error {
+	p := &api.KVPair{Key: partialFormat(key), Value: value}
+	if s.client == nil {
+		log.Error("Error initializing client")
+	}
+	_, err := s.client.KV().Put(p, nil)
+	return err
+}
+
+// Delete a value at "key"
+func (s *Consul) Delete(key string) error {
+	_, err := s.client.KV().Delete(partialFormat(key), nil)
+	return err
+}
+
+// Exists checks that the key exists inside the store
+func (s *Consul) Exists(key string) (bool, error) {
+	_, _, err := s.Get(key)
+	if err != nil {
+		if err == ErrKeyNotFound {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+// GetRange gets a range of values at "directory"
+func (s *Consul) GetRange(prefix string) (kvi []KVEntry, err error) {
+	pairs, _, err := s.client.KV().List(partialFormat(prefix), nil)
+	if err != nil {
+		return nil, err
+	}
+	if len(pairs) == 0 {
+		return nil, ErrKeyNotFound
+	}
+	for _, pair := range pairs {
+		if pair.Key == prefix {
+			continue
+		}
+		kvi = append(kvi, &kviTuple{pair.Key, pair.Value, pair.ModifyIndex})
+	}
+	return kvi, nil
+}
+
+// DeleteRange deletes a range of values at "directory"
+func (s *Consul) DeleteRange(prefix string) error {
+	_, err := s.client.KV().DeleteTree(partialFormat(prefix), nil)
+	return err
+}
+
+// Watch a single key for modifications
+func (s *Consul) Watch(key string, heartbeat time.Duration, callback WatchCallback) error {
+	fkey := partialFormat(key)
+
+	// We get the last index first
+	_, meta, err := s.client.KV().Get(fkey, nil)
+	if err != nil {
+		return err
+	}
+
+	// Add watch to map
+	s.watches[fkey] = &Watch{LastIndex: meta.LastIndex, Interval: heartbeat}
+	eventChan := s.waitForChange(fkey)
+
+	for _ = range eventChan {
+		log.WithField("name", "consul").Debug("Key watch triggered")
+		entry, index, err := s.Get(key)
+		if err != nil {
+			log.Error("Cannot refresh the key: ", fkey, ", cancelling watch")
+			s.watches[fkey] = nil
+			return err
+		}
+
+		value := []KVEntry{&kviTuple{key, entry, index}}
+		callback(value)
+	}
+
+	return nil
+}
+
+// CancelWatch cancels a watch, sends a signal to the appropriate
+// stop channel
+func (s *Consul) CancelWatch(key string) error {
+	key = partialFormat(key)
+	if _, ok := s.watches[key]; !ok {
+		log.Error("Chan does not exist for key: ", key)
+		return ErrWatchDoesNotExist
+	}
+	s.watches[key] = nil
+	return nil
+}
+
+// Internal function to check if a key has changed
+func (s *Consul) waitForChange(key string) <-chan uint64 {
+	ch := make(chan uint64)
+	kv := s.client.KV()
+	go func() {
+		for {
+			watch, ok := s.watches[key]
+			if !ok {
+				log.Error("Cannot access last index for key: ", key, " closing channel")
+				break
+			}
+			option := &api.QueryOptions{
+				WaitIndex: watch.LastIndex,
+				WaitTime:  watch.Interval,
+			}
+			_, meta, err := kv.List(key, option)
+			if err != nil {
+				log.WithField("name", "consul").Errorf("Discovery error: %v", err)
+				break
+			}
+			watch.LastIndex = meta.LastIndex
+			ch <- watch.LastIndex
+		}
+		close(ch)
+	}()
+	return ch
+}
+
+// WatchRange triggers a watch on a range of values at "directory"
+func (s *Consul) WatchRange(prefix string, filter string, heartbeat time.Duration, callback WatchCallback) error {
+	fprefix := partialFormat(prefix)
+
+	// We get the last index first
+	_, meta, err := s.client.KV().Get(prefix, nil)
+	if err != nil {
+		return err
+	}
+
+	// Add watch to map
+	s.watches[fprefix] = &Watch{LastIndex: meta.LastIndex, Interval: heartbeat}
+	eventChan := s.waitForChange(fprefix)
+
+	for _ = range eventChan {
+		log.WithField("name", "consul").Debug("Key watch triggered")
+		kvi, err := s.GetRange(prefix)
+		if err != nil {
+			log.Error("Cannot refresh keys with prefix: ", fprefix, ", cancelling watch")
+			s.watches[fprefix] = nil
+			return err
+		}
+		callback(kvi)
+	}
+
+	return nil
+}
+
+// CancelWatchRange stops the watch on the range of values, sends
+// a signal to the appropriate stop channel
+func (s *Consul) CancelWatchRange(prefix string) error {
+	return s.CancelWatch(prefix)
+}
+
+// Acquire the lock for "key"/"directory"
+func (s *Consul) Acquire(key string, value []byte) (string, error) {
+	key = partialFormat(key)
+	session := s.client.Session()
+	id, _, err := session.CreateNoChecks(nil, nil)
+	if err != nil {
+		return "", err
+	}
+
+	// Add session to map
+	s.sessions[id] = session
+
+	p := &api.KVPair{Key: key, Value: value, Session: id}
+	if work, _, err := s.client.KV().Acquire(p, nil); err != nil {
+		return "", err
+	} else if !work {
+		return "", ErrCannotLock
+	}
+
+	return id, nil
+}
+
+// Release the lock for "key"/"directory"
+func (s *Consul) Release(id string) error {
+	if _, ok := s.sessions[id]; !ok {
+		log.Error("Lock session does not exist")
+		return ErrSessionUndefined
+	}
+	session := s.sessions[id]
+	session.Destroy(id, nil)
+	s.sessions[id] = nil
+	return nil
+}
+
+// AtomicPut puts a value at "key" only if the key has not been
+// modified in the meantime; otherwise it returns an error
+func (s *Consul) AtomicPut(key string, _ []byte, newValue []byte, index uint64) (bool, error) {
+	p := &api.KVPair{Key: partialFormat(key), Value: newValue, ModifyIndex: index}
+	if work, _, err := s.client.KV().CAS(p, nil); err != nil {
+		return false, err
+	} else if !work {
+		return false, ErrKeyModified
+	}
+	return true, nil
+}
+
+// AtomicDelete deletes a value at "key" if the key has not
+// been modified in the meantime, throws an error if this is the case
+func (s *Consul) AtomicDelete(key string, oldValue []byte, index uint64) (bool, error) {
+	p := &api.KVPair{Key: partialFormat(key), ModifyIndex: index}
+	if work, _, err := s.client.KV().DeleteCAS(p, nil); err != nil {
+		return false, err
+	} else if !work {
+		return false, ErrKeyModified
+	}
+	return true, nil
+}

+ 264 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/etcd.go

@@ -0,0 +1,264 @@
+package store
+
+import (
+	"crypto/tls"
+	"net"
+	"net/http"
+	"strings"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	etcd "github.com/coreos/go-etcd/etcd"
+)
+
+// Etcd embeds the client
+type Etcd struct {
+	client  *etcd.Client
+	watches map[string]chan<- bool
+}
+
+// InitializeEtcd creates a new Etcd client given
+// a list of endpoints and optional tls config
+func InitializeEtcd(addrs []string, options Config) (Store, error) {
+	s := &Etcd{}
+	s.watches = make(map[string]chan<- bool)
+
+	entries := createEndpoints(addrs, "http")
+	s.client = etcd.NewClient(entries)
+
+	if options.TLS != nil {
+		s.setTLS(options.TLS)
+	}
+
+	if options.Timeout != 0 {
+		s.setTimeout(options.Timeout)
+	}
+
+	return s, nil
+}
+
+// setTLS switches the cluster endpoints to https and applies
+// the given TLS configuration to the client transport
+func (s *Etcd) setTLS(tls *tls.Config) {
+	// Change to https scheme
+	var addrs []string
+	entries := s.client.GetCluster()
+	for _, entry := range entries {
+		addrs = append(addrs, strings.Replace(entry, "http", "https", -1))
+	}
+	s.client.SetCluster(addrs)
+
+	// Set transport
+	t := http.Transport{
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second, // default timeout
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     tls,
+	}
+	s.client.SetTransport(&t)
+}
+
+// setTimeout sets the timeout used for connecting to the store
+func (s *Etcd) setTimeout(time time.Duration) {
+	s.client.SetDialTimeout(time)
+}
+
+// Create the entire path for a directory that does not exist
+func (s *Etcd) createDirectory(path string) error {
+	// TODO Handle TTL at key/dir creation -> use K/V struct for key infos?
+	if _, err := s.client.CreateDir(format(path), 10); err != nil {
+		if etcdError, ok := err.(*etcd.EtcdError); ok {
+			if etcdError.ErrorCode != 105 { // Skip key already exists
+				return err
+			}
+		} else {
+			return err
+		}
+	}
+	return nil
+}
+
+// Get the value at "key"; returns the last modified index
+// to use in conjunction with CAS calls
+func (s *Etcd) Get(key string) (value []byte, lastIndex uint64, err error) {
+	result, err := s.client.Get(format(key), false, false)
+	if err != nil {
+		if etcdError, ok := err.(*etcd.EtcdError); ok {
+			// Not a Directory or Not a file
+			if etcdError.ErrorCode == 102 || etcdError.ErrorCode == 104 {
+				return nil, 0, ErrKeyNotFound
+			}
+		}
+		return nil, 0, err
+	}
+	return []byte(result.Node.Value), result.Node.ModifiedIndex, nil
+}
+
+// Put a value at "key"
+func (s *Etcd) Put(key string, value []byte) error {
+	if _, err := s.client.Set(key, string(value), 0); err != nil {
+		if etcdError, ok := err.(*etcd.EtcdError); ok {
+			if etcdError.ErrorCode == 104 { // Not a directory
+				// Remove the last element (the actual key) and set the prefix as a dir
+				err = s.createDirectory(getDir(key))
+				if _, err := s.client.Set(key, string(value), 0); err != nil {
+					return err
+				}
+			}
+		}
+		return err
+	}
+	return nil
+}
+
+// Delete a value at "key"
+func (s *Etcd) Delete(key string) error {
+	if _, err := s.client.Delete(format(key), false); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Exists checks if the key exists inside the store
+func (s *Etcd) Exists(key string) (bool, error) {
+	value, _, err := s.Get(key)
+	if err != nil {
+		if err == ErrKeyNotFound || value == nil {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+// Watch a single key for modifications
+func (s *Etcd) Watch(key string, _ time.Duration, callback WatchCallback) error {
+	key = format(key)
+	watchChan := make(chan *etcd.Response)
+	stopChan := make(chan bool)
+
+	// Create new Watch entry
+	s.watches[key] = stopChan
+
+	// Start watch
+	go s.client.Watch(key, 0, false, watchChan, stopChan)
+
+	for _ = range watchChan {
+		log.WithField("name", "etcd").Debug("Discovery watch triggered")
+		entry, index, err := s.Get(key)
+		if err != nil {
+			log.Error("Cannot refresh the key: ", key, ", cancelling watch")
+			s.watches[key] = nil
+			return err
+		}
+		kvi := []KVEntry{&kviTuple{key, entry, index}}
+		callback(kvi)
+	}
+	return nil
+}
+
+// CancelWatch cancels a watch, sends a signal to the appropriate
+// stop channel
+func (s *Etcd) CancelWatch(key string) error {
+	key = format(key)
+	if _, ok := s.watches[key]; !ok {
+		log.Error("Chan does not exist for key: ", key)
+		return ErrWatchDoesNotExist
+	}
+	// Send stop signal to event chan
+	s.watches[key] <- true
+	s.watches[key] = nil
+	return nil
+}
+
+// AtomicPut puts a value at "key" only if the key has not been
+// modified in the meantime; otherwise it returns an error
+func (s *Etcd) AtomicPut(key string, oldValue []byte, newValue []byte, index uint64) (bool, error) {
+	resp, err := s.client.CompareAndSwap(format(key), string(newValue), 5, string(oldValue), 0)
+	if err != nil {
+		return false, err
+	}
+	if !(resp.Node.Value == string(newValue) && resp.Node.Key == key && resp.Node.TTL == 5) {
+		return false, ErrKeyModified
+	}
+	if !(resp.PrevNode.Value == string(newValue) && resp.PrevNode.Key == key && resp.PrevNode.TTL == 5) {
+		return false, ErrKeyModified
+	}
+	return true, nil
+}
+
+// AtomicDelete deletes a value at "key" if the key has not
+// been modified in the meantime, throws an error if this is the case
+func (s *Etcd) AtomicDelete(key string, oldValue []byte, index uint64) (bool, error) {
+	resp, err := s.client.CompareAndDelete(format(key), string(oldValue), 0)
+	if err != nil {
+		return false, err
+	}
+	if !(resp.PrevNode.Value == string(oldValue) && resp.PrevNode.Key == key && resp.PrevNode.TTL == 5) {
+		return false, ErrKeyModified
+	}
+	return true, nil
+}
+
+// GetRange gets a range of values at "directory"
+func (s *Etcd) GetRange(prefix string) ([]KVEntry, error) {
+	resp, err := s.client.Get(format(prefix), true, true)
+	if err != nil {
+		return nil, err
+	}
+	kvi := make([]KVEntry, len(resp.Node.Nodes))
+	for i, n := range resp.Node.Nodes {
+		kvi[i] = &kviTuple{n.Key, []byte(n.Value), n.ModifiedIndex}
+	}
+	return kvi, nil
+}
+
+// DeleteRange deletes a range of values at "directory"
+func (s *Etcd) DeleteRange(prefix string) error {
+	if _, err := s.client.Delete(format(prefix), true); err != nil {
+		return err
+	}
+	return nil
+}
+
+// WatchRange triggers a watch on a range of values at "directory"
+func (s *Etcd) WatchRange(prefix string, filter string, _ time.Duration, callback WatchCallback) error {
+	prefix = format(prefix)
+	watchChan := make(chan *etcd.Response)
+	stopChan := make(chan bool)
+
+	// Create new Watch entry
+	s.watches[prefix] = stopChan
+
+	// Start watch
+	go s.client.Watch(prefix, 0, true, watchChan, stopChan)
+	for _ = range watchChan {
+		log.WithField("name", "etcd").Debug("Discovery watch triggered")
+		kvi, err := s.GetRange(prefix)
+		if err != nil {
+			log.Error("Cannot refresh the key: ", prefix, ", cancelling watch")
+			s.watches[prefix] = nil
+			return err
+		}
+		callback(kvi)
+	}
+	return nil
+}
+
+// CancelWatchRange stops the watch on the range of values, sends
+// a signal to the appropriate stop channel
+func (s *Etcd) CancelWatchRange(prefix string) error {
+	return s.CancelWatch(format(prefix))
+}
+
+// Acquire the lock for "key"/"directory"
+func (s *Etcd) Acquire(key string, value []byte) (string, error) {
+	return "", ErrNotImplemented
+}
+
+// Release the lock for "key"/"directory"
+func (s *Etcd) Release(session string) error {
+	return ErrNotImplemented
+}

+ 51 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/helpers.go

@@ -0,0 +1,51 @@
+package store
+
+import (
+	"strings"
+)
+
+// Creates a list of endpoints given the right scheme
+func createEndpoints(addrs []string, scheme string) (entries []string) {
+	for _, addr := range addrs {
+		entries = append(entries, scheme+"://"+addr)
+	}
+	return entries
+}
+
+// Formats the key
+func format(key string) string {
+	return fullpath(splitKey(key))
+}
+
+// Formats the key partially (omits the first '/')
+func partialFormat(key string) string {
+	return partialpath(splitKey(key))
+}
+
+// Get the full directory part of the key
+func getDir(key string) string {
+	parts := splitKey(key)
+	parts = parts[:len(parts)-1]
+	return fullpath(parts)
+}
+
+// splitKey splits the key to extract path information
+func splitKey(key string) (path []string) {
+	if strings.Contains(key, "/") {
+		path = strings.Split(key, "/")
+	} else {
+		path = []string{key}
+	}
+	return path
+}
+
+// Get the full correct path representation of a split key/directory
+func fullpath(path []string) string {
+	return "/" + strings.Join(path, "/")
+}
+
+// Get the partial correct path representation of a split key/directory
+// Omits the first '/'
+func partialpath(path []string) string {
+	return strings.Join(path, "/")
+}

+ 92 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/store.go

@@ -0,0 +1,92 @@
+package store
+
+import (
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+)
+
+// WatchCallback is used for watch methods on keys
+// and is triggered on key change
+type WatchCallback func(kviTuple []KVEntry)
+
+// Initialize creates a new Store object, initializing the client
+type Initialize func(addrs []string, options Config) (Store, error)
+
+// Store represents the backend K/V storage.
+// Each store must support every call listed
+// here, otherwise it cannot be used as a K/V
+// backend for libkv
+type Store interface {
+	// Put a value at the specified key
+	Put(key string, value []byte) error
+
+	// Get a value given its key
+	Get(key string) (value []byte, lastIndex uint64, err error)
+
+	// Delete the value at the specified key
+	Delete(key string) error
+
+	// Verify if a Key exists in the store
+	Exists(key string) (bool, error)
+
+	// Watch changes on a key
+	Watch(key string, heartbeat time.Duration, callback WatchCallback) error
+
+	// Cancel watch key
+	CancelWatch(key string) error
+
+	// Acquire the lock at key
+	Acquire(key string, value []byte) (string, error)
+
+	// Release the lock at key
+	Release(session string) error
+
+	// Get range of keys based on prefix
+	GetRange(prefix string) ([]KVEntry, error)
+
+	// Delete range of keys based on prefix
+	DeleteRange(prefix string) error
+
+	// Watch key namespaces
+	WatchRange(prefix string, filter string, heartbeat time.Duration, callback WatchCallback) error
+
+	// Cancel watch key range
+	CancelWatchRange(prefix string) error
+
+	// Atomic operation on a single value
+	AtomicPut(key string, oldValue []byte, newValue []byte, index uint64) (bool, error)
+
+	// Atomic delete of a single value
+	AtomicDelete(key string, oldValue []byte, index uint64) (bool, error)
+}
+
+// KVEntry represents the {Key, Value, LastIndex} tuple
+type KVEntry interface {
+	Key() string
+	Value() []byte
+	LastIndex() uint64
+}
+
+var (
+	// List of Store services
+	stores map[string]Initialize
+)
+
+func init() {
+	stores = make(map[string]Initialize)
+	stores["consul"] = InitializeConsul
+	stores["etcd"] = InitializeEtcd
+	stores["zk"] = InitializeZookeeper
+}
+
+// CreateStore creates an instance of a store
+func CreateStore(store string, addrs []string, options Config) (Store, error) {
+
+	if init, exists := stores[store]; exists {
+		log.WithFields(log.Fields{"store": store}).Debug("Initializing store service")
+		return init(addrs, options)
+	}
+
+	return nil, ErrNotSupported
+}

+ 60 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/structs.go

@@ -0,0 +1,60 @@
+package store
+
+import (
+	"crypto/tls"
+	"errors"
+	"time"
+)
+
+var (
+	// ErrNotSupported is exported
+	ErrNotSupported = errors.New("Backend storage not supported yet, please choose another one")
+	// ErrNotImplemented is exported
+	ErrNotImplemented = errors.New("Call not implemented in current backend")
+	// ErrNotReachable is exported
+	ErrNotReachable = errors.New("Api not reachable")
+	// ErrCannotLock is exported
+	ErrCannotLock = errors.New("Error acquiring the lock")
+	// ErrWatchDoesNotExist is exported
+	ErrWatchDoesNotExist = errors.New("No watch found for specified key")
+	// ErrKeyModified is exported
+	ErrKeyModified = errors.New("Unable to complete atomic operation, key modified")
+	// ErrKeyNotFound is exported
+	ErrKeyNotFound = errors.New("Key not found in store")
+)
+
+// KV represents the different supported K/V
+type KV string
+
+const (
+	// CONSUL is exported
+	CONSUL KV = "consul"
+	// ETCD is exported
+	ETCD = "etcd"
+	// ZOOKEEPER is exported
+	ZOOKEEPER = "zookeeper"
+)
+
+// Config contains the options for a storage client
+type Config struct {
+	TLS     *tls.Config
+	Timeout time.Duration
+}
+
+type kviTuple struct {
+	key       string
+	value     []byte
+	lastIndex uint64
+}
+
+func (kvi *kviTuple) Key() string {
+	return kvi.key
+}
+
+func (kvi *kviTuple) Value() []byte {
+	return kvi.value
+}
+
+func (kvi *kviTuple) LastIndex() uint64 {
+	return kvi.lastIndex
+}

+ 213 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/zookeeper.go

@@ -0,0 +1,213 @@
+package store
+
+import (
+	"strings"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	zk "github.com/samuel/go-zookeeper/zk"
+)
+
+// Zookeeper embeds the zookeeper client
+// and list of watches
+type Zookeeper struct {
+	timeout time.Duration
+	client  *zk.Conn
+	watches map[string]<-chan zk.Event
+}
+
+// InitializeZookeeper creates a new Zookeeper client
+// given a list of endpoints and optional tls config
+func InitializeZookeeper(endpoints []string, options Config) (Store, error) {
+	s := &Zookeeper{}
+	s.watches = make(map[string]<-chan zk.Event)
+	s.timeout = 5 * time.Second // default timeout
+
+	if options.Timeout != 0 {
+		s.setTimeout(options.Timeout)
+	}
+
+	conn, _, err := zk.Connect(endpoints, s.timeout)
+	if err != nil {
+		log.Error(err)
+		return nil, err
+	}
+	s.client = conn
+	return s, nil
+}
+
+// setTimeout sets the timeout for connecting to Zookeeper
+func (s *Zookeeper) setTimeout(time time.Duration) {
+	s.timeout = time
+}
+
+// Get the value at "key"; returns the last modified index
+// to use in conjunction with CAS calls
+func (s *Zookeeper) Get(key string) (value []byte, lastIndex uint64, err error) {
+	resp, meta, err := s.client.Get(format(key))
+	if err != nil {
+		return nil, 0, err
+	}
+	if resp == nil {
+		return nil, 0, ErrKeyNotFound
+	}
+	return resp, uint64(meta.Mzxid), nil
+}
+
+// Create the entire path for a directory that does not exist
+func (s *Zookeeper) createFullpath(path []string) error {
+	for i := 1; i <= len(path); i++ {
+		newpath := "/" + strings.Join(path[:i], "/")
+		_, err := s.client.Create(newpath, []byte{1}, 0, zk.WorldACL(zk.PermAll))
+		if err != nil {
+			// Skip if node already exists
+			if err != zk.ErrNodeExists {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Put a value at "key"
+func (s *Zookeeper) Put(key string, value []byte) error {
+	fkey := format(key)
+	exists, err := s.Exists(key)
+	if err != nil {
+		return err
+	}
+	if !exists {
+		s.createFullpath(splitKey(key))
+	}
+	_, err = s.client.Set(fkey, value, -1)
+	return err
+}
+
+// Delete a value at "key"
+func (s *Zookeeper) Delete(key string) error {
+	err := s.client.Delete(format(key), -1)
+	return err
+}
+
+// Exists checks if the key exists inside the store
+func (s *Zookeeper) Exists(key string) (bool, error) {
+	exists, _, err := s.client.Exists(format(key))
+	if err != nil {
+		return false, err
+	}
+	return exists, nil
+}
+
+// Watch a single key for modifications
+func (s *Zookeeper) Watch(key string, _ time.Duration, callback WatchCallback) error {
+	fkey := format(key)
+	_, _, eventChan, err := s.client.GetW(fkey)
+	if err != nil {
+		return err
+	}
+
+	// Create a new Watch entry with eventChan
+	s.watches[fkey] = eventChan
+
+	for e := range eventChan {
+		if e.Type == zk.EventNodeChildrenChanged {
+			log.WithField("name", "zk").Debug("Discovery watch triggered")
+			entry, index, err := s.Get(key)
+			kvi := []KVEntry{&kviTuple{key, []byte(entry), index}}
+			if err == nil {
+				callback(kvi)
+			}
+		}
+	}
+
+	return nil
+}
+
+// CancelWatch cancels a watch, sends a signal to the appropriate
+// stop channel
+func (s *Zookeeper) CancelWatch(key string) error {
+	key = format(key)
+	if _, ok := s.watches[key]; !ok {
+		log.Error("Chan does not exist for key: ", key)
+		return ErrWatchDoesNotExist
+	}
+	// Just remove the entry on watches key
+	s.watches[key] = nil
+	return nil
+}
+
+// GetRange gets a range of values at "directory"
+func (s *Zookeeper) GetRange(prefix string) (kvi []KVEntry, err error) {
+	prefix = format(prefix)
+	entries, stat, err := s.client.Children(prefix)
+	if err != nil {
+		log.Error("Cannot fetch range of keys beginning with prefix: ", prefix)
+		return nil, err
+	}
+	for _, item := range entries {
+		kvi = append(kvi, &kviTuple{prefix, []byte(item), uint64(stat.Mzxid)})
+	}
+	return kvi, err
+}
+
+// DeleteRange deletes a range of values at "directory"
+func (s *Zookeeper) DeleteRange(prefix string) error {
+	err := s.client.Delete(format(prefix), -1)
+	return err
+}
+
+// WatchRange triggers a watch on a range of values at "directory"
+func (s *Zookeeper) WatchRange(prefix string, filter string, _ time.Duration, callback WatchCallback) error {
+	fprefix := format(prefix)
+	_, _, eventChan, err := s.client.ChildrenW(fprefix)
+	if err != nil {
+		return err
+	}
+
+	// Create a new Watch entry with eventChan
+	s.watches[fprefix] = eventChan
+
+	for e := range eventChan {
+		if e.Type == zk.EventNodeChildrenChanged {
+			log.WithField("name", "zk").Debug("Discovery watch triggered")
+			kvi, err := s.GetRange(prefix)
+			if err == nil {
+				callback(kvi)
+			}
+		}
+	}
+
+	return nil
+}
+
+// CancelWatchRange stops the watch on the range of values, sends
+// a signal to the appropriate stop channel
+func (s *Zookeeper) CancelWatchRange(prefix string) error {
+	return s.CancelWatch(prefix)
+}
+
+// AtomicPut puts a value at "key" only if the key has not been
+// modified in the meantime; otherwise it returns an error
+func (s *Zookeeper) AtomicPut(key string, oldValue []byte, newValue []byte, index uint64) (bool, error) {
+	// Use index of Set method to implement CAS
+	return false, ErrNotImplemented
+}
+
+// AtomicDelete deletes a value at "key" if the key has not
+// been modified in the meantime, throws an error if this is the case
+func (s *Zookeeper) AtomicDelete(key string, oldValue []byte, index uint64) (bool, error) {
+	return false, ErrNotImplemented
+}
+
+// Acquire the lock for "key"/"directory"
+func (s *Zookeeper) Acquire(path string, value []byte) (string, error) {
+	// lock := zk.NewLock(s.client, path, nil)
+	// locks[path] = lock
+	// lock.Lock()
+	return "", ErrNotImplemented
+}
+
+// Release the lock for "key"/"directory"
+func (s *Zookeeper) Release(session string) error {
+	return ErrNotImplemented
+}

+ 39 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md

@@ -0,0 +1,39 @@
+Consul API client
+=================
+
+This `api` package attempts to provide programmatic
+access to the full Consul API.
+
+Currently, all of the Consul APIs included in version 0.3 are supported.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/consul/api).
+
+Usage
+=====
+
+Below is an example of using the Consul client:
+
+```go
+// Get a new client, with KV endpoints
+client, _ := api.NewClient(api.DefaultConfig())
+kv := client.KV()
+
+// PUT a new KV pair
+p := &api.KVPair{Key: "foo", Value: []byte("test")}
+_, err := kv.Put(p, nil)
+if err != nil {
+    panic(err)
+}
+
+// Lookup the pair
+pair, _, err := kv.Get("foo", nil)
+if err != nil {
+    panic(err)
+}
+fmt.Printf("KV: %v", pair)
+
+```
+

+ 140 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go

@@ -0,0 +1,140 @@
+package api
+
+const (
+	// ACLClientType is the client type token
+	ACLClientType = "client"
+
+	// ACLManagementType is the management type token
+	ACLManagementType = "management"
+)
+
+// ACLEntry is used to represent an ACL entry
+type ACLEntry struct {
+	CreateIndex uint64
+	ModifyIndex uint64
+	ID          string
+	Name        string
+	Type        string
+	Rules       string
+}
+
+// ACL can be used to query the ACL endpoints
+type ACL struct {
+	c *Client
+}
+
+// ACL returns a handle to the ACL endpoints
+func (c *Client) ACL() *ACL {
+	return &ACL{c}
+}
+
+// Create is used to generate a new token with the given parameters
+func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/create")
+	r.setWriteOptions(q)
+	r.obj = acl
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Update is used to update the rules of an existing token
+func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/update")
+	r.setWriteOptions(q)
+	r.obj = acl
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// Destroy is used to destroy a given ACL token ID
+func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// Clone is used to return a new token cloned from an existing one
+func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Info is used to query for information about an ACL token
+func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/info/"+id)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// List is used to get all the ACL tokens
+func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/list")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}

+ 148 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go

@@ -0,0 +1,148 @@
+package api
+
+import (
+	"os"
+	"testing"
+)
+
+// CONSUL_ROOT is a management token for the tests
+var CONSUL_ROOT string
+
+func init() {
+	CONSUL_ROOT = os.Getenv("CONSUL_ROOT")
+}
+
+func TestACL_CreateDestroy(t *testing.T) {
+	if CONSUL_ROOT == "" {
+		t.SkipNow()
+	}
+	c, s := makeClient(t)
+	defer s.stop()
+
+	c.config.Token = CONSUL_ROOT
+	acl := c.ACL()
+
+	ae := ACLEntry{
+		Name:  "API test",
+		Type:  ACLClientType,
+		Rules: `key "" { policy = "deny" }`,
+	}
+
+	id, wm, err := acl.Create(&ae, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if wm.RequestTime == 0 {
+		t.Fatalf("bad: %v", wm)
+	}
+
+	if id == "" {
+		t.Fatalf("invalid: %v", id)
+	}
+
+	ae2, _, err := acl.Info(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules {
+		t.Fatalf("Bad: %#v", ae2)
+	}
+
+	wm, err = acl.Destroy(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if wm.RequestTime == 0 {
+		t.Fatalf("bad: %v", wm)
+	}
+}
+
+func TestACL_CloneDestroy(t *testing.T) {
+	if CONSUL_ROOT == "" {
+		t.SkipNow()
+	}
+	c, s := makeClient(t)
+	defer s.stop()
+
+	c.config.Token = CONSUL_ROOT
+	acl := c.ACL()
+
+	id, wm, err := acl.Clone(CONSUL_ROOT, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if wm.RequestTime == 0 {
+		t.Fatalf("bad: %v", wm)
+	}
+
+	if id == "" {
+		t.Fatalf("invalid: %v", id)
+	}
+
+	wm, err = acl.Destroy(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if wm.RequestTime == 0 {
+		t.Fatalf("bad: %v", wm)
+	}
+}
+
+func TestACL_Info(t *testing.T) {
+	if CONSUL_ROOT == "" {
+		t.SkipNow()
+	}
+	c, s := makeClient(t)
+	defer s.stop()
+
+	c.config.Token = CONSUL_ROOT
+	acl := c.ACL()
+
+	ae, qm, err := acl.Info(CONSUL_ROOT, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if qm.LastIndex == 0 {
+		t.Fatalf("bad: %v", qm)
+	}
+	if !qm.KnownLeader {
+		t.Fatalf("bad: %v", qm)
+	}
+
+	if ae == nil || ae.ID != CONSUL_ROOT || ae.Type != ACLManagementType {
+		t.Fatalf("bad: %#v", ae)
+	}
+}
+
+func TestACL_List(t *testing.T) {
+	if CONSUL_ROOT == "" {
+		t.SkipNow()
+	}
+	c, s := makeClient(t)
+	defer s.stop()
+
+	c.config.Token = CONSUL_ROOT
+	acl := c.ACL()
+
+	acls, qm, err := acl.List(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if len(acls) < 2 {
+		t.Fatalf("bad: %v", acls)
+	}
+
+	if qm.LastIndex == 0 {
+		t.Fatalf("bad: %v", qm)
+	}
+	if !qm.KnownLeader {
+		t.Fatalf("bad: %v", qm)
+	}
+}

+ 331 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go

@@ -0,0 +1,331 @@
+package api
+
+import (
+	"fmt"
+)
+
+// AgentCheck represents a check known to the agent
+type AgentCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+}
+
+// AgentService represents a service known to the agent
+type AgentService struct {
+	ID      string
+	Service string
+	Tags    []string
+	Port    int
+	Address string
+}
+
+// AgentMember represents a cluster member known to the agent
+type AgentMember struct {
+	Name        string
+	Addr        string
+	Port        uint16
+	Tags        map[string]string
+	Status      int
+	ProtocolMin uint8
+	ProtocolMax uint8
+	ProtocolCur uint8
+	DelegateMin uint8
+	DelegateMax uint8
+	DelegateCur uint8
+}
+
+// AgentServiceRegistration is used to register a new service
+type AgentServiceRegistration struct {
+	ID      string   `json:",omitempty"`
+	Name    string   `json:",omitempty"`
+	Tags    []string `json:",omitempty"`
+	Port    int      `json:",omitempty"`
+	Address string   `json:",omitempty"`
+	Check   *AgentServiceCheck
+	Checks  AgentServiceChecks
+}
+
+// AgentCheckRegistration is used to register a new check
+type AgentCheckRegistration struct {
+	ID        string `json:",omitempty"`
+	Name      string `json:",omitempty"`
+	Notes     string `json:",omitempty"`
+	ServiceID string `json:",omitempty"`
+	AgentServiceCheck
+}
+
+// AgentServiceCheck is used to create an associated
+// check for a service
+type AgentServiceCheck struct {
+	Script   string `json:",omitempty"`
+	Interval string `json:",omitempty"`
+	TTL      string `json:",omitempty"`
+}
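+
+// AgentServiceChecks is a list of checks to associate with a service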
+type AgentServiceChecks []*AgentServiceCheck
+
+// Agent can be used to query the Agent endpoints
+type Agent struct {
+	c *Client
+
+	// cache the node name
+	nodeName string
+}
+
+// Agent returns a handle to the agent endpoints
+func (c *Client) Agent() *Agent {
+	return &Agent{c: c}
+}
+
+// Self is used to query the agent we are speaking to for
+// information about itself
+func (a *Agent) Self() (map[string]map[string]interface{}, error) {
+	r := a.c.newRequest("GET", "/v1/agent/self")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]map[string]interface{}
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// NodeName is used to get the node name of the agent
+func (a *Agent) NodeName() (string, error) {
+	if a.nodeName != "" {
+		return a.nodeName, nil
+	}
+	info, err := a.Self()
+	if err != nil {
+		return "", err
+	}
+	name := info["Config"]["NodeName"].(string)
+	a.nodeName = name
+	return name, nil
+}
+
+// Checks returns the locally registered checks
+func (a *Agent) Checks() (map[string]*AgentCheck, error) {
+	r := a.c.newRequest("GET", "/v1/agent/checks")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]*AgentCheck
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Services returns the locally registered services
+func (a *Agent) Services() (map[string]*AgentService, error) {
+	r := a.c.newRequest("GET", "/v1/agent/services")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]*AgentService
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Members returns the known gossip members. The WAN
+// flag can be used to query a server for WAN members.
+func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
+	r := a.c.newRequest("GET", "/v1/agent/members")
+	if wan {
+		r.params.Set("wan", "1")
+	}
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*AgentMember
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// ServiceRegister is used to register a new service with
+// the local agent
+func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/register")
+	r.obj = service
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// ServiceDeregister is used to deregister a service with
+// the local agent
+func (a *Agent) ServiceDeregister(serviceID string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// PassTTL is used to set a TTL check to the passing state
+func (a *Agent) PassTTL(checkID, note string) error {
+	return a.UpdateTTL(checkID, note, "pass")
+}
+
+// WarnTTL is used to set a TTL check to the warning state
+func (a *Agent) WarnTTL(checkID, note string) error {
+	return a.UpdateTTL(checkID, note, "warn")
+}
+
+// FailTTL is used to set a TTL check to the failing state
+func (a *Agent) FailTTL(checkID, note string) error {
+	return a.UpdateTTL(checkID, note, "fail")
+}
+
+// UpdateTTL is used to update the TTL of a check
+func (a *Agent) UpdateTTL(checkID, note, status string) error {
+	switch status {
+	case "pass":
+	case "warn":
+	case "fail":
+	default:
+		return fmt.Errorf("Invalid status: %s", status)
+	}
+	endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
+	r := a.c.newRequest("PUT", endpoint)
+	r.params.Set("note", note)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// CheckRegister is used to register a new check with
+// the local agent
+func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
+	r := a.c.newRequest("PUT", "/v1/agent/check/register")
+	r.obj = check
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// CheckDeregister is used to deregister a check with
+// the local agent
+func (a *Agent) CheckDeregister(checkID string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// Join is used to instruct the agent to attempt a join to
+// another cluster member
+func (a *Agent) Join(addr string, wan bool) error {
+	r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
+	if wan {
+		r.params.Set("wan", "1")
+	}
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// ForceLeave is used to have the agent eject a failed node
+func (a *Agent) ForceLeave(node string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// EnableServiceMaintenance toggles service maintenance mode on
+// for the given service ID.
+func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
+	r.params.Set("enable", "true")
+	r.params.Set("reason", reason)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// DisableServiceMaintenance toggles service maintenance mode off
+// for the given service ID.
+func (a *Agent) DisableServiceMaintenance(serviceID string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
+	r.params.Set("enable", "false")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// EnableNodeMaintenance toggles node maintenance mode on for the
+// agent we are connected to.
+func (a *Agent) EnableNodeMaintenance(reason string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/maintenance")
+	r.params.Set("enable", "true")
+	r.params.Set("reason", reason)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// DisableNodeMaintenance toggles node maintenance mode off for the
+// agent we are connected to.
+func (a *Agent) DisableNodeMaintenance() error {
+	r := a.c.newRequest("PUT", "/v1/agent/maintenance")
+	r.params.Set("enable", "false")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}

+ 404 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go

@@ -0,0 +1,404 @@
+package api
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestAgent_Self(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	info, err := agent.Self()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	name := info["Config"]["NodeName"]
+	if name == "" {
+		t.Fatalf("bad: %v", info)
+	}
+}
+
+func TestAgent_Members(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	members, err := agent.Members(false)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if len(members) != 1 {
+		t.Fatalf("bad: %v", members)
+	}
+}
+
+func TestAgent_Services(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	reg := &AgentServiceRegistration{
+		Name: "foo",
+		Tags: []string{"bar", "baz"},
+		Port: 8000,
+		Check: &AgentServiceCheck{
+			TTL: "15s",
+		},
+	}
+	if err := agent.ServiceRegister(reg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	services, err := agent.Services()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if _, ok := services["foo"]; !ok {
+		t.Fatalf("missing service: %v", services)
+	}
+
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if _, ok := checks["service:foo"]; !ok {
+		t.Fatalf("missing check: %v", checks)
+	}
+
+	if err := agent.ServiceDeregister("foo"); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestAgent_ServiceAddress(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	reg1 := &AgentServiceRegistration{
+		Name:    "foo1",
+		Port:    8000,
+		Address: "192.168.0.42",
+	}
+	reg2 := &AgentServiceRegistration{
+		Name: "foo2",
+		Port: 8000,
+	}
+	if err := agent.ServiceRegister(reg1); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if err := agent.ServiceRegister(reg2); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	services, err := agent.Services()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if _, ok := services["foo1"]; !ok {
+		t.Fatalf("missing service: %v", services)
+	}
+	if _, ok := services["foo2"]; !ok {
+		t.Fatalf("missing service: %v", services)
+	}
+
+	if services["foo1"].Address != "192.168.0.42" {
+		t.Fatalf("missing Address field in service foo1: %v", services)
+	}
+	if services["foo2"].Address != "" {
+		t.Fatalf("missing Address field in service foo2: %v", services)
+	}
+
+	if err := agent.ServiceDeregister("foo"); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestAgent_Services_MultipleChecks(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	reg := &AgentServiceRegistration{
+		Name: "foo",
+		Tags: []string{"bar", "baz"},
+		Port: 8000,
+		Checks: AgentServiceChecks{
+			&AgentServiceCheck{
+				TTL: "15s",
+			},
+			&AgentServiceCheck{
+				TTL: "30s",
+			},
+		},
+	}
+	if err := agent.ServiceRegister(reg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	services, err := agent.Services()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if _, ok := services["foo"]; !ok {
+		t.Fatalf("missing service: %v", services)
+	}
+
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if _, ok := checks["service:foo:1"]; !ok {
+		t.Fatalf("missing check: %v", checks)
+	}
+	if _, ok := checks["service:foo:2"]; !ok {
+		t.Fatalf("missing check: %v", checks)
+	}
+}
+
+func TestAgent_SetTTLStatus(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	reg := &AgentServiceRegistration{
+		Name: "foo",
+		Check: &AgentServiceCheck{
+			TTL: "15s",
+		},
+	}
+	if err := agent.ServiceRegister(reg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if err := agent.WarnTTL("service:foo", "test"); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	chk, ok := checks["service:foo"]
+	if !ok {
+		t.Fatalf("missing check: %v", checks)
+	}
+	if chk.Status != "warning" {
+		t.Fatalf("Bad: %#v", chk)
+	}
+	if chk.Output != "test" {
+		t.Fatalf("Bad: %#v", chk)
+	}
+
+	if err := agent.ServiceDeregister("foo"); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestAgent_Checks(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	reg := &AgentCheckRegistration{
+		Name: "foo",
+	}
+	reg.TTL = "15s"
+	if err := agent.CheckRegister(reg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if _, ok := checks["foo"]; !ok {
+		t.Fatalf("missing check: %v", checks)
+	}
+
+	if err := agent.CheckDeregister("foo"); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestAgent_Checks_serviceBound(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	// First register a service
+	serviceReg := &AgentServiceRegistration{
+		Name: "redis",
+	}
+	if err := agent.ServiceRegister(serviceReg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Register a check bound to the service
+	reg := &AgentCheckRegistration{
+		Name:      "redischeck",
+		ServiceID: "redis",
+	}
+	reg.TTL = "15s"
+	if err := agent.CheckRegister(reg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	check, ok := checks["redischeck"]
+	if !ok {
+		t.Fatalf("missing check: %v", checks)
+	}
+	if check.ServiceID != "redis" {
+		t.Fatalf("missing service association for check: %v", check)
+	}
+}
+
+func TestAgent_Join(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	info, err := agent.Self()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Join ourself
+	addr := info["Config"]["AdvertiseAddr"].(string)
+	err = agent.Join(addr, false)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestAgent_ForceLeave(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	// Eject somebody
+	err := agent.ForceLeave("foo")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestServiceMaintenance(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	// First register a service
+	serviceReg := &AgentServiceRegistration{
+		Name: "redis",
+	}
+	if err := agent.ServiceRegister(serviceReg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Enable maintenance mode
+	if err := agent.EnableServiceMaintenance("redis", "broken"); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Ensure a critical check was added
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	found := false
+	for _, check := range checks {
+		if strings.Contains(check.CheckID, "maintenance") {
+			found = true
+			if check.Status != "critical" || check.Notes != "broken" {
+				t.Fatalf("bad: %#v", checks)
+			}
+		}
+	}
+	if !found {
+		t.Fatalf("bad: %#v", checks)
+	}
+
+	// Disable maintenance mode
+	if err := agent.DisableServiceMaintenance("redis"); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Ensure the critical health check was removed
+	checks, err = agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	for _, check := range checks {
+		if strings.Contains(check.CheckID, "maintenance") {
+			t.Fatalf("should have removed health check")
+		}
+	}
+}
+
+func TestNodeMaintenance(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	// Enable maintenance mode
+	if err := agent.EnableNodeMaintenance("broken"); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Check that a critical check was added
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	found := false
+	for _, check := range checks {
+		if strings.Contains(check.CheckID, "maintenance") {
+			found = true
+			if check.Status != "critical" || check.Notes != "broken" {
+				t.Fatalf("bad: %#v", checks)
+			}
+		}
+	}
+	if !found {
+		t.Fatalf("bad: %#v", checks)
+	}
+
+	// Disable maintenance mode
+	if err := agent.DisableNodeMaintenance(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Ensure the check was removed
+	checks, err = agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	for _, check := range checks {
+		if strings.Contains(check.CheckID, "maintenance") {
+			t.Fatalf("should have removed health check")
+		}
+	}
+}
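
The agent tests above register services with TTL checks and then drive the check state via WarnTTL. Below is a minimal usage sketch of the same surface, assuming the package is imported as api from github.com/hashicorp/consul/api, an already-constructed *api.Client, a hypothetical service name "web", and that PassTTL is available alongside the WarnTTL call exercised above:

package main

import "github.com/hashicorp/consul/api"

// registerWithTTL registers a service with a 15s TTL check and then reports
// the check as passing. The check created for a service is named
// "service:<service id>", which the tests above rely on.
func registerWithTTL(client *api.Client) error {
	agent := client.Agent()
	reg := &api.AgentServiceRegistration{
		Name: "web",
		Port: 8080,
		Check: &api.AgentServiceCheck{
			TTL: "15s",
		},
	}
	if err := agent.ServiceRegister(reg); err != nil {
		return err
	}
	// PassTTL is assumed here; the tests above only exercise WarnTTL.
	return agent.PassTTL("service:web", "all good")
}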

+ 351 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go

@@ -0,0 +1,351 @@
+package api
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// QueryOptions are used to parameterize a query
+type QueryOptions struct {
+	// Providing a datacenter overwrites the DC provided
+	// by the Config
+	Datacenter string
+
+	// AllowStale allows any Consul server (non-leader) to service
+	// a read. This allows for lower latency and higher throughput
+	AllowStale bool
+
+	// RequireConsistent forces the read to be fully consistent.
+	// This is more expensive but prevents ever performing a stale
+	// read.
+	RequireConsistent bool
+
+	// WaitIndex is used to enable a blocking query. Waits
+	// until the timeout or the next index is reached
+	WaitIndex uint64
+
+	// WaitTime is used to bound the duration of a wait.
+	// Defaults to that of the Config, but can be overridden.
+	WaitTime time.Duration
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+}
+
+// WriteOptions are used to parameterize a write
+type WriteOptions struct {
+	// Providing a datacenter overwrites the DC provided
+	// by the Config
+	Datacenter string
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+}
+
+// QueryMeta is used to return meta data about a query
+type QueryMeta struct {
+	// LastIndex is the index returned by the server for this query.
+	// It can be used as a WaitIndex to perform a blocking query
+	LastIndex uint64
+
+	// Time of last contact from the leader for the
+	// server servicing the request
+	LastContact time.Duration
+
+	// Is there a known leader
+	KnownLeader bool
+
+	// How long did the request take
+	RequestTime time.Duration
+}
+
+// WriteMeta is used to return meta data about a write
+type WriteMeta struct {
+	// How long did the request take
+	RequestTime time.Duration
+}
+
+// HttpBasicAuth is used to authenticate an HTTP client with HTTP Basic Authentication
+type HttpBasicAuth struct {
+	// Username to use for HTTP Basic Authentication
+	Username string
+
+	// Password to use for HTTP Basic Authentication
+	Password string
+}
+
+// Config is used to configure the creation of a client
+type Config struct {
+	// Address is the address of the Consul server
+	Address string
+
+	// Scheme is the URI scheme for the Consul server
+	Scheme string
+
+	// Datacenter to use. If not provided, the default agent datacenter is used.
+	Datacenter string
+
+	// HttpClient is the client to use. Default will be
+	// used if not provided.
+	HttpClient *http.Client
+
+	// HttpAuth is the auth info to use for http access.
+	HttpAuth *HttpBasicAuth
+
+	// WaitTime limits how long a Watch will block. If not provided,
+	// the agent default values will be used.
+	WaitTime time.Duration
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+}
+
+// DefaultConfig returns a default configuration for the client
+func DefaultConfig() *Config {
+	config := &Config{
+		Address:    "127.0.0.1:8500",
+		Scheme:     "http",
+		HttpClient: http.DefaultClient,
+	}
+
+	if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" {
+		config.Address = addr
+	}
+
+	return config
+}
+
+// Client provides a client to the Consul API
+type Client struct {
+	config Config
+}
+
+// NewClient returns a new client
+func NewClient(config *Config) (*Client, error) {
+	// bootstrap the config
+	defConfig := DefaultConfig()
+
+	if len(config.Address) == 0 {
+		config.Address = defConfig.Address
+	}
+
+	if len(config.Scheme) == 0 {
+		config.Scheme = defConfig.Scheme
+	}
+
+	if config.HttpClient == nil {
+		config.HttpClient = defConfig.HttpClient
+	}
+
+	if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 {
+		config.HttpClient = &http.Client{
+			Transport: &http.Transport{
+				Dial: func(_, _ string) (net.Conn, error) {
+					return net.Dial("unix", parts[1])
+				},
+			},
+		}
+		config.Address = parts[1]
+	}
+
+	client := &Client{
+		config: *config,
+	}
+	return client, nil
+}
+
+// request is used to help build up a request
+type request struct {
+	config *Config
+	method string
+	url    *url.URL
+	params url.Values
+	body   io.Reader
+	obj    interface{}
+}
+
+// setQueryOptions is used to annotate the request with
+// additional query options
+func (r *request) setQueryOptions(q *QueryOptions) {
+	if q == nil {
+		return
+	}
+	if q.Datacenter != "" {
+		r.params.Set("dc", q.Datacenter)
+	}
+	if q.AllowStale {
+		r.params.Set("stale", "")
+	}
+	if q.RequireConsistent {
+		r.params.Set("consistent", "")
+	}
+	if q.WaitIndex != 0 {
+		r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
+	}
+	if q.WaitTime != 0 {
+		r.params.Set("wait", durToMsec(q.WaitTime))
+	}
+	if q.Token != "" {
+		r.params.Set("token", q.Token)
+	}
+}
+
+// durToMsec converts a duration to a string expressed in milliseconds
+func durToMsec(dur time.Duration) string {
+	return fmt.Sprintf("%dms", dur/time.Millisecond)
+}
+
+// setWriteOptions is used to annotate the request with
+// additional write options
+func (r *request) setWriteOptions(q *WriteOptions) {
+	if q == nil {
+		return
+	}
+	if q.Datacenter != "" {
+		r.params.Set("dc", q.Datacenter)
+	}
+	if q.Token != "" {
+		r.params.Set("token", q.Token)
+	}
+}
+
+// toHTTP converts the request to an HTTP request
+func (r *request) toHTTP() (*http.Request, error) {
+	// Encode the query parameters
+	r.url.RawQuery = r.params.Encode()
+
+	// Check if we should encode the body
+	if r.body == nil && r.obj != nil {
+		if b, err := encodeBody(r.obj); err != nil {
+			return nil, err
+		} else {
+			r.body = b
+		}
+	}
+
+	// Create the HTTP request
+	req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
+	if err != nil {
+		return nil, err
+	}
+
+	req.URL.Host = r.url.Host
+	req.URL.Scheme = r.url.Scheme
+	req.Host = r.url.Host
+
+	// Setup auth
+	if r.config.HttpAuth != nil {
+		req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
+	}
+
+	return req, nil
+}
+
+// newRequest is used to create a new request
+func (c *Client) newRequest(method, path string) *request {
+	r := &request{
+		config: &c.config,
+		method: method,
+		url: &url.URL{
+			Scheme: c.config.Scheme,
+			Host:   c.config.Address,
+			Path:   path,
+		},
+		params: make(map[string][]string),
+	}
+	if c.config.Datacenter != "" {
+		r.params.Set("dc", c.config.Datacenter)
+	}
+	if c.config.WaitTime != 0 {
+		r.params.Set("wait", durToMsec(r.config.WaitTime))
+	}
+	if c.config.Token != "" {
+		r.params.Set("token", r.config.Token)
+	}
+	return r
+}
+
+// doRequest runs a request with our client
+func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
+	req, err := r.toHTTP()
+	if err != nil {
+		return 0, nil, err
+	}
+	start := time.Now()
+	resp, err := c.config.HttpClient.Do(req)
+	diff := time.Now().Sub(start)
+	return diff, resp, err
+}
+
+// parseQueryMeta is used to help parse query meta-data
+func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
+	header := resp.Header
+
+	// Parse the X-Consul-Index
+	index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64)
+	if err != nil {
+		return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
+	}
+	q.LastIndex = index
+
+	// Parse the X-Consul-LastContact
+	last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
+	if err != nil {
+		return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
+	}
+	q.LastContact = time.Duration(last) * time.Millisecond
+
+	// Parse the X-Consul-KnownLeader
+	switch header.Get("X-Consul-KnownLeader") {
+	case "true":
+		q.KnownLeader = true
+	default:
+		q.KnownLeader = false
+	}
+	return nil
+}
+
+// decodeBody is used to JSON decode a body
+func decodeBody(resp *http.Response, out interface{}) error {
+	dec := json.NewDecoder(resp.Body)
+	return dec.Decode(out)
+}
+
+// encodeBody is used to encode a request body
+func encodeBody(obj interface{}) (io.Reader, error) {
+	buf := bytes.NewBuffer(nil)
+	enc := json.NewEncoder(buf)
+	if err := enc.Encode(obj); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+// requireOK is used to wrap doRequest and check for a 200
+func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
+	if e != nil {
+		if resp != nil {
+			resp.Body.Close()
+		}
+		return d, nil, e
+	}
+	if resp.StatusCode != 200 {
+		var buf bytes.Buffer
+		io.Copy(&buf, resp.Body)
+		resp.Body.Close()
+		return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+	}
+	return d, resp, nil
+}
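
As a rough sketch of how this client surface fits together, assuming the canonical import path github.com/hashicorp/consul/api and a local agent on the default HTTP port; Agent().Self() is the same call the tests in this change use to read the node name:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	conf := api.DefaultConfig()
	conf.Address = "127.0.0.1:8500" // assumption: agent listening on the default port
	conf.WaitTime = 10 * time.Second

	client, err := api.NewClient(conf)
	if err != nil {
		log.Fatalf("new client: %v", err)
	}

	// Self returns the agent's own configuration and member information.
	info, err := client.Agent().Self()
	if err != nil {
		log.Fatalf("agent self: %v", err)
	}
	fmt.Println("node name:", info["Config"]["NodeName"])
}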

+ 339 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go

@@ -0,0 +1,339 @@
+package api
+
+import (
+	crand "crypto/rand"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/consul/testutil"
+)
+
+var consulConfig = `{
+	"ports": {
+		"dns": 19000,
+		"http": 18800,
+		"rpc": 18600,
+		"serf_lan": 18200,
+		"serf_wan": 18400,
+		"server": 18000
+	},
+	"bind_addr": "127.0.0.1",
+	"data_dir": "%s",
+	"bootstrap": true,
+	"log_level": "debug",
+	"server": true
+}`
+
+type testServer struct {
+	pid        int
+	dataDir    string
+	configFile string
+}
+
+type testPortConfig struct {
+	DNS     int `json:"dns,omitempty"`
+	HTTP    int `json:"http,omitempty"`
+	RPC     int `json:"rpc,omitempty"`
+	SerfLan int `json:"serf_lan,omitempty"`
+	SerfWan int `json:"serf_wan,omitempty"`
+	Server  int `json:"server,omitempty"`
+}
+
+type testAddressConfig struct {
+	HTTP string `json:"http,omitempty"`
+}
+
+type testServerConfig struct {
+	Bootstrap bool               `json:"bootstrap,omitempty"`
+	Server    bool               `json:"server,omitempty"`
+	DataDir   string             `json:"data_dir,omitempty"`
+	LogLevel  string             `json:"log_level,omitempty"`
+	Addresses *testAddressConfig `json:"addresses,omitempty"`
+	Ports     testPortConfig     `json:"ports,omitempty"`
+}
+
+// Callback functions for modifying config
+type configCallback func(c *Config)
+type serverConfigCallback func(c *testServerConfig)
+
+func defaultConfig() *testServerConfig {
+	return &testServerConfig{
+		Bootstrap: true,
+		Server:    true,
+		LogLevel:  "debug",
+		Ports: testPortConfig{
+			DNS:     19000,
+			HTTP:    18800,
+			RPC:     18600,
+			SerfLan: 18200,
+			SerfWan: 18400,
+			Server:  18000,
+		},
+	}
+}
+
+func (s *testServer) stop() {
+	defer os.RemoveAll(s.dataDir)
+	defer os.RemoveAll(s.configFile)
+
+	cmd := exec.Command("kill", "-9", fmt.Sprintf("%d", s.pid))
+	if err := cmd.Run(); err != nil {
+		panic(err)
+	}
+}
+
+func newTestServer(t *testing.T) *testServer {
+	return newTestServerWithConfig(t, func(c *testServerConfig) {})
+}
+
+func newTestServerWithConfig(t *testing.T, cb serverConfigCallback) *testServer {
+	if path, err := exec.LookPath("consul"); err != nil || path == "" {
+		t.Log("consul not found on $PATH, skipping")
+		t.SkipNow()
+	}
+
+	pidFile, err := ioutil.TempFile("", "consul")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	pidFile.Close()
+	os.Remove(pidFile.Name())
+
+	dataDir, err := ioutil.TempDir("", "consul")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	configFile, err := ioutil.TempFile("", "consul")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	consulConfig := defaultConfig()
+	consulConfig.DataDir = dataDir
+
+	cb(consulConfig)
+
+	configContent, err := json.Marshal(consulConfig)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if _, err := configFile.Write(configContent); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	configFile.Close()
+
+	// Start the server
+	cmd := exec.Command("consul", "agent", "-config-file", configFile.Name())
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Start(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	return &testServer{
+		pid:        cmd.Process.Pid,
+		dataDir:    dataDir,
+		configFile: configFile.Name(),
+	}
+}
+
+func makeClient(t *testing.T) (*Client, *testServer) {
+	return makeClientWithConfig(t, func(c *Config) {
+		c.Address = "127.0.0.1:18800"
+	}, func(c *testServerConfig) {})
+}
+
+func makeClientWithConfig(t *testing.T, cb1 configCallback, cb2 serverConfigCallback) (*Client, *testServer) {
+	// Make client config
+	conf := DefaultConfig()
+	cb1(conf)
+
+	// Create client
+	client, err := NewClient(conf)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Create server
+	server := newTestServerWithConfig(t, cb2)
+
+	// Allow the server some time to start, and verify we have a leader.
+	testutil.WaitForResult(func() (bool, error) {
+		req := client.newRequest("GET", "/v1/catalog/nodes")
+		_, resp, err := client.doRequest(req)
+		if err != nil {
+			return false, err
+		}
+		resp.Body.Close()
+
+		// Ensure we have a leader and a node registration
+		if leader := resp.Header.Get("X-Consul-KnownLeader"); leader != "true" {
+			return false, fmt.Errorf("Consul leader status: %#v", leader)
+		}
+		if resp.Header.Get("X-Consul-Index") == "0" {
+			return false, fmt.Errorf("Consul index is 0")
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+
+	return client, server
+}
+
+func testKey() string {
+	buf := make([]byte, 16)
+	if _, err := crand.Read(buf); err != nil {
+		panic(fmt.Errorf("Failed to read random bytes: %v", err))
+	}
+
+	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+		buf[0:4],
+		buf[4:6],
+		buf[6:8],
+		buf[8:10],
+		buf[10:16])
+}
+
+func TestSetQueryOptions(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	r := c.newRequest("GET", "/v1/kv/foo")
+	q := &QueryOptions{
+		Datacenter:        "foo",
+		AllowStale:        true,
+		RequireConsistent: true,
+		WaitIndex:         1000,
+		WaitTime:          100 * time.Second,
+		Token:             "12345",
+	}
+	r.setQueryOptions(q)
+
+	if r.params.Get("dc") != "foo" {
+		t.Fatalf("bad: %v", r.params)
+	}
+	if _, ok := r.params["stale"]; !ok {
+		t.Fatalf("bad: %v", r.params)
+	}
+	if _, ok := r.params["consistent"]; !ok {
+		t.Fatalf("bad: %v", r.params)
+	}
+	if r.params.Get("index") != "1000" {
+		t.Fatalf("bad: %v", r.params)
+	}
+	if r.params.Get("wait") != "100000ms" {
+		t.Fatalf("bad: %v", r.params)
+	}
+	if r.params.Get("token") != "12345" {
+		t.Fatalf("bad: %v", r.params)
+	}
+}
+
+func TestSetWriteOptions(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	r := c.newRequest("GET", "/v1/kv/foo")
+	q := &WriteOptions{
+		Datacenter: "foo",
+		Token:      "23456",
+	}
+	r.setWriteOptions(q)
+
+	if r.params.Get("dc") != "foo" {
+		t.Fatalf("bad: %v", r.params)
+	}
+	if r.params.Get("token") != "23456" {
+		t.Fatalf("bad: %v", r.params)
+	}
+}
+
+func TestRequestToHTTP(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	r := c.newRequest("DELETE", "/v1/kv/foo")
+	q := &QueryOptions{
+		Datacenter: "foo",
+	}
+	r.setQueryOptions(q)
+	req, err := r.toHTTP()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if req.Method != "DELETE" {
+		t.Fatalf("bad: %v", req)
+	}
+	if req.URL.RequestURI() != "/v1/kv/foo?dc=foo" {
+		t.Fatalf("bad: %v", req)
+	}
+}
+
+func TestParseQueryMeta(t *testing.T) {
+	resp := &http.Response{
+		Header: make(map[string][]string),
+	}
+	resp.Header.Set("X-Consul-Index", "12345")
+	resp.Header.Set("X-Consul-LastContact", "80")
+	resp.Header.Set("X-Consul-KnownLeader", "true")
+
+	qm := &QueryMeta{}
+	if err := parseQueryMeta(resp, qm); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if qm.LastIndex != 12345 {
+		t.Fatalf("Bad: %v", qm)
+	}
+	if qm.LastContact != 80*time.Millisecond {
+		t.Fatalf("Bad: %v", qm)
+	}
+	if !qm.KnownLeader {
+		t.Fatalf("Bad: %v", qm)
+	}
+}
+
+func TestAPI_UnixSocket(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.SkipNow()
+	}
+
+	tempDir, err := ioutil.TempDir("", "consul")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	defer os.RemoveAll(tempDir)
+	socket := filepath.Join(tempDir, "test.sock")
+
+	c, s := makeClientWithConfig(t, func(c *Config) {
+		c.Address = "unix://" + socket
+	}, func(c *testServerConfig) {
+		c.Addresses = &testAddressConfig{
+			HTTP: "unix://" + socket,
+		}
+	})
+	defer s.stop()
+
+	agent := c.Agent()
+
+	info, err := agent.Self()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if info["Config"]["NodeName"] == "" {
+		t.Fatalf("bad: %v", info)
+	}
+}

+ 181 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go

@@ -0,0 +1,181 @@
+package api
+
+type Node struct {
+	Node    string
+	Address string
+}
+
+type CatalogService struct {
+	Node        string
+	Address     string
+	ServiceID   string
+	ServiceName string
+	ServiceTags []string
+	ServicePort int
+}
+
+type CatalogNode struct {
+	Node     *Node
+	Services map[string]*AgentService
+}
+
+type CatalogRegistration struct {
+	Node       string
+	Address    string
+	Datacenter string
+	Service    *AgentService
+	Check      *AgentCheck
+}
+
+type CatalogDeregistration struct {
+	Node       string
+	Address    string
+	Datacenter string
+	ServiceID  string
+	CheckID    string
+}
+
+// Catalog can be used to query the Catalog endpoints
+type Catalog struct {
+	c *Client
+}
+
+// Catalog returns a handle to the catalog endpoints
+func (c *Client) Catalog() *Catalog {
+	return &Catalog{c}
+}
+
+func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/catalog/register")
+	r.setWriteOptions(q)
+	r.obj = reg
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/catalog/deregister")
+	r.setWriteOptions(q)
+	r.obj = dereg
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+// Datacenters is used to query for all the known datacenters
+func (c *Catalog) Datacenters() ([]string, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/datacenters")
+	_, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []string
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Nodes is used to query all the known nodes
+func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/nodes")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*Node
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Services is used to query for all known services
+func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/services")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out map[string][]string
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query catalog entries for a given service
+func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/service/"+service)
+	r.setQueryOptions(q)
+	if tag != "" {
+		r.params.Set("tag", tag)
+	}
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CatalogService
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Node is used to query for service information about a single node
+func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out *CatalogNode
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
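
A hedged usage sketch of the catalog endpoints above, assuming a *api.Client built as in the earlier sketch and a hypothetical service named "redis":

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// listService prints every catalog entry for one service, along with the
// query index that could seed a blocking query.
func listService(client *api.Client) error {
	entries, meta, err := client.Catalog().Service("redis", "", nil)
	if err != nil {
		return err
	}
	fmt.Printf("query index: %d\n", meta.LastIndex)
	for _, e := range entries {
		fmt.Printf("%s on %s -> %s:%d\n", e.ServiceID, e.Node, e.Address, e.ServicePort)
	}
	return nil
}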

+ 273 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go

@@ -0,0 +1,273 @@
+package api
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/consul/testutil"
+)
+
+func TestCatalog_Datacenters(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	catalog := c.Catalog()
+
+	testutil.WaitForResult(func() (bool, error) {
+		datacenters, err := catalog.Datacenters()
+		if err != nil {
+			return false, err
+		}
+
+		if len(datacenters) == 0 {
+			return false, fmt.Errorf("Bad: %v", datacenters)
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}
+
+func TestCatalog_Nodes(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	catalog := c.Catalog()
+
+	testutil.WaitForResult(func() (bool, error) {
+		nodes, meta, err := catalog.Nodes(nil)
+		if err != nil {
+			return false, err
+		}
+
+		if meta.LastIndex == 0 {
+			return false, fmt.Errorf("Bad: %v", meta)
+		}
+
+		if len(nodes) == 0 {
+			return false, fmt.Errorf("Bad: %v", nodes)
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}
+
+func TestCatalog_Services(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	catalog := c.Catalog()
+
+	testutil.WaitForResult(func() (bool, error) {
+		services, meta, err := catalog.Services(nil)
+		if err != nil {
+			return false, err
+		}
+
+		if meta.LastIndex == 0 {
+			return false, fmt.Errorf("Bad: %v", meta)
+		}
+
+		if len(services) == 0 {
+			return false, fmt.Errorf("Bad: %v", services)
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}
+
+func TestCatalog_Service(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	catalog := c.Catalog()
+
+	testutil.WaitForResult(func() (bool, error) {
+		services, meta, err := catalog.Service("consul", "", nil)
+		if err != nil {
+			return false, err
+		}
+
+		if meta.LastIndex == 0 {
+			return false, fmt.Errorf("Bad: %v", meta)
+		}
+
+		if len(services) == 0 {
+			return false, fmt.Errorf("Bad: %v", services)
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}
+
+func TestCatalog_Node(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	catalog := c.Catalog()
+	name, _ := c.Agent().NodeName()
+
+	testutil.WaitForResult(func() (bool, error) {
+		info, meta, err := catalog.Node(name, nil)
+		if err != nil {
+			return false, err
+		}
+
+		if meta.LastIndex == 0 {
+			return false, fmt.Errorf("Bad: %v", meta)
+		}
+		if len(info.Services) == 0 {
+			return false, fmt.Errorf("Bad: %v", info)
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}
+
+func TestCatalog_Registration(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	catalog := c.Catalog()
+
+	service := &AgentService{
+		ID:      "redis1",
+		Service: "redis",
+		Tags:    []string{"master", "v1"},
+		Port:    8000,
+	}
+
+	check := &AgentCheck{
+		Node:      "foobar",
+		CheckID:   "service:redis1",
+		Name:      "Redis health check",
+		Notes:     "Script based health check",
+		Status:    "passing",
+		ServiceID: "redis1",
+	}
+
+	reg := &CatalogRegistration{
+		Datacenter: "dc1",
+		Node:       "foobar",
+		Address:    "192.168.10.10",
+		Service:    service,
+		Check:      check,
+	}
+
+	testutil.WaitForResult(func() (bool, error) {
+		if _, err := catalog.Register(reg, nil); err != nil {
+			return false, err
+		}
+
+		node, _, err := catalog.Node("foobar", nil)
+		if err != nil {
+			return false, err
+		}
+
+		if _, ok := node.Services["redis1"]; !ok {
+			return false, fmt.Errorf("missing service: redis1")
+		}
+
+		health, _, err := c.Health().Node("foobar", nil)
+		if err != nil {
+			return false, err
+		}
+
+		if health[0].CheckID != "service:redis1" {
+			return false, fmt.Errorf("missing checkid service:redis1")
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+
+	// Test catalog deregistration of the previously registered service
+	dereg := &CatalogDeregistration{
+		Datacenter: "dc1",
+		Node:       "foobar",
+		Address:    "192.168.10.10",
+		ServiceID:  "redis1",
+	}
+
+	if _, err := catalog.Deregister(dereg, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	testutil.WaitForResult(func() (bool, error) {
+		node, _, err := catalog.Node("foobar", nil)
+		if err != nil {
+			return false, err
+		}
+
+		if _, ok := node.Services["redis1"]; ok {
+			return false, fmt.Errorf("ServiceID:redis1 is not deregistered")
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+
+	// Test deregistration of the previously registered check
+	dereg = &CatalogDeregistration{
+		Datacenter: "dc1",
+		Node:       "foobar",
+		Address:    "192.168.10.10",
+		CheckID:    "service:redis1",
+	}
+
+	if _, err := catalog.Deregister(dereg, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	testutil.WaitForResult(func() (bool, error) {
+		health, _, err := c.Health().Node("foobar", nil)
+		if err != nil {
+			return false, err
+		}
+
+		if len(health) != 0 {
+			return false, fmt.Errorf("CheckID:service:redis1 is not deregistered")
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+
+	// Test node deregistration of the previously registered node
+	dereg = &CatalogDeregistration{
+		Datacenter: "dc1",
+		Node:       "foobar",
+		Address:    "192.168.10.10",
+	}
+
+	if _, err := catalog.Deregister(dereg, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	testutil.WaitForResult(func() (bool, error) {
+		node, _, err := catalog.Node("foobar", nil)
+		if err != nil {
+			return false, err
+		}
+
+		if node != nil {
+			return false, fmt.Errorf("node is not deregistered: %v", node)
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}

+ 104 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go

@@ -0,0 +1,104 @@
+package api
+
+import (
+	"bytes"
+	"strconv"
+)
+
+// Event can be used to query the Event endpoints
+type Event struct {
+	c *Client
+}
+
+// UserEvent represents an event that was fired by the user
+type UserEvent struct {
+	ID            string
+	Name          string
+	Payload       []byte
+	NodeFilter    string
+	ServiceFilter string
+	TagFilter     string
+	Version       int
+	LTime         uint64
+}
+
+// Event returns a handle to the event endpoints
+func (c *Client) Event() *Event {
+	return &Event{c}
+}
+
+// Fire is used to fire a new user event. Only the Name, Payload and Filters
+// are respected. This returns the ID or an associated error. Cross DC requests
+// are supported.
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
+	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
+	r.setWriteOptions(q)
+	if params.NodeFilter != "" {
+		r.params.Set("node", params.NodeFilter)
+	}
+	if params.ServiceFilter != "" {
+		r.params.Set("service", params.ServiceFilter)
+	}
+	if params.TagFilter != "" {
+		r.params.Set("tag", params.TagFilter)
+	}
+	if params.Payload != nil {
+		r.body = bytes.NewReader(params.Payload)
+	}
+
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out UserEvent
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// List is used to get the most recent events an agent has received.
+// This list can be optionally filtered by the name. This endpoint supports
+// quasi-blocking queries. The index is not monotonic, nor does it provide
+// LastContact or KnownLeader.
+func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
+	r := e.c.newRequest("GET", "/v1/event/list")
+	r.setQueryOptions(q)
+	if name != "" {
+		r.params.Set("name", name)
+	}
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*UserEvent
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// IDToIndex is a bit of a hack. This simulates the index generation to
+// convert an event ID into a WaitIndex.
+func (e *Event) IDToIndex(uuid string) uint64 {
+	lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
+	upper := uuid[19:23] + uuid[24:36]
+	lowVal, err := strconv.ParseUint(lower, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + lower)
+	}
+	highVal, err := strconv.ParseUint(upper, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + upper)
+	}
+	return lowVal ^ highVal
+}
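
To show how Fire, List, and IDToIndex combine into the quasi-blocking pattern described above, a small sketch (the event name "deploy" is hypothetical, and the client is assumed to be built as in the earlier sketch):

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// fireAndWait fires a user event, then blocks until a newer event arrives or
// the wait time elapses, seeding WaitIndex from IDToIndex.
func fireAndWait(client *api.Client) error {
	ev := client.Event()

	id, _, err := ev.Fire(&api.UserEvent{Name: "deploy", Payload: []byte("v2")}, nil)
	if err != nil {
		return err
	}

	opts := &api.QueryOptions{WaitIndex: ev.IDToIndex(id)}
	events, qm, err := ev.List("deploy", opts)
	if err != nil {
		return err
	}
	fmt.Printf("index %d, %d event(s) seen\n", qm.LastIndex, len(events))
	return nil
}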

+ 39 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go

@@ -0,0 +1,39 @@
+package api
+
+import (
+	"testing"
+)
+
+func TestEvent_FireList(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	event := c.Event()
+
+	params := &UserEvent{Name: "foo"}
+	id, meta, err := event.Fire(params, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	if id == "" {
+		t.Fatalf("invalid: %v", id)
+	}
+
+	events, qm, err := event.List("", nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if qm.LastIndex != event.IDToIndex(id) {
+		t.Fatalf("Bad: %#v", qm)
+	}
+
+	if events[len(events)-1].ID != id {
+		t.Fatalf("bad: %#v", events)
+	}
+}

+ 136 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go

@@ -0,0 +1,136 @@
+package api
+
+import (
+	"fmt"
+)
+
+// HealthCheck is used to represent a single check
+type HealthCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+}
+
+// ServiceEntry is used for the health service endpoint
+type ServiceEntry struct {
+	Node    *Node
+	Service *AgentService
+	Checks  []*HealthCheck
+}
+
+// Health can be used to query the Health endpoints
+type Health struct {
+	c *Client
+}
+
+// Health returns a handle to the health endpoints
+func (c *Client) Health() *Health {
+	return &Health{c}
+}
+
+// Node is used to query for checks belonging to a given node
+func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*HealthCheck
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Checks is used to return the checks associated with a service
+func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/checks/"+service)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*HealthCheck
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query health information along with service info
+// for a given service. It can optionally do server-side filtering on a tag,
+// or restrict the results to nodes with passing health checks only.
+func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/service/"+service)
+	r.setQueryOptions(q)
+	if tag != "" {
+		r.params.Set("tag", tag)
+	}
+	if passingOnly {
+		r.params.Set("passing", "1")
+	}
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*ServiceEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// State is used to retrieve all the checks in a given state.
+// The wildcard "any" state can also be used for all checks.
+func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+	switch state {
+	case "any":
+	case "warning":
+	case "critical":
+	case "passing":
+	case "unknown":
+	default:
+		return nil, nil, fmt.Errorf("Unsupported state: %v", state)
+	}
+	r := h.c.newRequest("GET", "/v1/health/state/"+state)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*HealthCheck
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
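
Finally, a sketch that combines the health endpoints with the blocking-query fields defined in api.go (QueryOptions.WaitIndex and QueryMeta.LastIndex); the service name "web" is hypothetical and the client is assumed to be built as in the earlier sketch:

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// watchHealthy polls the passing instances of a service, re-issuing the
// request as a blocking query by feeding LastIndex back in as WaitIndex.
func watchHealthy(client *api.Client) error {
	var waitIndex uint64
	for i := 0; i < 3; i++ { // bounded only to keep the sketch finite
		opts := &api.QueryOptions{WaitIndex: waitIndex}
		entries, meta, err := client.Health().Service("web", "", true, opts)
		if err != nil {
			return err
		}
		for _, e := range entries {
			fmt.Printf("passing: %s:%d on %s\n", e.Node.Address, e.Service.Port, e.Node.Node)
		}
		waitIndex = meta.LastIndex
	}
	return nil
}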

Some files were not shown because too many files changed in this diff