Updating Godep to reuse Swarm discovery and store packages

Signed-off-by: Madhu Venugopal <madhu@docker.com>
Madhu Venugopal, 10 years ago
Commit c46a023902
89 files changed, 13,765 insertions(+), 0 deletions(-)
  1. +24 -0   libnetwork/Godeps/Godeps.json
  2. +23 -0   libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child.go
  3. +73 -0   libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go
  4. +481 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go
  5. +108 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go
  6. +37 -0   libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go
  7. +34 -0   libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete.go
  8. +46 -0   libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go
  9. +36 -0   libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap.go
  10. +57 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go
  11. +55 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug.go
  12. +28 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go
  13. +40 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete.go
  14. +81 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go
  15. +49 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/error.go
  16. +32 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get.go
  17. +131 -0 libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go
  18. +30 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member.go
  19. +71 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member_test.go
  20. +72 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/options.go
  21. +403 -0 libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go
  22. +22 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests_test.go
  23. +89 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.go
  24. +42 -0  libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go
  25. +137 -0 libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create.go
  26. +241 -0 libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go
  27. +6 -0   libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go
  28. +103 -0 libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch.go
  29. +119 -0 libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go
  30. +274 -0 libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/README.md
  31. +106 -0 libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery.go
  32. +54 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery_test.go
  33. +71 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file.go
  34. +46 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file_test.go
  35. +35 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator.go
  36. +55 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator_test.go
  37. +92 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv.go
  38. +20 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv_test.go
  39. +45 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes.go
  40. +31 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes_test.go
  41. +31 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/README.md
  42. +104 -0 libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token.go
  43. +36 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token_test.go
  44. +79 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/README.md
  45. +301 -0 libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/consul.go
  46. +264 -0 libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/etcd.go
  47. +51 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/helpers.go
  48. +92 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/store.go
  49. +60 -0  libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/structs.go
  50. +213 -0 libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/zookeeper.go
  51. +39 -0  libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md
  52. +140 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go
  53. +148 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go
  54. +331 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go
  55. +404 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go
  56. +351 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go
  57. +339 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go
  58. +181 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go
  59. +273 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go
  60. +104 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go
  61. +39 -0  libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go
  62. +136 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go
  63. +121 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go
  64. +236 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go
  65. +431 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go
  66. +321 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go
  67. +289 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go
  68. +482 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go
  69. +306 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go
  70. +245 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go
  71. +200 -0 libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go
  72. +43 -0  libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go
  73. +35 -0  libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go
  74. +166 -0 libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/cluster_test.go
  75. +844 -0 libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/conn.go
  76. +242 -0 libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants.go
  77. +24 -0  libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants_test.go
  78. +288 -0 libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw.go
  79. +367 -0 libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw_test.go
  80. +131 -0 libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock.go
  81. +94 -0  libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock_test.go
  82. +119 -0 libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/server_help.go
  83. +136 -0 libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/server_java.go
  84. +633 -0 libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs.go
  85. +60 -0  libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs_test.go
  86. +148 -0 libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/tracer.go
  87. +54 -0  libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util.go
  88. +17 -0  libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util_test.go
  89. +518 -0 libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/zk_test.go

+ 24 - 0
libnetwork/Godeps/Godeps.json

@@ -10,6 +10,11 @@
 			"Comment": "v0.6.4-12-g467d9d5",
 			"Rev": "467d9d55c2d2c17248441a8fc661561161f40d5e"
 		},
+		{
+			"ImportPath": "github.com/coreos/go-etcd/etcd",
+			"Comment": "v2.0.0-7-g73a8ef7",
+			"Rev": "73a8ef737e8ea002281a28b4cb92a1de121ad4c6"
+		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/homedir",
 			"Comment": "v1.4.1-3479-ga9172f5",
@@ -60,6 +65,16 @@
 			"Comment": "v1.4.0-495-g3e66118",
 			"Rev": "3e661186ba24f259d3860f067df052c7f6904bee"
 		},
+		{
+			"ImportPath": "github.com/docker/swarm/discovery",
+			"Comment": "v0.2.0-333-g54dfabd",
+			"Rev": "54dfabd2521314de1c5b036f6c609efbe09df4ea"
+		},
+		{
+			"ImportPath": "github.com/docker/swarm/pkg/store",
+			"Comment": "v0.2.0-333-g54dfabd",
+			"Rev": "54dfabd2521314de1c5b036f6c609efbe09df4ea"
+		},
 		{
 			"ImportPath": "github.com/godbus/dbus",
 			"Comment": "v2-3-g4160802",
@@ -72,6 +87,15 @@
 		{
 			"ImportPath": "github.com/gorilla/mux",
 			"Rev": "8096f47503459bcc74d1f4c487b7e6e42e5746b5"
+                },
+                {
+			"ImportPath": "github.com/hashicorp/consul/api",
+			"Comment": "v0.5.0rc1-66-g954aec6",
+			"Rev": "954aec66231b79c161a4122b023fbcad13047f79"
+		},
+		{
+			"ImportPath": "github.com/samuel/go-zookeeper/zk",
+			"Rev": "d0e0d8e11f318e000a8cc434616d69e329edc374"
 		},
 		{
 			"ImportPath": "github.com/vishvananda/netlink",

+ 23 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child.go

@@ -0,0 +1,23 @@
+package etcd
+
+// Add a new directory with a random etcd-generated key under the given path.
+func (c *Client) AddChildDir(key string, ttl uint64) (*Response, error) {
+	raw, err := c.post(key, "", ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// Add a new file with a random etcd-generated key under the given path.
+func (c *Client) AddChild(key string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.post(key, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
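
Usage note (editorial, not part of the commit): AddChild and AddChildDir wrap etcd's in-order POST, where the server assigns each child a monotonically increasing generated key; this is the usual building block for queues. A minimal sketch, assuming a local etcd on the default http://127.0.0.1:4001 and a hypothetical "jobs" directory:

    package main

    import (
        "fmt"
        "log"

        "github.com/coreos/go-etcd/etcd"
    )

    func main() {
        c := etcd.NewClient(nil) // nil machine list defaults to http://127.0.0.1:4001

        // Each AddChild POST receives an etcd-generated, increasing key,
        // so a sorted Get returns children in insertion order.
        if _, err := c.AddChild("jobs", "payload-1", 60); err != nil {
            log.Fatal(err)
        }
        if _, err := c.AddChild("jobs", "payload-2", 60); err != nil {
            log.Fatal(err)
        }

        resp, err := c.Get("jobs", true, false) // sorted, non-recursive
        if err != nil {
            log.Fatal(err)
        }
        for _, n := range resp.Node.Nodes {
            fmt.Println(n.Key, "=>", n.Value)
        }
    }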

+ 73 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/add_child_test.go

@@ -0,0 +1,73 @@
+package etcd
+
+import "testing"
+
+func TestAddChild(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+		c.Delete("nonexistentDir", true)
+	}()
+
+	c.CreateDir("fooDir", 5)
+
+	_, err := c.AddChild("fooDir", "v0", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = c.AddChild("fooDir", "v1", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.Get("fooDir", true, false)
+	// The child with v0 should precede the child with v1 because it was
+	// added earlier, so it has a lower key.
+	if !(len(resp.Node.Nodes) == 2 && (resp.Node.Nodes[0].Value == "v0" && resp.Node.Nodes[1].Value == "v1")) {
+		t.Fatalf("AddChild 1 failed.  There should be two children whose values are v0 and v1, respectively."+
+			"  The response was: %#v", resp)
+	}
+
+	// Creating a child under a nonexistent directory should succeed.
+	// The directory should be created.
+	resp, err = c.AddChild("nonexistentDir", "foo", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestAddChildDir(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+		c.Delete("nonexistentDir", true)
+	}()
+
+	c.CreateDir("fooDir", 5)
+
+	_, err := c.AddChildDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = c.AddChildDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.Get("fooDir", true, false)
+	// The two children should come back in insertion order; both are
+	// directories with no children of their own.
+	if !(len(resp.Node.Nodes) == 2 && (len(resp.Node.Nodes[0].Nodes) == 0 && len(resp.Node.Nodes[1].Nodes) == 0)) {
+		t.Fatalf("AddChildDir 1 failed.  There should be two empty child directories."+
+			"  The response was: %#v", resp)
+	}
+
+	// Creating a child under a nonexistent directory should succeed.
+	// The directory should be created.
+	resp, err = c.AddChildDir("nonexistentDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+}

+ 481 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go

@@ -0,0 +1,481 @@
+package etcd
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"errors"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+	"time"
+)
+
+// See SetConsistency for how to use these constants.
+const (
+	// Using strings rather than iota because the consistency level
+	// could be persisted to disk, so it'd be better to use
+	// human-readable values.
+	STRONG_CONSISTENCY = "STRONG"
+	WEAK_CONSISTENCY   = "WEAK"
+)
+
+const (
+	defaultBufferSize = 10
+)
+
+func init() {
+	rand.Seed(int64(time.Now().Nanosecond()))
+}
+
+type Config struct {
+	CertFile    string        `json:"certFile"`
+	KeyFile     string        `json:"keyFile"`
+	CaCertFile  []string      `json:"caCertFiles"`
+	DialTimeout time.Duration `json:"timeout"`
+	Consistency string        `json:"consistency"`
+}
+
+type credentials struct {
+	username string
+	password string
+}
+
+type Client struct {
+	config      Config   `json:"config"`
+	cluster     *Cluster `json:"cluster"`
+	httpClient  *http.Client
+	credentials *credentials
+	transport   *http.Transport
+	persistence io.Writer
+	cURLch      chan string
+	// CheckRetry can be used to control the policy for failed requests
+	// and modify the cluster if needed.
+	// The client calls it before sending requests again, and
+	// stops retrying if CheckRetry returns some error. The cases that
+	// this function needs to handle include no response and unexpected
+	// http status code of response.
+	// If CheckRetry is nil, client will call the default one
+	// `DefaultCheckRetry`.
+	// Argument cluster is the etcd.Cluster object that these requests have been made on.
+	// Argument numReqs is the number of http.Requests that have been made so far.
+	// Argument lastResp is the http.Response from the last request.
+	// Argument err is the reason of the failure.
+	CheckRetry func(cluster *Cluster, numReqs int,
+		lastResp http.Response, err error) error
+}
+
+// NewClient creates a basic client configured to use
+// the given machine list.
+func NewClient(machines []string) *Client {
+	config := Config{
+		// default timeout is one second
+		DialTimeout: time.Second,
+		Consistency: WEAK_CONSISTENCY,
+	}
+
+	client := &Client{
+		cluster: NewCluster(machines),
+		config:  config,
+	}
+
+	client.initHTTPClient()
+	client.saveConfig()
+
+	return client
+}
+
+// NewTLSClient creates a basic client with TLS configuration
+func NewTLSClient(machines []string, cert, key, caCert string) (*Client, error) {
+	// overwrite the default machine to use https
+	if len(machines) == 0 {
+		machines = []string{"https://127.0.0.1:4001"}
+	}
+
+	config := Config{
+		// default timeout is one second
+		DialTimeout: time.Second,
+		Consistency: WEAK_CONSISTENCY,
+		CertFile:    cert,
+		KeyFile:     key,
+		CaCertFile:  make([]string, 0),
+	}
+
+	client := &Client{
+		cluster: NewCluster(machines),
+		config:  config,
+	}
+
+	err := client.initHTTPSClient(cert, key)
+	if err != nil {
+		return nil, err
+	}
+
+	err = client.AddRootCA(caCert)
+
+	client.saveConfig()
+
+	return client, nil
+}
+
+// NewClientFromFile creates a client from a given file path.
+// The given file is expected to use the JSON format.
+func NewClientFromFile(fpath string) (*Client, error) {
+	fi, err := os.Open(fpath)
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err := fi.Close(); err != nil {
+			panic(err)
+		}
+	}()
+
+	return NewClientFromReader(fi)
+}
+
+// NewClientFromReader creates a Client configured from a given reader.
+// The configuration is expected to use the JSON format.
+func NewClientFromReader(reader io.Reader) (*Client, error) {
+	c := new(Client)
+
+	b, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}
+
+	err = json.Unmarshal(b, c)
+	if err != nil {
+		return nil, err
+	}
+	if c.config.CertFile == "" {
+		c.initHTTPClient()
+	} else {
+		err = c.initHTTPSClient(c.config.CertFile, c.config.KeyFile)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, caCert := range c.config.CaCertFile {
+		if err := c.AddRootCA(caCert); err != nil {
+			return nil, err
+		}
+	}
+
+	return c, nil
+}
+
+// Override the Client's HTTP Transport object
+func (c *Client) SetTransport(tr *http.Transport) {
+	c.httpClient.Transport = tr
+	c.transport = tr
+}
+
+func (c *Client) SetCredentials(username, password string) {
+	c.credentials = &credentials{username, password}
+}
+
+func (c *Client) Close() {
+	c.transport.DisableKeepAlives = true
+	c.transport.CloseIdleConnections()
+}
+
+// initHTTPClient initializes an HTTP client for the etcd client
+func (c *Client) initHTTPClient() {
+	c.transport = &http.Transport{
+		Dial: c.dial,
+		TLSClientConfig: &tls.Config{
+			InsecureSkipVerify: true,
+		},
+	}
+	c.httpClient = &http.Client{Transport: c.transport}
+}
+
+// initHTTPSClient initializes an HTTPS client for the etcd client
+func (c *Client) initHTTPSClient(cert, key string) error {
+	if cert == "" || key == "" {
+		return errors.New("Require both cert and key path")
+	}
+
+	tlsCert, err := tls.LoadX509KeyPair(cert, key)
+	if err != nil {
+		return err
+	}
+
+	tlsConfig := &tls.Config{
+		Certificates:       []tls.Certificate{tlsCert},
+		InsecureSkipVerify: true,
+	}
+
+	tr := &http.Transport{
+		TLSClientConfig: tlsConfig,
+		Dial:            c.dial,
+	}
+
+	c.httpClient = &http.Client{Transport: tr}
+	return nil
+}
+
+// SetPersistence sets a writer to which the config will be
+// written every time it's changed.
+func (c *Client) SetPersistence(writer io.Writer) {
+	c.persistence = writer
+}
+
+// SetConsistency changes the consistency level of the client.
+//
+// When consistency is set to STRONG_CONSISTENCY, all requests,
+// including GET, are sent to the leader.  This means that, assuming
+// the absence of leader failures, GET requests are guaranteed to see
+// the changes made by previous requests.
+//
+// When consistency is set to WEAK_CONSISTENCY, other requests
+// are still sent to the leader, but GET requests are sent to a
+// random server from the server pool.  This reduces the read
+// load on the leader, but it's not guaranteed that the GET requests
+// will see changes made by previous requests (they might have not
+// yet been committed on non-leader servers).
+func (c *Client) SetConsistency(consistency string) error {
+	if !(consistency == STRONG_CONSISTENCY || consistency == WEAK_CONSISTENCY) {
+		return errors.New("The argument must be either STRONG_CONSISTENCY or WEAK_CONSISTENCY.")
+	}
+	c.config.Consistency = consistency
+	return nil
+}
+
+// Sets the DialTimeout value
+func (c *Client) SetDialTimeout(d time.Duration) {
+	c.config.DialTimeout = d
+}
+
+// AddRootCA adds a root CA cert for the etcd client
+func (c *Client) AddRootCA(caCert string) error {
+	if c.httpClient == nil {
+		return errors.New("Client has not been initialized yet!")
+	}
+
+	certBytes, err := ioutil.ReadFile(caCert)
+	if err != nil {
+		return err
+	}
+
+	tr, ok := c.httpClient.Transport.(*http.Transport)
+
+	if !ok {
+		panic("AddRootCA(): Transport type assert should not fail")
+	}
+
+	if tr.TLSClientConfig.RootCAs == nil {
+		caCertPool := x509.NewCertPool()
+		ok = caCertPool.AppendCertsFromPEM(certBytes)
+		if ok {
+			tr.TLSClientConfig.RootCAs = caCertPool
+		}
+		tr.TLSClientConfig.InsecureSkipVerify = false
+	} else {
+		ok = tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(certBytes)
+	}
+
+	if !ok {
+		err = errors.New("Unable to load caCert")
+	}
+
+	c.config.CaCertFile = append(c.config.CaCertFile, caCert)
+	c.saveConfig()
+
+	return err
+}
+
+// SetCluster updates cluster information using the given machine list.
+func (c *Client) SetCluster(machines []string) bool {
+	success := c.internalSyncCluster(machines)
+	return success
+}
+
+func (c *Client) GetCluster() []string {
+	return c.cluster.Machines
+}
+
+// SyncCluster updates the cluster information using the internal machine list.
+func (c *Client) SyncCluster() bool {
+	return c.internalSyncCluster(c.cluster.Machines)
+}
+
+// internalSyncCluster syncs cluster information using the given machine list.
+func (c *Client) internalSyncCluster(machines []string) bool {
+	for _, machine := range machines {
+		httpPath := c.createHttpPath(machine, path.Join(version, "members"))
+		resp, err := c.httpClient.Get(httpPath)
+		if err != nil {
+			// try another machine in the cluster
+			continue
+		}
+
+		if resp.StatusCode != http.StatusOK { // fall-back to old endpoint
+			httpPath := c.createHttpPath(machine, path.Join(version, "machines"))
+			resp, err := c.httpClient.Get(httpPath)
+			if err != nil {
+				// try another machine in the cluster
+				continue
+			}
+			b, err := ioutil.ReadAll(resp.Body)
+			resp.Body.Close()
+			if err != nil {
+				// try another machine in the cluster
+				continue
+			}
+			// update Machines List
+			c.cluster.updateFromStr(string(b))
+		} else {
+			b, err := ioutil.ReadAll(resp.Body)
+			resp.Body.Close()
+			if err != nil {
+				// try another machine in the cluster
+				continue
+			}
+
+			var mCollection memberCollection
+			if err := json.Unmarshal(b, &mCollection); err != nil {
+				// try another machine
+				continue
+			}
+
+			urls := make([]string, 0)
+			for _, m := range mCollection {
+				urls = append(urls, m.ClientURLs...)
+			}
+
+			// update Machines List
+			c.cluster.updateFromStr(strings.Join(urls, ","))
+		}
+
+		logger.Debug("sync.machines ", c.cluster.Machines)
+		c.saveConfig()
+		return true
+	}
+
+	return false
+}
+
+// createHttpPath creates a complete HTTP URL.
+// serverName should contain both the host name and a port number, if any.
+func (c *Client) createHttpPath(serverName string, _path string) string {
+	u, err := url.Parse(serverName)
+	if err != nil {
+		panic(err)
+	}
+
+	u.Path = path.Join(u.Path, _path)
+
+	if u.Scheme == "" {
+		u.Scheme = "http"
+	}
+	return u.String()
+}
+
+// dial attempts to open a TCP connection to the provided address, explicitly
+// enabling keep-alives with a one-second interval.
+func (c *Client) dial(network, addr string) (net.Conn, error) {
+	conn, err := net.DialTimeout(network, addr, c.config.DialTimeout)
+	if err != nil {
+		return nil, err
+	}
+
+	tcpConn, ok := conn.(*net.TCPConn)
+	if !ok {
+		return nil, errors.New("Failed type-assertion of net.Conn as *net.TCPConn")
+	}
+
+	// Keep TCP alive to check whether or not the remote machine is down
+	if err = tcpConn.SetKeepAlive(true); err != nil {
+		return nil, err
+	}
+
+	if err = tcpConn.SetKeepAlivePeriod(time.Second); err != nil {
+		return nil, err
+	}
+
+	return tcpConn, nil
+}
+
+func (c *Client) OpenCURL() {
+	c.cURLch = make(chan string, defaultBufferSize)
+}
+
+func (c *Client) CloseCURL() {
+	c.cURLch = nil
+}
+
+func (c *Client) sendCURL(command string) {
+	go func() {
+		select {
+		case c.cURLch <- command:
+		default:
+		}
+	}()
+}
+
+func (c *Client) RecvCURL() string {
+	return <-c.cURLch
+}
+
+// saveConfig saves the current config using c.persistence.
+func (c *Client) saveConfig() error {
+	if c.persistence != nil {
+		b, err := json.Marshal(c)
+		if err != nil {
+			return err
+		}
+
+		_, err = c.persistence.Write(b)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MarshalJSON implements the Marshaller interface
+// as defined by the standard JSON package.
+func (c *Client) MarshalJSON() ([]byte, error) {
+	b, err := json.Marshal(struct {
+		Config  Config   `json:"config"`
+		Cluster *Cluster `json:"cluster"`
+	}{
+		Config:  c.config,
+		Cluster: c.cluster,
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
+// UnmarshalJSON implements the Unmarshaller interface
+// as defined by the standard JSON package.
+func (c *Client) UnmarshalJSON(b []byte) error {
+	temp := struct {
+		Config  Config   `json:"config"`
+		Cluster *Cluster `json:"cluster"`
+	}{}
+	err := json.Unmarshal(b, &temp)
+	if err != nil {
+		return err
+	}
+
+	c.cluster = temp.Cluster
+	c.config = temp.Config
+	return nil
+}
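
Usage note (editorial, not part of the commit): the client options above compose as follows. A minimal sketch, assuming a reachable endpoint at http://127.0.0.1:4001; the config.json path is illustrative:

    package main

    import (
        "log"
        "os"

        "github.com/coreos/go-etcd/etcd"
    )

    func main() {
        c := etcd.NewClient([]string{"http://127.0.0.1:4001"})

        // Route GETs through the leader so reads observe prior writes.
        if err := c.SetConsistency(etcd.STRONG_CONSISTENCY); err != nil {
            log.Fatal(err)
        }

        // Persist the config (cluster + settings) whenever it changes;
        // NewClientFromFile("config.json") can later restore it.
        fo, err := os.Create("config.json")
        if err != nil {
            log.Fatal(err)
        }
        defer fo.Close()
        c.SetPersistence(fo)

        // Refresh the machine list from the cluster itself.
        if !c.SyncCluster() {
            log.Println("could not sync cluster members")
        }
    }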

+ 108 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go

@@ -0,0 +1,108 @@
+package etcd
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/url"
+	"os"
+	"testing"
+)
+
+// To pass this test, we need to create a cluster of 3 machines
+// The server should be listening on localhost:4001, 4002, 4003
+func TestSync(t *testing.T) {
+	fmt.Println("Make sure there are three nodes at 0.0.0.0:4001-4003")
+
+	// Explicit trailing slash to ensure this doesn't reproduce:
+	// https://github.com/coreos/go-etcd/issues/82
+	c := NewClient([]string{"http://127.0.0.1:4001/"})
+
+	success := c.SyncCluster()
+	if !success {
+		t.Fatal("cannot sync machines")
+	}
+
+	for _, m := range c.GetCluster() {
+		u, err := url.Parse(m)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if u.Scheme != "http" {
+			t.Fatal("scheme must be http")
+		}
+
+		host, _, err := net.SplitHostPort(u.Host)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if host != "localhost" {
+			t.Fatal("Host must be localhost")
+		}
+	}
+
+	badMachines := []string{"abc", "edef"}
+
+	success = c.SetCluster(badMachines)
+
+	if success {
+		t.Fatal("should not sync on bad machines")
+	}
+
+	goodMachines := []string{"127.0.0.1:4002"}
+
+	success = c.SetCluster(goodMachines)
+
+	if !success {
+		t.Fatal("cannot sync machines")
+	} else {
+		fmt.Println(c.cluster.Machines)
+	}
+
+}
+
+func TestPersistence(t *testing.T) {
+	c := NewClient(nil)
+	c.SyncCluster()
+
+	fo, err := os.Create("config.json")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := fo.Close(); err != nil {
+			panic(err)
+		}
+	}()
+
+	c.SetPersistence(fo)
+	err = c.saveConfig()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	c2, err := NewClientFromFile("config.json")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Verify that the two clients have the same config
+	b1, _ := json.Marshal(c)
+	b2, _ := json.Marshal(c2)
+
+	if string(b1) != string(b2) {
+		t.Fatalf("The two configs should be equal!")
+	}
+}
+
+func TestClientRetry(t *testing.T) {
+	c := NewClient([]string{"http://strange", "http://127.0.0.1:4001"})
+	// use first endpoint as the picked url
+	c.cluster.picked = 0
+	if _, err := c.Set("foo", "bar", 5); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := c.Delete("foo", true); err != nil {
+		t.Fatal(err)
+	}
+}

+ 37 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go

@@ -0,0 +1,37 @@
+package etcd
+
+import (
+	"math/rand"
+	"strings"
+)
+
+type Cluster struct {
+	Leader   string   `json:"leader"`
+	Machines []string `json:"machines"`
+	picked   int
+}
+
+func NewCluster(machines []string) *Cluster {
+	// if an empty slice was sent in then just assume HTTP 4001 on localhost
+	if len(machines) == 0 {
+		machines = []string{"http://127.0.0.1:4001"}
+	}
+
+	// default leader and machines
+	return &Cluster{
+		Leader:   "",
+		Machines: machines,
+		picked:   rand.Intn(len(machines)),
+	}
+}
+
+func (cl *Cluster) failure()     { cl.picked = rand.Intn(len(cl.Machines)) }
+func (cl *Cluster) pick() string { return cl.Machines[cl.picked] }
+
+func (cl *Cluster) updateFromStr(machines string) {
+	cl.Machines = strings.Split(machines, ",")
+	for i := range cl.Machines {
+		cl.Machines[i] = strings.TrimSpace(cl.Machines[i])
+	}
+	cl.picked = rand.Intn(len(cl.Machines))
+}

+ 34 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete.go

@@ -0,0 +1,34 @@
+package etcd
+
+import "fmt"
+
+func (c *Client) CompareAndDelete(key string, prevValue string, prevIndex uint64) (*Response, error) {
+	raw, err := c.RawCompareAndDelete(key, prevValue, prevIndex)
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+func (c *Client) RawCompareAndDelete(key string, prevValue string, prevIndex uint64) (*RawResponse, error) {
+	if prevValue == "" && prevIndex == 0 {
+		return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
+	}
+
+	options := Options{}
+	if prevValue != "" {
+		options["prevValue"] = prevValue
+	}
+	if prevIndex != 0 {
+		options["prevIndex"] = prevIndex
+	}
+
+	raw, err := c.delete(key, options)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw, err
+}
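
Usage note (editorial, not part of the commit): CompareAndDelete is the delete-side twin of CompareAndSwap in the next file; the delete succeeds only while the guard still matches. A sketch of delete-if-unchanged, with a hypothetical lock key:

    package example

    import "github.com/coreos/go-etcd/etcd"

    // releaseLock deletes key only if it still holds the value this
    // process wrote; prevIndex 0 means "guard on value alone".
    func releaseLock(c *etcd.Client, key, owner string) error {
        _, err := c.CompareAndDelete(key, owner, 0)
        return err // non-nil if another writer changed or removed the key
    }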

+ 46 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go

@@ -0,0 +1,46 @@
+package etcd
+
+import (
+	"testing"
+)
+
+func TestCompareAndDelete(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	c.Set("foo", "bar", 5)
+
+	// This should succeed with a correct prevValue
+	resp, err := c.CompareAndDelete("foo", "bar", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("CompareAndDelete 1 prevNode failed: %#v", resp)
+	}
+
+	resp, _ = c.Set("foo", "bar", 5)
+	// This should fail because it gives an incorrect prevValue
+	_, err = c.CompareAndDelete("foo", "xxx", 0)
+	if err == nil {
+		t.Fatalf("CompareAndDelete 2 should have failed.  The response is: %#v", resp)
+	}
+
+	// This should succeed because it gives a correct prevIndex
+	resp, err = c.CompareAndDelete("foo", "", resp.Node.ModifiedIndex)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("CompareAndDelete 3 prevNode failed: %#v", resp)
+	}
+
+	c.Set("foo", "bar", 5)
+	// This should fail because it gives an incorrect prevIndex
+	resp, err = c.CompareAndDelete("foo", "", 29817514)
+	if err == nil {
+		t.Fatalf("CompareAndDelete 4 should have failed.  The response is: %#v", resp)
+	}
+}

+ 36 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap.go

@@ -0,0 +1,36 @@
+package etcd
+
+import "fmt"
+
+func (c *Client) CompareAndSwap(key string, value string, ttl uint64,
+	prevValue string, prevIndex uint64) (*Response, error) {
+	raw, err := c.RawCompareAndSwap(key, value, ttl, prevValue, prevIndex)
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+func (c *Client) RawCompareAndSwap(key string, value string, ttl uint64,
+	prevValue string, prevIndex uint64) (*RawResponse, error) {
+	if prevValue == "" && prevIndex == 0 {
+		return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
+	}
+
+	options := Options{}
+	if prevValue != "" {
+		options["prevValue"] = prevValue
+	}
+	if prevIndex != 0 {
+		options["prevIndex"] = prevIndex
+	}
+
+	raw, err := c.put(key, value, ttl, options)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw, err
+}
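
Usage note (editorial, not part of the commit): the usual optimistic-concurrency loop guards CompareAndSwap with the ModifiedIndex from a prior Get. A sketch for a hypothetical integer counter key; a production version would distinguish compare failures from network errors before retrying:

    package example

    import (
        "strconv"

        "github.com/coreos/go-etcd/etcd"
    )

    // increment bumps an integer stored at key, retrying whenever a
    // concurrent writer wins the CompareAndSwap race.
    func increment(c *etcd.Client, key string) error {
        for {
            resp, err := c.Get(key, false, false)
            if err != nil {
                return err
            }
            n, err := strconv.Atoi(resp.Node.Value)
            if err != nil {
                return err
            }
            // Swap only if nobody has touched the key since our read;
            // ttl 0 simply omits the TTL parameter.
            _, err = c.CompareAndSwap(key, strconv.Itoa(n+1), 0, "", resp.Node.ModifiedIndex)
            if err == nil {
                return nil
            }
        }
    }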

+ 57 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go

@@ -0,0 +1,57 @@
+package etcd
+
+import (
+	"testing"
+)
+
+func TestCompareAndSwap(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	c.Set("foo", "bar", 5)
+
+	// This should succeed
+	resp, err := c.CompareAndSwap("foo", "bar2", 5, "bar", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+		t.Fatalf("CompareAndSwap 1 failed: %#v", resp)
+	}
+
+	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("CompareAndSwap 1 prevNode failed: %#v", resp)
+	}
+
+	// This should fail because it gives an incorrect prevValue
+	resp, err = c.CompareAndSwap("foo", "bar3", 5, "xxx", 0)
+	if err == nil {
+		t.Fatalf("CompareAndSwap 2 should have failed.  The response is: %#v", resp)
+	}
+
+	resp, err = c.Set("foo", "bar", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This should succeed
+	resp, err = c.CompareAndSwap("foo", "bar2", 5, "", resp.Node.ModifiedIndex)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+		t.Fatalf("CompareAndSwap 3 failed: %#v", resp)
+	}
+
+	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
+	}
+
+	// This should fail because it gives an incorrect prevIndex
+	resp, err = c.CompareAndSwap("foo", "bar3", 5, "", 29817514)
+	if err == nil {
+		t.Fatalf("CompareAndSwap 4 should have failed.  The response is: %#v", resp)
+	}
+}

+ 55 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug.go

@@ -0,0 +1,55 @@
+package etcd
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"strings"
+)
+
+var logger *etcdLogger
+
+func SetLogger(l *log.Logger) {
+	logger = &etcdLogger{l}
+}
+
+func GetLogger() *log.Logger {
+	return logger.log
+}
+
+type etcdLogger struct {
+	log *log.Logger
+}
+
+func (p *etcdLogger) Debug(args ...interface{}) {
+	msg := "DEBUG: " + fmt.Sprint(args...)
+	p.log.Println(msg)
+}
+
+func (p *etcdLogger) Debugf(f string, args ...interface{}) {
+	msg := "DEBUG: " + fmt.Sprintf(f, args...)
+	// Append newline if necessary
+	if !strings.HasSuffix(msg, "\n") {
+		msg = msg + "\n"
+	}
+	p.log.Print(msg)
+}
+
+func (p *etcdLogger) Warning(args ...interface{}) {
+	msg := "WARNING: " + fmt.Sprint(args...)
+	p.log.Println(msg)
+}
+
+func (p *etcdLogger) Warningf(f string, args ...interface{}) {
+	msg := "WARNING: " + fmt.Sprintf(f, args...)
+	// Append newline if necessary
+	if !strings.HasSuffix(msg, "\n") {
+		msg = msg + "\n"
+	}
+	p.log.Print(msg)
+}
+
+func init() {
+	// Default logger uses the go default log.
+	SetLogger(log.New(ioutil.Discard, "go-etcd", log.LstdFlags))
+}
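
Usage note (editorial, not part of the commit): the package-level logger defaults to ioutil.Discard, so the Debug/Warning calls sprinkled through requests.go are silent. A sketch that makes them visible:

    package main

    import (
        "log"
        "os"

        "github.com/coreos/go-etcd/etcd"
    )

    func main() {
        // Send go-etcd's internal DEBUG/WARNING output to stderr.
        etcd.SetLogger(log.New(os.Stderr, "go-etcd ", log.LstdFlags))

        c := etcd.NewClient(nil)
        c.Get("foo", false, false) // the request/response trace now reaches stderr
    }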

+ 28 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/debug_test.go

@@ -0,0 +1,28 @@
+package etcd
+
+import (
+	"testing"
+)
+
+type Foo struct{}
+type Bar struct {
+	one string
+	two int
+}
+
+// Tests that logs don't panic with arbitrary interfaces
+func TestDebug(t *testing.T) {
+	f := &Foo{}
+	b := &Bar{"asfd", 3}
+	for _, test := range []interface{}{
+		1234,
+		"asdf",
+		f,
+		b,
+	} {
+		logger.Debug(test)
+		logger.Debugf("something, %s", test)
+		logger.Warning(test)
+		logger.Warningf("something, %s", test)
+	}
+}

+ 40 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete.go

@@ -0,0 +1,40 @@
+package etcd
+
+// Delete deletes the given key.
+//
+// When recursive is set to false, if the key points to a
+// directory the method will fail.
+//
+// When recursive is set to true, if the key points to a file,
+// the file will be deleted; if the key points to a directory,
+// then everything under the directory (including all child directories)
+// will be deleted.
+func (c *Client) Delete(key string, recursive bool) (*Response, error) {
+	raw, err := c.RawDelete(key, recursive, false)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// DeleteDir deletes an empty directory or a key value pair
+func (c *Client) DeleteDir(key string) (*Response, error) {
+	raw, err := c.RawDelete(key, false, true)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+func (c *Client) RawDelete(key string, recursive bool, dir bool) (*RawResponse, error) {
+	ops := Options{
+		"recursive": recursive,
+		"dir":       dir,
+	}
+
+	return c.delete(key, ops)
+}
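
Usage note (editorial, not part of the commit): the Delete/DeleteDir split mirrors etcd's recursive and dir flags. A sketch of the three cases, with hypothetical keys under a cfg directory:

    package example

    import "github.com/coreos/go-etcd/etcd"

    func cleanup(c *etcd.Client) {
        c.Delete("cfg/timeout", false) // single key; fails if it is a directory
        c.DeleteDir("cfg/unused")      // empty directory (or key); fails on a non-empty dir
        c.Delete("cfg", true)          // directory and everything beneath it
    }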

+ 81 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/delete_test.go

@@ -0,0 +1,81 @@
+package etcd
+
+import (
+	"testing"
+)
+
+func TestDelete(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	c.Set("foo", "bar", 5)
+	resp, err := c.Delete("foo", false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Node.Value == "") {
+		t.Fatalf("Delete failed with %s", resp.Node.Value)
+	}
+
+	if !(resp.PrevNode.Value == "bar") {
+		t.Fatalf("Delete PrevNode failed with %s", resp.PrevNode.Value)
+	}
+
+	resp, err = c.Delete("foo", false)
+	if err == nil {
+		t.Fatalf("Delete should have failed because the key foo did not exist.  "+
+			"The response was: %v", resp)
+	}
+}
+
+func TestDeleteAll(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+		c.Delete("fooDir", true)
+	}()
+
+	c.SetDir("foo", 5)
+	// test delete an empty dir
+	resp, err := c.DeleteDir("foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Node.Value == "") {
+		t.Fatalf("DeleteAll 1 failed: %#v", resp)
+	}
+
+	if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
+		t.Fatalf("DeleteAll 1 PrevNode failed: %#v", resp)
+	}
+
+	c.CreateDir("fooDir", 5)
+	c.Set("fooDir/foo", "bar", 5)
+	_, err = c.DeleteDir("fooDir")
+	if err == nil {
+		t.Fatal("should not be able to delete a non-empty dir with DeleteDir")
+	}
+
+	resp, err = c.Delete("fooDir", true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Node.Value == "") {
+		t.Fatalf("DeleteAll 2 failed: %#v", resp)
+	}
+
+	if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
+		t.Fatalf("DeleteAll 2 PrevNode failed: %#v", resp)
+	}
+
+	resp, err = c.Delete("foo", true)
+	if err == nil {
+		t.Fatalf("DeleteAll should have failed because the key foo did not exist.  "+
+			"The response was: %v", resp)
+	}
+}

+ 49 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/error.go

@@ -0,0 +1,49 @@
+package etcd
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+const (
+	ErrCodeEtcdNotReachable    = 501
+	ErrCodeUnhandledHTTPStatus = 502
+)
+
+var (
+	errorMap = map[int]string{
+		ErrCodeEtcdNotReachable: "All the given peers are not reachable",
+	}
+)
+
+type EtcdError struct {
+	ErrorCode int    `json:"errorCode"`
+	Message   string `json:"message"`
+	Cause     string `json:"cause,omitempty"`
+	Index     uint64 `json:"index"`
+}
+
+func (e EtcdError) Error() string {
+	return fmt.Sprintf("%v: %v (%v) [%v]", e.ErrorCode, e.Message, e.Cause, e.Index)
+}
+
+func newError(errorCode int, cause string, index uint64) *EtcdError {
+	return &EtcdError{
+		ErrorCode: errorCode,
+		Message:   errorMap[errorCode],
+		Cause:     cause,
+		Index:     index,
+	}
+}
+
+func handleError(b []byte) error {
+	etcdErr := new(EtcdError)
+
+	err := json.Unmarshal(b, etcdErr)
+	if err != nil {
+		logger.Warningf("cannot unmarshal etcd error: %v", err)
+		return err
+	}
+
+	return etcdErr
+}

+ 32 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get.go

@@ -0,0 +1,32 @@
+package etcd
+
+// Get gets the file or directory associated with the given key.
+// If the key points to a directory, files and directories under
+// it will be returned in sorted or unsorted order, depending on
+// the sort flag.
+// If recursive is set to false, contents under child directories
+// will not be returned.
+// If recursive is set to true, all the contents will be returned.
+func (c *Client) Get(key string, sort, recursive bool) (*Response, error) {
+	raw, err := c.RawGet(key, sort, recursive)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+func (c *Client) RawGet(key string, sort, recursive bool) (*RawResponse, error) {
+	var q bool
+	if c.config.Consistency == STRONG_CONSISTENCY {
+		q = true
+	}
+	ops := Options{
+		"recursive": recursive,
+		"sorted":    sort,
+		"quorum":    q,
+	}
+
+	return c.get(key, ops)
+}

+ 131 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/get_test.go

@@ -0,0 +1,131 @@
+package etcd
+
+import (
+	"reflect"
+	"testing"
+)
+
+// cleanNode scrubs Expiration, ModifiedIndex and CreatedIndex of a node.
+func cleanNode(n *Node) {
+	n.Expiration = nil
+	n.ModifiedIndex = 0
+	n.CreatedIndex = 0
+}
+
+// cleanResult scrubs a result object two levels deep of Expiration,
+// ModifiedIndex and CreatedIndex.
+func cleanResult(result *Response) {
+	//  TODO(philips): make this recursive.
+	cleanNode(result.Node)
+	for i := range result.Node.Nodes {
+		cleanNode(result.Node.Nodes[i])
+		for j := range result.Node.Nodes[i].Nodes {
+			cleanNode(result.Node.Nodes[i].Nodes[j])
+		}
+	}
+}
+
+func TestGet(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	c.Set("foo", "bar", 5)
+
+	result, err := c.Get("foo", false, false)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if result.Node.Key != "/foo" || result.Node.Value != "bar" {
+		t.Fatalf("Get failed with %s %s %v", result.Node.Key, result.Node.Value, result.Node.TTL)
+	}
+
+	result, err = c.Get("goo", false, false)
+	if err == nil {
+		t.Fatalf("should not be able to get a nonexistent key")
+	}
+}
+
+func TestGetAll(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+	}()
+
+	c.CreateDir("fooDir", 5)
+	c.Set("fooDir/k0", "v0", 5)
+	c.Set("fooDir/k1", "v1", 5)
+
+	// Return kv-pairs in sorted order
+	result, err := c.Get("fooDir", true, false)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected := Nodes{
+		&Node{
+			Key:   "/fooDir/k0",
+			Value: "v0",
+			TTL:   5,
+		},
+		&Node{
+			Key:   "/fooDir/k1",
+			Value: "v1",
+			TTL:   5,
+		},
+	}
+
+	cleanResult(result)
+
+	if !reflect.DeepEqual(result.Node.Nodes, expected) {
+		t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
+	}
+
+	// Test the `recursive` option
+	c.CreateDir("fooDir/childDir", 5)
+	c.Set("fooDir/childDir/k2", "v2", 5)
+
+	// Return kv-pairs in sorted order
+	result, err = c.Get("fooDir", true, true)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected = Nodes{
+		&Node{
+			Key: "/fooDir/childDir",
+			Dir: true,
+			Nodes: Nodes{
+				&Node{
+					Key:   "/fooDir/childDir/k2",
+					Value: "v2",
+					TTL:   5,
+				},
+			},
+			TTL: 5,
+		},
+		&Node{
+			Key:   "/fooDir/k0",
+			Value: "v0",
+			TTL:   5,
+		},
+		&Node{
+			Key:   "/fooDir/k1",
+			Value: "v1",
+			TTL:   5,
+		},
+	}
+
+	cleanResult(result)
+
+	if !reflect.DeepEqual(result.Node.Nodes, expected) {
+		t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
+	}
+}

+ 30 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member.go

@@ -0,0 +1,30 @@
+package etcd
+
+import "encoding/json"
+
+type Member struct {
+	ID         string   `json:"id"`
+	Name       string   `json:"name"`
+	PeerURLs   []string `json:"peerURLs"`
+	ClientURLs []string `json:"clientURLs"`
+}
+
+type memberCollection []Member
+
+func (c *memberCollection) UnmarshalJSON(data []byte) error {
+	d := struct {
+		Members []Member
+	}{}
+
+	if err := json.Unmarshal(data, &d); err != nil {
+		return err
+	}
+
+	if d.Members == nil {
+		*c = make([]Member, 0)
+		return nil
+	}
+
+	*c = d.Members
+	return nil
+}

+ 71 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member_test.go

@@ -0,0 +1,71 @@
+package etcd
+
+import (
+	"encoding/json"
+	"reflect"
+	"testing"
+)
+
+func TestMemberCollectionUnmarshal(t *testing.T) {
+	tests := []struct {
+		body []byte
+		want memberCollection
+	}{
+		{
+			body: []byte(`{"members":[]}`),
+			want: memberCollection([]Member{}),
+		},
+		{
+			body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+			want: memberCollection(
+				[]Member{
+					{
+						ID:   "2745e2525fce8fe",
+						Name: "node3",
+						PeerURLs: []string{
+							"http://127.0.0.1:7003",
+						},
+						ClientURLs: []string{
+							"http://127.0.0.1:4003",
+						},
+					},
+					{
+						ID:   "42134f434382925",
+						Name: "node1",
+						PeerURLs: []string{
+							"http://127.0.0.1:2380",
+							"http://127.0.0.1:7001",
+						},
+						ClientURLs: []string{
+							"http://127.0.0.1:2379",
+							"http://127.0.0.1:4001",
+						},
+					},
+					{
+						ID:   "94088180e21eb87b",
+						Name: "node2",
+						PeerURLs: []string{
+							"http://127.0.0.1:7002",
+						},
+						ClientURLs: []string{
+							"http://127.0.0.1:4002",
+						},
+					},
+				},
+			),
+		},
+	}
+
+	for i, tt := range tests {
+		var got memberCollection
+		err := json.Unmarshal(tt.body, &got)
+		if err != nil {
+			t.Errorf("#%d: unexpected error: %v", i, err)
+			continue
+		}
+
+		if !reflect.DeepEqual(tt.want, got) {
+			t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.want, got)
+		}
+	}
+}

+ 72 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/options.go

@@ -0,0 +1,72 @@
+package etcd
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+)
+
+type Options map[string]interface{}
+
+// An internally-used data structure that represents a mapping
+// between valid options and their kinds
+type validOptions map[string]reflect.Kind
+
+// Valid options for GET, PUT, POST, DELETE
+// Using CAPITALIZED_UNDERSCORE to emphasize that these
+// values are meant to be used as constants.
+var (
+	VALID_GET_OPTIONS = validOptions{
+		"recursive": reflect.Bool,
+		"quorum":    reflect.Bool,
+		"sorted":    reflect.Bool,
+		"wait":      reflect.Bool,
+		"waitIndex": reflect.Uint64,
+	}
+
+	VALID_PUT_OPTIONS = validOptions{
+		"prevValue": reflect.String,
+		"prevIndex": reflect.Uint64,
+		"prevExist": reflect.Bool,
+		"dir":       reflect.Bool,
+	}
+
+	VALID_POST_OPTIONS = validOptions{}
+
+	VALID_DELETE_OPTIONS = validOptions{
+		"recursive": reflect.Bool,
+		"dir":       reflect.Bool,
+		"prevValue": reflect.String,
+		"prevIndex": reflect.Uint64,
+	}
+)
+
+// Convert options to a string of HTML parameters
+func (ops Options) toParameters(validOps validOptions) (string, error) {
+	p := "?"
+	values := url.Values{}
+
+	if ops == nil {
+		return "", nil
+	}
+
+	for k, v := range ops {
+		// Check if the given option is valid (that it exists)
+		kind := validOps[k]
+		if kind == reflect.Invalid {
+			return "", fmt.Errorf("Invalid option: %v", k)
+		}
+
+		// Check if the given option is of the valid type
+		t := reflect.TypeOf(v)
+		if kind != t.Kind() {
+			return "", fmt.Errorf("Option %s should be of %v kind, not of %v kind.",
+				k, kind, t.Kind())
+		}
+
+		values.Set(k, fmt.Sprintf("%v", v))
+	}
+
+	p += values.Encode()
+	return p, nil
+}

+ 403 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go

@@ -0,0 +1,403 @@
+package etcd
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"path"
+	"strings"
+	"sync"
+	"time"
+)
+
+// Errors introduced by handling requests
+var (
+	ErrRequestCancelled = errors.New("sending request is cancelled")
+)
+
+type RawRequest struct {
+	Method       string
+	RelativePath string
+	Values       url.Values
+	Cancel       <-chan bool
+}
+
+// NewRawRequest returns a new RawRequest
+func NewRawRequest(method, relativePath string, values url.Values, cancel <-chan bool) *RawRequest {
+	return &RawRequest{
+		Method:       method,
+		RelativePath: relativePath,
+		Values:       values,
+		Cancel:       cancel,
+	}
+}
+
+// getCancelable issues a cancelable GET request
+func (c *Client) getCancelable(key string, options Options,
+	cancel <-chan bool) (*RawResponse, error) {
+	logger.Debugf("get %s [%s]", key, c.cluster.pick())
+	p := keyToPath(key)
+
+	str, err := options.toParameters(VALID_GET_OPTIONS)
+	if err != nil {
+		return nil, err
+	}
+	p += str
+
+	req := NewRawRequest("GET", p, nil, cancel)
+	resp, err := c.SendRequest(req)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+// get issues a GET request
+func (c *Client) get(key string, options Options) (*RawResponse, error) {
+	return c.getCancelable(key, options, nil)
+}
+
+// put issues a PUT request
+func (c *Client) put(key string, value string, ttl uint64,
+	options Options) (*RawResponse, error) {
+
+	logger.Debugf("put %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick())
+	p := keyToPath(key)
+
+	str, err := options.toParameters(VALID_PUT_OPTIONS)
+	if err != nil {
+		return nil, err
+	}
+	p += str
+
+	req := NewRawRequest("PUT", p, buildValues(value, ttl), nil)
+	resp, err := c.SendRequest(req)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+// post issues a POST request
+func (c *Client) post(key string, value string, ttl uint64) (*RawResponse, error) {
+	logger.Debugf("post %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick())
+	p := keyToPath(key)
+
+	req := NewRawRequest("POST", p, buildValues(value, ttl), nil)
+	resp, err := c.SendRequest(req)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+// delete issues a DELETE request
+func (c *Client) delete(key string, options Options) (*RawResponse, error) {
+	logger.Debugf("delete %s [%s]", key, c.cluster.pick())
+	p := keyToPath(key)
+
+	str, err := options.toParameters(VALID_DELETE_OPTIONS)
+	if err != nil {
+		return nil, err
+	}
+	p += str
+
+	req := NewRawRequest("DELETE", p, nil, nil)
+	resp, err := c.SendRequest(req)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+// SendRequest sends a HTTP request and returns a Response as defined by etcd
+func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) {
+	var req *http.Request
+	var resp *http.Response
+	var httpPath string
+	var err error
+	var respBody []byte
+
+	var numReqs = 1
+
+	checkRetry := c.CheckRetry
+	if checkRetry == nil {
+		checkRetry = DefaultCheckRetry
+	}
+
+	cancelled := make(chan bool, 1)
+	reqLock := new(sync.Mutex)
+
+	if rr.Cancel != nil {
+		cancelRoutine := make(chan bool)
+		defer close(cancelRoutine)
+
+		go func() {
+			select {
+			case <-rr.Cancel:
+				cancelled <- true
+				logger.Debug("send.request is cancelled")
+			case <-cancelRoutine:
+				return
+			}
+
+			// Repeat canceling request until this thread is stopped
+			// because we have no idea about whether it succeeds.
+			for {
+				reqLock.Lock()
+				c.httpClient.Transport.(*http.Transport).CancelRequest(req)
+				reqLock.Unlock()
+
+				select {
+				case <-time.After(100 * time.Millisecond):
+				case <-cancelRoutine:
+					return
+				}
+			}
+		}()
+	}
+
+	// If we connect to a follower and consistency is required, retry until
+	// we connect to a leader
+	sleep := 25 * time.Millisecond
+	maxSleep := time.Second
+
+	for attempt := 0; ; attempt++ {
+		if attempt > 0 {
+			select {
+			case <-cancelled:
+				return nil, ErrRequestCancelled
+			case <-time.After(sleep):
+				sleep = sleep * 2
+				if sleep > maxSleep {
+					sleep = maxSleep
+				}
+			}
+		}
+
+		logger.Debug("Connecting to etcd: attempt ", attempt+1, " for ", rr.RelativePath)
+
+		// get httpPath if not set
+		if httpPath == "" {
+			httpPath = c.getHttpPath(rr.RelativePath)
+		}
+
+		// Return a cURL command if curlChan is set
+		if c.cURLch != nil {
+			command := fmt.Sprintf("curl -X %s %s", rr.Method, httpPath)
+			for key, value := range rr.Values {
+				command += fmt.Sprintf(" -d %s=%s", key, value[0])
+			}
+			if c.credentials != nil {
+				command += fmt.Sprintf(" -u %s", c.credentials.username)
+			}
+			c.sendCURL(command)
+		}
+
+		logger.Debug("send.request.to ", httpPath, " | method ", rr.Method)
+
+		req, err := func() (*http.Request, error) {
+			reqLock.Lock()
+			defer reqLock.Unlock()
+
+			if rr.Values == nil {
+				if req, err = http.NewRequest(rr.Method, httpPath, nil); err != nil {
+					return nil, err
+				}
+			} else {
+				body := strings.NewReader(rr.Values.Encode())
+				if req, err = http.NewRequest(rr.Method, httpPath, body); err != nil {
+					return nil, err
+				}
+
+				req.Header.Set("Content-Type",
+					"application/x-www-form-urlencoded; param=value")
+			}
+			return req, nil
+		}()
+
+		if err != nil {
+			return nil, err
+		}
+
+		if c.credentials != nil {
+			req.SetBasicAuth(c.credentials.username, c.credentials.password)
+		}
+
+		resp, err = c.httpClient.Do(req)
+		// clear previous httpPath
+		httpPath = ""
+		defer func() {
+			if resp != nil {
+				resp.Body.Close()
+			}
+		}()
+
+		// If the request was cancelled, return ErrRequestCancelled directly
+		select {
+		case <-cancelled:
+			return nil, ErrRequestCancelled
+		default:
+		}
+
+		numReqs++
+
+		// network error, change a machine!
+		if err != nil {
+			logger.Debug("network error: ", err.Error())
+			lastResp := http.Response{}
+			if checkErr := checkRetry(c.cluster, numReqs, lastResp, err); checkErr != nil {
+				return nil, checkErr
+			}
+
+			c.cluster.failure()
+			continue
+		}
+
+		// if there is no error, it should receive response
+		logger.Debug("recv.response.from ", httpPath)
+
+		if validHttpStatusCode[resp.StatusCode] {
+			// try to read byte code and break the loop
+			respBody, err = ioutil.ReadAll(resp.Body)
+			if err == nil {
+				logger.Debug("recv.success ", httpPath)
+				break
+			}
+			// ReadAll error may be caused due to cancel request
+			select {
+			case <-cancelled:
+				return nil, ErrRequestCancelled
+			default:
+			}
+
+			if err == io.ErrUnexpectedEOF {
+				// underlying connection was closed prematurely, probably by timeout
+				// TODO: empty body or unexpectedEOF can cause http.Transport to get hosed;
+				// this allows the client to detect that and take evasive action. Need
+				// to revisit once code.google.com/p/go/issues/detail?id=8648 gets fixed.
+				respBody = []byte{}
+				break
+			}
+		}
+
+		if resp.StatusCode == http.StatusTemporaryRedirect {
+			u, err := resp.Location()
+
+			if err != nil {
+				logger.Warning(err)
+			} else {
+				// set httpPath for following redirection
+				httpPath = u.String()
+			}
+			resp.Body.Close()
+			continue
+		}
+
+		if checkErr := checkRetry(c.cluster, numReqs, *resp,
+			errors.New("Unexpected HTTP status code")); checkErr != nil {
+			return nil, checkErr
+		}
+		resp.Body.Close()
+	}
+
+	r := &RawResponse{
+		StatusCode: resp.StatusCode,
+		Body:       respBody,
+		Header:     resp.Header,
+	}
+
+	return r, nil
+}
+
+// DefaultCheckRetry defines the retrying behaviour for bad HTTP requests
+// If we have retried 2 * machine number, stop retrying.
+// If status code is InternalServerError, sleep for 200ms.
+func DefaultCheckRetry(cluster *Cluster, numReqs int, lastResp http.Response,
+	err error) error {
+
+	if numReqs > 2*len(cluster.Machines) {
+		errStr := fmt.Sprintf("failed to propose on members %v twice [last error: %v]", cluster.Machines, err)
+		return newError(ErrCodeEtcdNotReachable, errStr, 0)
+	}
+
+	if isEmptyResponse(lastResp) {
+		// always retry if it failed to get response from one machine
+		return nil
+	}
+	if !shouldRetry(lastResp) {
+		body := []byte("nil")
+		if lastResp.Body != nil {
+			if b, err := ioutil.ReadAll(lastResp.Body); err == nil {
+				body = b
+			}
+		}
+		errStr := fmt.Sprintf("unhandled http status [%s] with body [%s]", http.StatusText(lastResp.StatusCode), body)
+		return newError(ErrCodeUnhandledHTTPStatus, errStr, 0)
+	}
+	// sleep some time and expect leader election finish
+	time.Sleep(time.Millisecond * 200)
+	logger.Warning("bad response status code", lastResp.StatusCode)
+	return nil
+}
+
+func isEmptyResponse(r http.Response) bool { return r.StatusCode == 0 }
+
+// shouldRetry reports whether the response deserves a retry.
+func shouldRetry(r http.Response) bool {
+	// TODO: only retry when the cluster is in leader election
+	// We cannot do it exactly because etcd doesn't support it well.
+	return r.StatusCode == http.StatusInternalServerError
+}
+
+func (c *Client) getHttpPath(s ...string) string {
+	fullPath := c.cluster.pick() + "/" + version
+	for _, seg := range s {
+		fullPath = fullPath + "/" + seg
+	}
+	return fullPath
+}
+
+// buildValues builds a url.Values map according to the given value and ttl
+func buildValues(value string, ttl uint64) url.Values {
+	v := url.Values{}
+
+	if value != "" {
+		v.Set("value", value)
+	}
+
+	if ttl > 0 {
+		v.Set("ttl", fmt.Sprintf("%v", ttl))
+	}
+
+	return v
+}
+
+// convert a key string to an HTTP path (excluding the version), including URL escaping
+// for example: key[foo] -> path[keys/foo]
+// key[/%z] -> path[keys/%25z]
+// key[/] -> path[keys/]
+func keyToPath(key string) string {
+	// URL-escape our key, except for slashes
+	p := strings.Replace(url.QueryEscape(path.Join("keys", key)), "%2F", "/", -1)
+
+	// corner case: if key is "/" or "//" etc.
+	// path.Join will strip the trailing "/"
+	// we need to add it back
+	if p == "keys" {
+		p = "keys/"
+	}
+
+	return p
+}

+ 22 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests_test.go

@@ -0,0 +1,22 @@
+package etcd
+
+import "testing"
+
+func TestKeyToPath(t *testing.T) {
+	tests := []struct {
+		key   string
+		wpath string
+	}{
+		{"", "keys/"},
+		{"foo", "keys/foo"},
+		{"foo/bar", "keys/foo/bar"},
+		{"%z", "keys/%25z"},
+		{"/", "keys/"},
+	}
+	for i, tt := range tests {
+		path := keyToPath(tt.key)
+		if path != tt.wpath {
+			t.Errorf("#%d: path = %s, want %s", i, path, tt.wpath)
+		}
+	}
+}

+ 89 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/response.go

@@ -0,0 +1,89 @@
+package etcd
+
+import (
+	"encoding/json"
+	"net/http"
+	"strconv"
+	"time"
+)
+
+const (
+	rawResponse = iota
+	normalResponse
+)
+
+type responseType int
+
+type RawResponse struct {
+	StatusCode int
+	Body       []byte
+	Header     http.Header
+}
+
+var (
+	validHttpStatusCode = map[int]bool{
+		http.StatusCreated:            true,
+		http.StatusOK:                 true,
+		http.StatusBadRequest:         true,
+		http.StatusNotFound:           true,
+		http.StatusPreconditionFailed: true,
+		http.StatusForbidden:          true,
+	}
+)
+
+// Unmarshal parses RawResponse and stores the result in Response
+func (rr *RawResponse) Unmarshal() (*Response, error) {
+	if rr.StatusCode != http.StatusOK && rr.StatusCode != http.StatusCreated {
+		return nil, handleError(rr.Body)
+	}
+
+	resp := new(Response)
+
+	err := json.Unmarshal(rr.Body, resp)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// attach index and term to response
+	resp.EtcdIndex, _ = strconv.ParseUint(rr.Header.Get("X-Etcd-Index"), 10, 64)
+	resp.RaftIndex, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Index"), 10, 64)
+	resp.RaftTerm, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Term"), 10, 64)
+
+	return resp, nil
+}
+
+type Response struct {
+	Action    string `json:"action"`
+	Node      *Node  `json:"node"`
+	PrevNode  *Node  `json:"prevNode,omitempty"`
+	EtcdIndex uint64 `json:"etcdIndex"`
+	RaftIndex uint64 `json:"raftIndex"`
+	RaftTerm  uint64 `json:"raftTerm"`
+}
+
+type Node struct {
+	Key           string     `json:"key,omitempty"`
+	Value         string     `json:"value,omitempty"`
+	Dir           bool       `json:"dir,omitempty"`
+	Expiration    *time.Time `json:"expiration,omitempty"`
+	TTL           int64      `json:"ttl,omitempty"`
+	Nodes         Nodes      `json:"nodes,omitempty"`
+	ModifiedIndex uint64     `json:"modifiedIndex,omitempty"`
+	CreatedIndex  uint64     `json:"createdIndex,omitempty"`
+}
+
+type Nodes []*Node
+
+// interfaces for sorting
+func (ns Nodes) Len() int {
+	return len(ns)
+}
+
+func (ns Nodes) Less(i, j int) bool {
+	return ns[i].Key < ns[j].Key
+}
+
+func (ns Nodes) Swap(i, j int) {
+	ns[i], ns[j] = ns[j], ns[i]
+}

+ 42 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go

@@ -0,0 +1,42 @@
+package etcd
+
+import (
+	"fmt"
+	"testing"
+)
+
+func TestSetCurlChan(t *testing.T) {
+	c := NewClient(nil)
+	c.OpenCURL()
+
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	_, err := c.Set("foo", "bar", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected := fmt.Sprintf("curl -X PUT %s/v2/keys/foo -d value=bar -d ttl=5",
+		c.cluster.pick())
+	actual := c.RecvCURL()
+	if expected != actual {
+		t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
+			actual, expected)
+	}
+
+	c.SetConsistency(STRONG_CONSISTENCY)
+	_, err = c.Get("foo", false, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected = fmt.Sprintf("curl -X GET %s/v2/keys/foo?quorum=true&recursive=false&sorted=false",
+		c.cluster.pick())
+	actual = c.RecvCURL()
+	if expected != actual {
+		t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
+			actual, expected)
+	}
+}

+ 137 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create.go

@@ -0,0 +1,137 @@
+package etcd
+
+// Set sets the given key to the given value.
+// It will create a new key-value pair or replace the old one.
+// It will not replace an existing directory.
+func (c *Client) Set(key string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.RawSet(key, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// SetDir sets the given key to a directory.
+// It will create a new directory or replace the old key-value pair with a directory.
+// It will not replace an existing directory.
+func (c *Client) SetDir(key string, ttl uint64) (*Response, error) {
+	raw, err := c.RawSetDir(key, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// CreateDir creates a directory. It succeeds only if
+// the given key does not yet exist.
+func (c *Client) CreateDir(key string, ttl uint64) (*Response, error) {
+	raw, err := c.RawCreateDir(key, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// UpdateDir updates the given directory. It succeeds only if the
+// given key already exists.
+func (c *Client) UpdateDir(key string, ttl uint64) (*Response, error) {
+	raw, err := c.RawUpdateDir(key, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// Create creates a file with the given value under the given key.  It succeeds
+// only if the given key does not yet exist.
+func (c *Client) Create(key string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.RawCreate(key, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// CreateInOrder creates a file with a key that's guaranteed to be higher than other
+// keys in the given directory. It is useful for creating queues.
+func (c *Client) CreateInOrder(dir string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.RawCreateInOrder(dir, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// Update updates the given key to the given value.  It succeeds only if the
+// given key already exists.
+func (c *Client) Update(key string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.RawUpdate(key, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+func (c *Client) RawUpdateDir(key string, ttl uint64) (*RawResponse, error) {
+	ops := Options{
+		"prevExist": true,
+		"dir":       true,
+	}
+
+	return c.put(key, "", ttl, ops)
+}
+
+func (c *Client) RawCreateDir(key string, ttl uint64) (*RawResponse, error) {
+	ops := Options{
+		"prevExist": false,
+		"dir":       true,
+	}
+
+	return c.put(key, "", ttl, ops)
+}
+
+func (c *Client) RawSet(key string, value string, ttl uint64) (*RawResponse, error) {
+	return c.put(key, value, ttl, nil)
+}
+
+func (c *Client) RawSetDir(key string, ttl uint64) (*RawResponse, error) {
+	ops := Options{
+		"dir": true,
+	}
+
+	return c.put(key, "", ttl, ops)
+}
+
+func (c *Client) RawUpdate(key string, value string, ttl uint64) (*RawResponse, error) {
+	ops := Options{
+		"prevExist": true,
+	}
+
+	return c.put(key, value, ttl, ops)
+}
+
+func (c *Client) RawCreate(key string, value string, ttl uint64) (*RawResponse, error) {
+	ops := Options{
+		"prevExist": false,
+	}
+
+	return c.put(key, value, ttl, ops)
+}
+
+func (c *Client) RawCreateInOrder(dir string, value string, ttl uint64) (*RawResponse, error) {
+	return c.post(dir, value, ttl)
+}

+ 241 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/set_update_create_test.go

@@ -0,0 +1,241 @@
+package etcd
+
+import (
+	"testing"
+)
+
+func TestSet(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	resp, err := c.Set("foo", "bar", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Node.Key != "/foo" || resp.Node.Value != "bar" || resp.Node.TTL != 5 {
+		t.Fatalf("Set 1 failed: %#v", resp)
+	}
+	if resp.PrevNode != nil {
+		t.Fatalf("Set 1 PrevNode failed: %#v", resp)
+	}
+
+	resp, err = c.Set("foo", "bar2", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/foo" && resp.Node.Value == "bar2" && resp.Node.TTL == 5) {
+		t.Fatalf("Set 2 failed: %#v", resp)
+	}
+	if resp.PrevNode.Key != "/foo" || resp.PrevNode.Value != "bar" || resp.Node.TTL != 5 {
+		t.Fatalf("Set 2 PrevNode failed: %#v", resp)
+	}
+}
+
+func TestUpdate(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+		c.Delete("nonexistent", true)
+	}()
+
+	resp, err := c.Set("foo", "bar", 5)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This should succeed.
+	resp, err = c.Update("foo", "wakawaka", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "update" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+		t.Fatalf("Update 1 failed: %#v", resp)
+	}
+	if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.Node.TTL == 5) {
+		t.Fatalf("Update 1 prevValue failed: %#v", resp)
+	}
+
+	// This should fail because the key does not exist.
+	resp, err = c.Update("nonexistent", "whatever", 5)
+	if err == nil {
+		t.Fatalf("The key %v did not exist, so the update should have failed."+
+			"The response was: %#v", resp.Node.Key, resp)
+	}
+}
+
+func TestCreate(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("newKey", true)
+	}()
+
+	newKey := "/newKey"
+	newValue := "/newValue"
+
+	// This should succeed
+	resp, err := c.Create(newKey, newValue, 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "create" && resp.Node.Key == newKey &&
+		resp.Node.Value == newValue && resp.Node.TTL == 5) {
+		t.Fatalf("Create 1 failed: %#v", resp)
+	}
+	if resp.PrevNode != nil {
+		t.Fatalf("Create 1 PrevNode failed: %#v", resp)
+	}
+
+	// This should fail, because the key is already there
+	resp, err = c.Create(newKey, newValue, 5)
+	if err == nil {
+		t.Fatalf("The key %v did exist, so the creation should have failed."+
+			"The response was: %#v", resp.Node.Key, resp)
+	}
+}
+
+func TestCreateInOrder(t *testing.T) {
+	c := NewClient(nil)
+	dir := "/queue"
+	defer func() {
+		c.DeleteDir(dir)
+	}()
+
+	var firstKey, secondKey string
+
+	resp, err := c.CreateInOrder(dir, "1", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "create" && resp.Node.Value == "1" && resp.Node.TTL == 5) {
+		t.Fatalf("Create 1 failed: %#v", resp)
+	}
+
+	firstKey = resp.Node.Key
+
+	resp, err = c.CreateInOrder(dir, "2", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "create" && resp.Node.Value == "2" && resp.Node.TTL == 5) {
+		t.Fatalf("Create 2 failed: %#v", resp)
+	}
+
+	secondKey = resp.Node.Key
+
+	if firstKey >= secondKey {
+		t.Fatalf("Expected first key to be greater than second key, but %s is not greater than %s",
+			firstKey, secondKey)
+	}
+}
+
+func TestSetDir(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+		c.Delete("fooDir", true)
+	}()
+
+	resp, err := c.CreateDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/fooDir" && resp.Node.Value == "" && resp.Node.TTL == 5) {
+		t.Fatalf("SetDir 1 failed: %#v", resp)
+	}
+	if resp.PrevNode != nil {
+		t.Fatalf("SetDir 1 PrevNode failed: %#v", resp)
+	}
+
+	// This should fail because /fooDir already points to a directory
+	resp, err = c.CreateDir("/fooDir", 5)
+	if err == nil {
+		t.Fatalf("fooDir already points to a directory, so SetDir should have failed."+
+			"The response was: %#v", resp)
+	}
+
+	_, err = c.Set("foo", "bar", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This should succeed
+	// It should replace the key
+	resp, err = c.SetDir("foo", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/foo" && resp.Node.Value == "" && resp.Node.TTL == 5) {
+		t.Fatalf("SetDir 2 failed: %#v", resp)
+	}
+	if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("SetDir 2 failed: %#v", resp)
+	}
+}
+
+func TestUpdateDir(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+	}()
+
+	resp, err := c.CreateDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This should succeed.
+	resp, err = c.UpdateDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "update" && resp.Node.Key == "/fooDir" &&
+		resp.Node.Value == "" && resp.Node.TTL == 5) {
+		t.Fatalf("UpdateDir 1 failed: %#v", resp)
+	}
+	if !(resp.PrevNode.Key == "/fooDir" && resp.PrevNode.Dir == true && resp.PrevNode.TTL == 5) {
+		t.Fatalf("UpdateDir 1 PrevNode failed: %#v", resp)
+	}
+
+	// This should fail because the key does not exist.
+	resp, err = c.UpdateDir("nonexistentDir", 5)
+	if err == nil {
+		t.Fatalf("The key %v did not exist, so the update should have failed."+
+			"The response was: %#v", resp.Node.Key, resp)
+	}
+}
+
+func TestCreateDir(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+	}()
+
+	// This should succeed
+	resp, err := c.CreateDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "create" && resp.Node.Key == "/fooDir" &&
+		resp.Node.Value == "" && resp.Node.TTL == 5) {
+		t.Fatalf("CreateDir 1 failed: %#v", resp)
+	}
+	if resp.PrevNode != nil {
+		t.Fatalf("CreateDir 1 PrevNode failed: %#v", resp)
+	}
+
+	// This should fail, because the key is already there
+	resp, err = c.CreateDir("fooDir", 5)
+	if err == nil {
+		t.Fatalf("The key %v did exist, so the creation should have failed."+
+			"The response was: %#v", resp.Node.Key, resp)
+	}
+}

+ 6 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go

@@ -0,0 +1,6 @@
+package etcd
+
+const (
+	version        = "v2"
+	packageVersion = "v2.0.0+git"
+)

+ 103 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch.go

@@ -0,0 +1,103 @@
+package etcd
+
+import (
+	"errors"
+)
+
+// Errors introduced by the Watch command.
+var (
+	ErrWatchStoppedByUser = errors.New("Watch stopped by the user via stop channel")
+)
+
+// If recursive is set to true the watch returns the first change under the given
+// prefix since the given index.
+//
+// If recursive is set to false the watch returns the first change to the given key
+// since the given index.
+//
+// To watch for the latest change, set waitIndex = 0.
+//
+// If a receiver channel is given, it will be a long-term watch. Watch will block on the
+// channel. After someone receives from the channel, it will go on to watch the
+// prefix. If a stop channel is given, the client can close the long-term watch using
+// the stop channel.
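+//
+// A sketch of a long-term watch (illustrative only; assumes c is an
+// initialized *Client):
+//
+//	receiver := make(chan *Response, 10)
+//	stop := make(chan bool)
+//	go func() {
+//		for resp := range receiver {
+//			fmt.Println(resp.Node.Key, resp.Node.Value)
+//		}
+//	}()
+//	// blocks until the watch fails or is stopped via the stop channel
+//	if _, err := c.Watch("foo", 0, true, receiver, stop); err == ErrWatchStoppedByUser {
+//		// the stop channel ended the watch
+//	}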
+func (c *Client) Watch(prefix string, waitIndex uint64, recursive bool,
+	receiver chan *Response, stop chan bool) (*Response, error) {
+	logger.Debugf("watch %s [%s]", prefix, c.cluster.Leader)
+	if receiver == nil {
+		raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
+
+		if err != nil {
+			return nil, err
+		}
+
+		return raw.Unmarshal()
+	}
+	defer close(receiver)
+
+	for {
+		raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
+
+		if err != nil {
+			return nil, err
+		}
+
+		resp, err := raw.Unmarshal()
+
+		if err != nil {
+			return nil, err
+		}
+
+		waitIndex = resp.Node.ModifiedIndex + 1
+		receiver <- resp
+	}
+}
+
+func (c *Client) RawWatch(prefix string, waitIndex uint64, recursive bool,
+	receiver chan *RawResponse, stop chan bool) (*RawResponse, error) {
+
+	logger.Debugf("rawWatch %s [%s]", prefix, c.cluster.Leader)
+	if receiver == nil {
+		return c.watchOnce(prefix, waitIndex, recursive, stop)
+	}
+
+	for {
+		raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
+
+		if err != nil {
+			return nil, err
+		}
+
+		resp, err := raw.Unmarshal()
+
+		if err != nil {
+			return nil, err
+		}
+
+		waitIndex = resp.Node.ModifiedIndex + 1
+		receiver <- raw
+	}
+}
+
+// watchOnce is a helper that returns when there is a change
+// under the given prefix
+func (c *Client) watchOnce(key string, waitIndex uint64, recursive bool, stop chan bool) (*RawResponse, error) {
+
+	options := Options{
+		"wait": true,
+	}
+	if waitIndex > 0 {
+		options["waitIndex"] = waitIndex
+	}
+	if recursive {
+		options["recursive"] = true
+	}
+
+	resp, err := c.getCancelable(key, options, stop)
+
+	if err == ErrRequestCancelled {
+		return nil, ErrWatchStoppedByUser
+	}
+
+	return resp, err
+}

+ 119 - 0
libnetwork/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/watch_test.go

@@ -0,0 +1,119 @@
+package etcd
+
+import (
+	"fmt"
+	"runtime"
+	"testing"
+	"time"
+)
+
+func TestWatch(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("watch_foo", true)
+	}()
+
+	go setHelper("watch_foo", "bar", c)
+
+	resp, err := c.Watch("watch_foo", 0, false, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
+		t.Fatalf("Watch 1 failed: %#v", resp)
+	}
+
+	go setHelper("watch_foo", "bar", c)
+
+	resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, false, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
+		t.Fatalf("Watch 2 failed: %#v", resp)
+	}
+
+	routineNum := runtime.NumGoroutine()
+
+	ch := make(chan *Response, 10)
+	stop := make(chan bool, 1)
+
+	go setLoop("watch_foo", "bar", c)
+
+	go receiver(ch, stop)
+
+	_, err = c.Watch("watch_foo", 0, false, ch, stop)
+	if err != ErrWatchStoppedByUser {
+		t.Fatalf("Watch returned a non-user stop error")
+	}
+
+	if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
+		t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
+	}
+}
+
+func TestWatchAll(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("watch_foo", true)
+	}()
+
+	go setHelper("watch_foo/foo", "bar", c)
+
+	resp, err := c.Watch("watch_foo", 0, true, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
+		t.Fatalf("WatchAll 1 failed: %#v", resp)
+	}
+
+	go setHelper("watch_foo/foo", "bar", c)
+
+	resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, true, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
+		t.Fatalf("WatchAll 2 failed: %#v", resp)
+	}
+
+	ch := make(chan *Response, 10)
+	stop := make(chan bool, 1)
+
+	routineNum := runtime.NumGoroutine()
+
+	go setLoop("watch_foo/foo", "bar", c)
+
+	go receiver(ch, stop)
+
+	_, err = c.Watch("watch_foo", 0, true, ch, stop)
+	if err != ErrWatchStoppedByUser {
+		t.Fatalf("Watch returned a non-user stop error")
+	}
+
+	if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
+		t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
+	}
+}
+
+func setHelper(key, value string, c *Client) {
+	time.Sleep(time.Second)
+	c.Set(key, value, 100)
+}
+
+func setLoop(key, value string, c *Client) {
+	time.Sleep(time.Second)
+	for i := 0; i < 10; i++ {
+		newValue := fmt.Sprintf("%s_%v", value, i)
+		c.Set(key, newValue, 100)
+		time.Sleep(time.Second / 10)
+	}
+}
+
+func receiver(c chan *Response, stop chan bool) {
+	for i := 0; i < 10; i++ {
+		<-c
+	}
+	stop <- true
+}

+ 274 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/README.md

@@ -0,0 +1,274 @@
+---
+page_title: Docker Swarm discovery
+page_description: Swarm discovery
+page_keywords: docker, swarm, clustering, discovery
+---
+
+# Discovery
+
+Docker Swarm comes with multiple Discovery backends.
+
+## Backends
+
+### Hosted Discovery with Docker Hub
+
+First we create a cluster.
+
+```bash
+# create a cluster
+$ swarm create
+6856663cdefdec325839a4b7e1de38e8 # <- this is your unique <cluster_id>
+```
+
+Then we create each node and join them to the cluster.
+
+```bash
+# on each of your nodes, start the swarm agent
+#  <node_ip> doesn't have to be public (eg. 192.168.0.X),
+#  as long as the swarm manager can access it.
+$ swarm join --addr=<node_ip:2375> token://<cluster_id>
+```
+
+Finally, we start the Swarm manager. This can be on any machine or even
+your laptop.
+
+```bash
+$ swarm manage -H tcp://<swarm_ip:swarm_port> token://<cluster_id>
+```
+
+You can then use regular Docker commands to interact with your swarm.
+
+```bash
+docker -H tcp://<swarm_ip:swarm_port> info
+docker -H tcp://<swarm_ip:swarm_port> run ...
+docker -H tcp://<swarm_ip:swarm_port> ps
+docker -H tcp://<swarm_ip:swarm_port> logs ...
+...
+```
+
+You can also list the nodes in your cluster.
+
+```bash
+swarm list token://<cluster_id>
+<node_ip:2375>
+```
+
+### Using a static file describing the cluster
+
+For each of your nodes, add a line to a file. The node IP address
+doesn't need to be public as long as the Swarm manager can access it.
+
+```bash
+echo <node_ip1:2375> >> /tmp/my_cluster
+echo <node_ip2:2375> >> /tmp/my_cluster
+echo <node_ip3:2375> >> /tmp/my_cluster
+```
+
+Then start the Swarm manager on any machine.
+
+```bash
+swarm manage -H tcp://<swarm_ip:swarm_port> file:///tmp/my_cluster
+```
+
+And then use the regular Docker commands.
+
+```bash
+docker -H tcp://<swarm_ip:swarm_port> info
+docker -H tcp://<swarm_ip:swarm_port> run ...
+docker -H tcp://<swarm_ip:swarm_port> ps
+docker -H tcp://<swarm_ip:swarm_port> logs ...
+...
+```
+
+You can list the nodes in your cluster.
+
+```bash
+$ swarm list file:///tmp/my_cluster
+<node_ip1:2375>
+<node_ip2:2375>
+<node_ip3:2375>
+```
+
+### Using etcd
+
+On each of your nodes, start the Swarm agent. The node IP address
+doesn't have to be public as long as the swarm manager can access it.
+
+```bash
+swarm join --addr=<node_ip:2375> etcd://<etcd_ip>/<path>
+```
+
+Start the manager on any machine or your laptop.
+
+```bash
+swarm manage -H tcp://<swarm_ip:swarm_port> etcd://<etcd_ip>/<path>
+```
+
+And then use the regular Docker commands.
+
+```bash
+docker -H tcp://<swarm_ip:swarm_port> info
+docker -H tcp://<swarm_ip:swarm_port> run ...
+docker -H tcp://<swarm_ip:swarm_port> ps
+docker -H tcp://<swarm_ip:swarm_port> logs ...
+...
+```
+
+You can list the nodes in your cluster.
+
+```bash
+swarm list etcd://<etcd_ip>/<path>
+<node_ip:2375>
+```
+
+### Using consul
+
+On each of your nodes, start the Swarm agent. The node IP address
+doesn't need to be public as long as the Swarm manager can access it.
+
+```bash
+swarm join --addr=<node_ip:2375> consul://<consul_addr>/<path>
+```
+
+Start the manager on any machine or your laptop.
+
+```bash
+swarm manage -H tcp://<swarm_ip:swarm_port> consul://<consul_addr>/<path>
+```
+
+And then use the regular Docker commands.
+
+```bash
+docker -H tcp://<swarm_ip:swarm_port> info
+docker -H tcp://<swarm_ip:swarm_port> run ...
+docker -H tcp://<swarm_ip:swarm_port> ps
+docker -H tcp://<swarm_ip:swarm_port> logs ...
+...
+```
+
+You can list the nodes in your cluster.
+
+```bash
+swarm list consul://<consul_addr>/<path>
+<node_ip:2375>
+```
+
+### Using zookeeper
+
+On each of your nodes, start the Swarm agent. The node IP doesn't have
+to be public as long as the swarm manager can access it.
+
+```bash
+swarm join --addr=<node_ip:2375> zk://<zookeeper_addr1>,<zookeeper_addr2>/<path>
+```
+
+Start the manager on any machine or your laptop.
+
+```bash
+swarm manage -H tcp://<swarm_ip:swarm_port> zk://<zookeeper_addr1>,<zookeeper_addr2>/<path>
+```
+
+You can then use the regular Docker commands.
+
+```bash
+docker -H tcp://<swarm_ip:swarm_port> info
+docker -H tcp://<swarm_ip:swarm_port> run ...
+docker -H tcp://<swarm_ip:swarm_port> ps
+docker -H tcp://<swarm_ip:swarm_port> logs ...
+...
+```
+
+You can list the nodes in the cluster.
+
+```bash
+swarm list zk://<zookeeper_addr1>,<zookeeper_addr2>/<path>
+<node_ip:2375>
+```
+
+### Using a static list of IP addresses
+
+Start the manager on any machine or your laptop.
+
+```bash
+swarm manage -H <swarm_ip:swarm_port> nodes://<node_ip1:2375>,<node_ip2:2375>
+```
+
+Or
+
+```bash
+swarm manage -H <swarm_ip:swarm_port> <node_ip1:2375>,<node_ip2:2375>
+```
+
+Then use the regular Docker commands.
+
+```bash
+docker -H <swarm_ip:swarm_port> info
+docker -H <swarm_ip:swarm_port> run ...
+docker -H <swarm_ip:swarm_port> ps
+docker -H <swarm_ip:swarm_port> logs ...
+...
+```
+
+### Range pattern for IP addresses
+
+The `file` and `nodes` discoveries support a range pattern for specifying IP
+addresses: for example, `10.0.0.[10:200]` expands to the list of nodes from
+`10.0.0.10` to `10.0.0.200`.
+
+For example, with the `file` discovery method:
+
+```bash
+$ echo "10.0.0.[11:100]:2375"   >> /tmp/my_cluster
+$ echo "10.0.1.[15:20]:2375"    >> /tmp/my_cluster
+$ echo "192.168.1.2:[2:20]375"  >> /tmp/my_cluster
+```
+
+Then start the manager.
+
+```bash
+swarm manage -H tcp://<swarm_ip:swarm_port> file:///tmp/my_cluster
+```
+
+And with the `nodes` discovery method:
+
+```bash
+swarm manage -H <swarm_ip:swarm_port> "nodes://10.0.0.[10:200]:2375,10.0.1.[2:250]:2375"
+```
+
+## Contributing a new discovery backend
+
+Contributing a new discovery backend is easy: simply implement this
+interface:
+
+```go
+type Discovery interface {
+     Initialize(string, uint64) error
+     Fetch() ([]*Entry, error)
+     Watch(WatchCallback)
+     Register(string) error
+}
+```
+
+### Initialize
+
+The parameters are the discovery location without the scheme and a heartbeat (in seconds).
+
+### Fetch
+
+Returns the list of all the nodes from the discovery service.
+
+### Watch
+
+Triggers an update (`Fetch`). This can happen either via a timer (as with
+`token`) or by using backend-specific features (as with `etcd`).
+
+### Register
+
+Adds a new node to the discovery service.
+
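+Putting it together, a minimal backend might look like the following sketch.
+It is closely modeled on the `nodes` backend shipped with Swarm; the `static`
+scheme name is purely hypothetical.
+
+```go
+package static
+
+import (
+	"strings"
+
+	"github.com/docker/swarm/discovery"
+)
+
+// Discovery serves a fixed list of entries.
+type Discovery struct {
+	entries []*discovery.Entry
+}
+
+func init() {
+	// makes the backend available as static://<host1:port>,<host2:port>
+	discovery.Register("static", &Discovery{})
+}
+
+// Initialize parses a comma-separated list of <host:port> addresses.
+func (s *Discovery) Initialize(uris string, _ uint64) error {
+	entries, err := discovery.CreateEntries(strings.Split(uris, ","))
+	if err != nil {
+		return err
+	}
+	s.entries = entries
+	return nil
+}
+
+// Fetch returns the static list of entries.
+func (s *Discovery) Fetch() ([]*discovery.Entry, error) {
+	return s.entries, nil
+}
+
+// Watch is a no-op: a static list never changes.
+func (s *Discovery) Watch(callback discovery.WatchCallback) {}
+
+// Register is not supported by a static backend.
+func (s *Discovery) Register(addr string) error {
+	return discovery.ErrNotImplemented
+}
+```
+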
+## Docker Swarm documentation index
+
+- [User guide](./index.md)
+- [Scheduler strategies](./scheduler/strategy.md)
+- [Scheduler filters](./scheduler/filter.md)
+- [Swarm API](./API.md)

+ 106 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery.go

@@ -0,0 +1,106 @@
+package discovery
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+
+	log "github.com/Sirupsen/logrus"
+)
+
+// Entry is exported
+type Entry struct {
+	Host string
+	Port string
+}
+
+// NewEntry is exported
+func NewEntry(url string) (*Entry, error) {
+	host, port, err := net.SplitHostPort(url)
+	if err != nil {
+		return nil, err
+	}
+	return &Entry{host, port}, nil
+}
+
+func (m Entry) String() string {
+	return fmt.Sprintf("%s:%s", m.Host, m.Port)
+}
+
+// WatchCallback is exported
+type WatchCallback func(entries []*Entry)
+
+// Discovery is exported
+type Discovery interface {
+	Initialize(string, uint64) error
+	Fetch() ([]*Entry, error)
+	Watch(WatchCallback)
+	Register(string) error
+}
+
+var (
+	discoveries map[string]Discovery
+	// ErrNotSupported is exported
+	ErrNotSupported = errors.New("discovery service not supported")
+	// ErrNotImplemented is exported
+	ErrNotImplemented = errors.New("not implemented in this discovery service")
+)
+
+func init() {
+	discoveries = make(map[string]Discovery)
+}
+
+// Register is exported
+func Register(scheme string, d Discovery) error {
+	if _, exists := discoveries[scheme]; exists {
+		return fmt.Errorf("scheme already registered %s", scheme)
+	}
+	log.WithField("name", scheme).Debug("Registering discovery service")
+	discoveries[scheme] = d
+
+	return nil
+}
+
+func parse(rawurl string) (string, string) {
+	parts := strings.SplitN(rawurl, "://", 2)
+
+	// node1:port,node2:port => nodes://node1:port,node2:port
+	if len(parts) == 1 {
+		return "nodes", parts[0]
+	}
+	return parts[0], parts[1]
+}
+
+// New is exported
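+// For example (a sketch), New("etcd://127.0.0.1:4001/swarm", 25) looks up the
+// backend registered for the "etcd" scheme and calls its Initialize with
+// "127.0.0.1:4001/swarm" and a 25-second heartbeat.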
+func New(rawurl string, heartbeat uint64) (Discovery, error) {
+	scheme, uri := parse(rawurl)
+
+	if discovery, exists := discoveries[scheme]; exists {
+		log.WithFields(log.Fields{"name": scheme, "uri": uri}).Debug("Initializing discovery service")
+		err := discovery.Initialize(uri, heartbeat)
+		return discovery, err
+	}
+
+	return nil, ErrNotSupported
+}
+
+// CreateEntries is exported
+func CreateEntries(addrs []string) ([]*Entry, error) {
+	entries := []*Entry{}
+	if addrs == nil {
+		return entries, nil
+	}
+
+	for _, addr := range addrs {
+		if len(addr) == 0 {
+			continue
+		}
+		entry, err := NewEntry(addr)
+		if err != nil {
+			return nil, err
+		}
+		entries = append(entries, entry)
+	}
+	return entries, nil
+}

+ 54 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/discovery_test.go

@@ -0,0 +1,54 @@
+package discovery
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNewEntry(t *testing.T) {
+	entry, err := NewEntry("127.0.0.1:2375")
+	assert.Equal(t, entry.Host, "127.0.0.1")
+	assert.Equal(t, entry.Port, "2375")
+	assert.NoError(t, err)
+
+	_, err = NewEntry("127.0.0.1")
+	assert.Error(t, err)
+}
+
+func TestParse(t *testing.T) {
+	scheme, uri := parse("127.0.0.1:2375")
+	assert.Equal(t, scheme, "nodes")
+	assert.Equal(t, uri, "127.0.0.1:2375")
+
+	scheme, uri = parse("localhost:2375")
+	assert.Equal(t, scheme, "nodes")
+	assert.Equal(t, uri, "localhost:2375")
+
+	scheme, uri = parse("scheme://127.0.0.1:2375")
+	assert.Equal(t, scheme, "scheme")
+	assert.Equal(t, uri, "127.0.0.1:2375")
+
+	scheme, uri = parse("scheme://localhost:2375")
+	assert.Equal(t, scheme, "scheme")
+	assert.Equal(t, uri, "localhost:2375")
+
+	scheme, uri = parse("")
+	assert.Equal(t, scheme, "nodes")
+	assert.Equal(t, uri, "")
+}
+
+func TestCreateEntries(t *testing.T) {
+	entries, err := CreateEntries(nil)
+	assert.Equal(t, entries, []*Entry{})
+	assert.NoError(t, err)
+
+	entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""})
+	assert.Equal(t, len(entries), 2)
+	assert.Equal(t, entries[0].String(), "127.0.0.1:2375")
+	assert.Equal(t, entries[1].String(), "127.0.0.2:2375")
+	assert.NoError(t, err)
+
+	_, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"})
+	assert.Error(t, err)
+}

+ 71 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file.go

@@ -0,0 +1,71 @@
+package file
+
+import (
+	"io/ioutil"
+	"strings"
+	"time"
+
+	"github.com/docker/swarm/discovery"
+)
+
+// Discovery is exported
+type Discovery struct {
+	heartbeat uint64
+	path      string
+}
+
+func init() {
+	discovery.Register("file", &Discovery{})
+}
+
+// Initialize is exported
+func (s *Discovery) Initialize(path string, heartbeat uint64) error {
+	s.path = path
+	s.heartbeat = heartbeat
+	return nil
+}
+
+func parseFileContent(content []byte) []string {
+	var result []string
+	for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") {
+		line = strings.TrimSpace(line)
+		// Ignore lines that start with #
+		if strings.HasPrefix(line, "#") {
+			continue
+		}
+		// Inline # comments are also ignored.
+		if strings.Contains(line, "#") {
+			line = line[0:strings.Index(line, "#")]
+			// Trim additional spaces caused by above stripping.
+			line = strings.TrimSpace(line)
+		}
+		for _, ip := range discovery.Generate(line) {
+			result = append(result, ip)
+		}
+	}
+	return result
+}
+
+// Fetch is exported
+func (s *Discovery) Fetch() ([]*discovery.Entry, error) {
+	fileContent, err := ioutil.ReadFile(s.path)
+	if err != nil {
+		return nil, err
+	}
+	return discovery.CreateEntries(parseFileContent(fileContent))
+}
+
+// Watch is exported
+func (s *Discovery) Watch(callback discovery.WatchCallback) {
+	for _ = range time.Tick(time.Duration(s.heartbeat) * time.Second) {
+		entries, err := s.Fetch()
+		if err == nil {
+			callback(entries)
+		}
+	}
+}
+
+// Register is exported
+func (s *Discovery) Register(addr string) error {
+	return discovery.ErrNotImplemented
+}

+ 46 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/file/file_test.go

@@ -0,0 +1,46 @@
+package file
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInitialize(t *testing.T) {
+	discovery := &Discovery{}
+	discovery.Initialize("/path/to/file", 0)
+	assert.Equal(t, discovery.path, "/path/to/file")
+}
+
+func TestContent(t *testing.T) {
+	data := `
+1.1.1.[1:2]:1111
+2.2.2.[2:4]:2222
+`
+	ips := parseFileContent([]byte(data))
+	assert.Equal(t, ips[0], "1.1.1.1:1111")
+	assert.Equal(t, ips[1], "1.1.1.2:1111")
+	assert.Equal(t, ips[2], "2.2.2.2:2222")
+	assert.Equal(t, ips[3], "2.2.2.3:2222")
+	assert.Equal(t, ips[4], "2.2.2.4:2222")
+}
+
+func TestRegister(t *testing.T) {
+	discovery := &Discovery{path: "/path/to/file"}
+	assert.Error(t, discovery.Register("0.0.0.0"))
+}
+
+func TestParsingContentsWithComments(t *testing.T) {
+	data := `
+### test ###
+1.1.1.1:1111 # inline comment
+# 2.2.2.2:2222
+      ### empty line with comment
+    3.3.3.3:3333
+### test ###
+`
+	ips := parseFileContent([]byte(data))
+	assert.Equal(t, 2, len(ips))
+	assert.Equal(t, "1.1.1.1:1111", ips[0])
+	assert.Equal(t, "3.3.3.3:3333", ips[1])
+}

+ 35 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator.go

@@ -0,0 +1,35 @@
+package discovery
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+)
+
+// Generate expands a pattern like "10.0.0.[1:3]:2375" into the list of
+// addresses it covers; patterns without a [from:to] range are returned as-is
+func Generate(pattern string) []string {
+	re, _ := regexp.Compile(`\[(.+):(.+)\]`)
+	submatch := re.FindStringSubmatch(pattern)
+	if submatch == nil {
+		return []string{pattern}
+	}
+
+	from, err := strconv.Atoi(submatch[1])
+	if err != nil {
+		return []string{pattern}
+	}
+	to, err := strconv.Atoi(submatch[2])
+	if err != nil {
+		return []string{pattern}
+	}
+
+	template := re.ReplaceAllString(pattern, "%d")
+
+	var result []string
+	for val := from; val <= to; val++ {
+		entry := fmt.Sprintf(template, val)
+		result = append(result, entry)
+	}
+
+	return result
+}

+ 55 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/generator_test.go

@@ -0,0 +1,55 @@
+package discovery
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGeneratorNotGenerate(t *testing.T) {
+	ips := Generate("127.0.0.1")
+	assert.Equal(t, len(ips), 1)
+	assert.Equal(t, ips[0], "127.0.0.1")
+}
+
+func TestGeneratorWithPortNotGenerate(t *testing.T) {
+	ips := Generate("127.0.0.1:8080")
+	assert.Equal(t, len(ips), 1)
+	assert.Equal(t, ips[0], "127.0.0.1:8080")
+}
+
+func TestGeneratorMatchFailedNotGenerate(t *testing.T) {
+	ips := Generate("127.0.0.[1]")
+	assert.Equal(t, len(ips), 1)
+	assert.Equal(t, ips[0], "127.0.0.[1]")
+}
+
+func TestGeneratorWithPort(t *testing.T) {
+	ips := Generate("127.0.0.[1:11]:2375")
+	assert.Equal(t, len(ips), 11)
+	assert.Equal(t, ips[0], "127.0.0.1:2375")
+	assert.Equal(t, ips[1], "127.0.0.2:2375")
+	assert.Equal(t, ips[2], "127.0.0.3:2375")
+	assert.Equal(t, ips[3], "127.0.0.4:2375")
+	assert.Equal(t, ips[4], "127.0.0.5:2375")
+	assert.Equal(t, ips[5], "127.0.0.6:2375")
+	assert.Equal(t, ips[6], "127.0.0.7:2375")
+	assert.Equal(t, ips[7], "127.0.0.8:2375")
+	assert.Equal(t, ips[8], "127.0.0.9:2375")
+	assert.Equal(t, ips[9], "127.0.0.10:2375")
+	assert.Equal(t, ips[10], "127.0.0.11:2375")
+}
+
+func TestGenerateWithMalformedInputAtRangeStart(t *testing.T) {
+	malformedInput := "127.0.0.[x:11]:2375"
+	ips := Generate(malformedInput)
+	assert.Equal(t, len(ips), 1)
+	assert.Equal(t, ips[0], malformedInput)
+}
+
+func TestGenerateWithMalformedInputAtRangeEnd(t *testing.T) {
+	malformedInput := "127.0.0.[1:x]:2375"
+	ips := Generate(malformedInput)
+	assert.Equal(t, len(ips), 1)
+	assert.Equal(t, ips[0], malformedInput)
+}

+ 92 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv.go

@@ -0,0 +1,92 @@
+package kv
+
+import (
+	"fmt"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/docker/swarm/discovery"
+	"github.com/docker/swarm/pkg/store"
+)
+
+// Discovery is exported
+type Discovery struct {
+	store     store.Store
+	name      string
+	heartbeat time.Duration
+	prefix    string
+}
+
+func init() {
+	discovery.Register("zk", &Discovery{name: "zk"})
+	discovery.Register("consul", &Discovery{name: "consul"})
+	discovery.Register("etcd", &Discovery{name: "etcd"})
+}
+
+// Initialize is exported
+func (s *Discovery) Initialize(uris string, heartbeat uint64) error {
+	var (
+		parts = strings.SplitN(uris, "/", 2)
+		ips   = strings.Split(parts[0], ",")
+		addrs []string
+		err   error
+	)
+
+	if len(parts) != 2 {
+		return fmt.Errorf("invalid format %q, missing <path>", uris)
+	}
+
+	for _, ip := range ips {
+		addrs = append(addrs, ip)
+	}
+
+	s.heartbeat = time.Duration(heartbeat) * time.Second
+	s.prefix = parts[1]
+
+	// Creates a new store; options that are not supported
+	// by the chosen store are ignored
+	s.store, err = store.CreateStore(
+		s.name, // name of the store
+		addrs,
+		store.Config{
+			Timeout: s.heartbeat,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Fetch is exported
+func (s *Discovery) Fetch() ([]*discovery.Entry, error) {
+	addrs, err := s.store.GetRange(s.prefix)
+	if err != nil {
+		return nil, err
+	}
+	return discovery.CreateEntries(convertToStringArray(addrs))
+}
+
+// Watch is exported
+func (s *Discovery) Watch(callback discovery.WatchCallback) {
+	s.store.WatchRange(s.prefix, "", s.heartbeat, func(kvalues []store.KVEntry) {
+		// Translate byte array entries to discovery.Entry
+		entries, _ := discovery.CreateEntries(convertToStringArray(kvalues))
+		callback(entries)
+	})
+}
+
+// Register is exported
+func (s *Discovery) Register(addr string) error {
+	err := s.store.Put(path.Join(s.prefix, addr), []byte(addr))
+	return err
+}
+
+func convertToStringArray(entries []store.KVEntry) (addrs []string) {
+	for _, entry := range entries {
+		addrs = append(addrs, string(entry.Value()))
+	}
+	return addrs
+}

+ 20 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/kv/kv_test.go

@@ -0,0 +1,20 @@
+package kv
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInitialize(t *testing.T) {
+	discoveryService := &Discovery{}
+
+	assert.Equal(t, discoveryService.Initialize("127.0.0.1", 0).Error(), "invalid format \"127.0.0.1\", missing <path>")
+
+	assert.Error(t, discoveryService.Initialize("127.0.0.1/path", 0))
+	assert.Equal(t, discoveryService.prefix, "path")
+
+	assert.Error(t, discoveryService.Initialize("127.0.0.1,127.0.0.2,127.0.0.3/path", 0))
+	assert.Equal(t, discoveryService.prefix, "path")
+
+}

+ 45 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes.go

@@ -0,0 +1,45 @@
+package nodes
+
+import (
+	"strings"
+
+	"github.com/docker/swarm/discovery"
+)
+
+// Discovery is exported
+type Discovery struct {
+	entries []*discovery.Entry
+}
+
+func init() {
+	discovery.Register("nodes", &Discovery{})
+}
+
+// Initialize is exported
+func (s *Discovery) Initialize(uris string, _ uint64) error {
+	for _, input := range strings.Split(uris, ",") {
+		for _, ip := range discovery.Generate(input) {
+			entry, err := discovery.NewEntry(ip)
+			if err != nil {
+				return err
+			}
+			s.entries = append(s.entries, entry)
+		}
+	}
+
+	return nil
+}
+
+// Fetch is exported
+func (s *Discovery) Fetch() ([]*discovery.Entry, error) {
+	return s.entries, nil
+}
+
+// Watch is exported
+func (s *Discovery) Watch(callback discovery.WatchCallback) {
+}
+
+// Register is exported
+func (s *Discovery) Register(addr string) error {
+	return discovery.ErrNotImplemented
+}

+ 31 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/nodes/nodes_test.go

@@ -0,0 +1,31 @@
+package nodes
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInitialise(t *testing.T) {
+	discovery := &Discovery{}
+	discovery.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0)
+	assert.Equal(t, len(discovery.entries), 2)
+	assert.Equal(t, discovery.entries[0].String(), "1.1.1.1:1111")
+	assert.Equal(t, discovery.entries[1].String(), "2.2.2.2:2222")
+}
+
+func TestInitialiseWithPattern(t *testing.T) {
+	discovery := &Discovery{}
+	discovery.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0)
+	assert.Equal(t, len(discovery.entries), 5)
+	assert.Equal(t, discovery.entries[0].String(), "1.1.1.1:1111")
+	assert.Equal(t, discovery.entries[1].String(), "1.1.1.2:1111")
+	assert.Equal(t, discovery.entries[2].String(), "2.2.2.2:2222")
+	assert.Equal(t, discovery.entries[3].String(), "2.2.2.3:2222")
+	assert.Equal(t, discovery.entries[4].String(), "2.2.2.4:2222")
+}
+
+func TestRegister(t *testing.T) {
+	discovery := &Discovery{}
+	assert.Error(t, discovery.Register("0.0.0.0"))
+}

+ 31 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/README.md

@@ -0,0 +1,31 @@
+# discovery-stage.hub.docker.com
+
+Docker Swarm comes with a simple discovery service built into the [Docker Hub](http://hub.docker.com).
+
+The discovery service is still in alpha stage and is currently hosted at `https://discovery-stage.hub.docker.com`.
+
+##### Create a new cluster
+`-> POST https://discovery-stage.hub.docker.com/v1/clusters`
+
+`<- <token>`
+
+##### Add new nodes to a cluster
+`-> POST https://discovery-stage.hub.docker.com/v1/clusters/<token> Request body: "<ip>:<port1>"`
+
+`<- OK`
+
+`-> POST https://discovery-stage.hub.docker.com/v1/clusters/<token> Request body: "<ip>:<port2>"`
+
+`<- OK`
+
+
+##### List nodes in a cluster
+`-> GET https://discovery-stage.hub.docker.com/v1/clusters/<token>`
+
+`<- ["<ip>:<port1>", "<ip>:<port2>"]`
+
+
+##### Delete a cluster (all the nodes in a cluster)
+`-> DELETE https://discovery-stage.hub.docker.com/v1/clusters/<token>`
+
+`<- OK`

+ 104 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token.go

@@ -0,0 +1,104 @@
+package token
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/docker/swarm/discovery"
+)
+
+// DiscoveryURL is exported
+const DiscoveryURL = "https://discovery-stage.hub.docker.com/v1"
+
+// Discovery is exported
+type Discovery struct {
+	heartbeat uint64
+	url       string
+	token     string
+}
+
+func init() {
+	discovery.Register("token", &Discovery{})
+}
+
+// Initialize is exported
+func (s *Discovery) Initialize(urltoken string, heartbeat uint64) error {
+	if i := strings.LastIndex(urltoken, "/"); i != -1 {
+		s.url = "https://" + urltoken[:i]
+		s.token = urltoken[i+1:]
+	} else {
+		s.url = DiscoveryURL
+		s.token = urltoken
+	}
+
+	if s.token == "" {
+		return errors.New("token is empty")
+	}
+	s.heartbeat = heartbeat
+
+	return nil
+}
+
+// Fetch returns the list of entries for the discovery service at the specified endpoint
+func (s *Discovery) Fetch() ([]*discovery.Entry, error) {
+
+	resp, err := http.Get(fmt.Sprintf("%s/%s/%s", s.url, "clusters", s.token))
+	if err != nil {
+		return nil, err
+	}
+
+	defer resp.Body.Close()
+
+	var addrs []string
+	if resp.StatusCode == http.StatusOK {
+		if err := json.NewDecoder(resp.Body).Decode(&addrs); err != nil {
+			return nil, err
+		}
+	} else {
+		return nil, fmt.Errorf("Failed to fetch entries, Discovery service returned %d HTTP status code", resp.StatusCode)
+	}
+
+	return discovery.CreateEntries(addrs)
+}
+
+// Watch is exported
+func (s *Discovery) Watch(callback discovery.WatchCallback) {
+	for _ = range time.Tick(time.Duration(s.heartbeat) * time.Second) {
+		entries, err := s.Fetch()
+		if err == nil {
+			callback(entries)
+		}
+	}
+}
+
+// Register adds a new entry identified by addr into the discovery service
+func (s *Discovery) Register(addr string) error {
+	buf := strings.NewReader(addr)
+
+	resp, err := http.Post(fmt.Sprintf("%s/%s/%s", s.url,
+		"clusters", s.token), "application/json", buf)
+
+	if err != nil {
+		return err
+	}
+
+	resp.Body.Close()
+	return nil
+}
+
+// CreateCluster returns a unique cluster token
+func (s *Discovery) CreateCluster() (string, error) {
+	resp, err := http.Post(fmt.Sprintf("%s/%s", s.url, "clusters"), "", nil)
+	if err != nil {
+		return "", err
+	}
+
+	defer resp.Body.Close()
+	token, err := ioutil.ReadAll(resp.Body)
+	return string(token), err
+}

+ 36 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/discovery/token/token_test.go

@@ -0,0 +1,36 @@
+package token
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInitialize(t *testing.T) {
+	discovery := &Discovery{}
+	err := discovery.Initialize("token", 0)
+	assert.NoError(t, err)
+	assert.Equal(t, discovery.token, "token")
+	assert.Equal(t, discovery.url, DiscoveryURL)
+
+	err = discovery.Initialize("custom/path/token", 0)
+	assert.NoError(t, err)
+	assert.Equal(t, discovery.token, "token")
+	assert.Equal(t, discovery.url, "https://custom/path")
+
+	err = discovery.Initialize("", 0)
+	assert.Error(t, err)
+}
+
+func TestRegister(t *testing.T) {
+	discovery := &Discovery{token: "TEST_TOKEN", url: DiscoveryURL}
+	expected := "127.0.0.1:2675"
+	assert.NoError(t, discovery.Register(expected))
+
+	addrs, err := discovery.Fetch()
+	assert.NoError(t, err)
+	assert.Equal(t, len(addrs), 1)
+	assert.Equal(t, addrs[0].String(), expected)
+
+	assert.NoError(t, discovery.Register(expected))
+}

+ 79 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/README.md

@@ -0,0 +1,79 @@
+# Storage
+
+This package is used by the discovery service to register machines inside the cluster. It is also used to store the cluster's metadata.
+
+## Example of usage
+
+### Create a new store and use Put/Get
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/swarm/store"
+)
+
+func main() {
+	var (
+		client = "localhost:8500"
+	)
+
+	// Initialize a new store with consul
+	kv, err := store.CreateStore(
+		store.Consul,
+		[]string{client},
+		store.Config{
+			Timeout: 10 * time.Second,
+		},
+	)
+	if err != nil {
+		log.Error("Cannot create store consul")
+	}
+
+	key := "foo"
+	err = kv.Put(key, []byte("bar"))
+	if err != nil {
+		log.Error("Error trying to put value at key `", key, "`")
+	}
+
+	value, _, err := kv.Get(key)
+	if err != nil {
+		log.Error("Error trying accessing value at key `", key, "`")
+	}
+
+	log.Info("value: ", string(value))
+}
+```
+
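+### Compare-and-swap with AtomicPut
+
+Continuing the example above, `AtomicPut` allows an optimistic update: re-read
+the key to obtain its last index, then swap the value only if the key has not
+been modified since (a sketch; error handling kept minimal):
+
+```go
+value, index, err := kv.Get(key)
+if err != nil {
+	log.Error("Error reading value at key `", key, "`")
+}
+
+// succeeds only if the key is still at index `index`
+ok, err := kv.AtomicPut(key, value, []byte("baz"), index)
+if err != nil || !ok {
+	log.Error("Key `", key, "` was modified concurrently, not overwriting")
+}
+```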
+
+
+## Contributing to a new storage backend
+
+A new **storage backend** should include those calls:
+
+```go
+type Store interface {
+	Put(key string, value []byte) error
+	Get(key string) (value []byte, lastIndex uint64, err error)
+	Delete(key string) error
+	Exists(key string) (bool, error)
+	Watch(key string, ttl uint64, callback WatchCallback) error
+	CancelWatch(key string) error
+	Acquire(key string, value []byte) (string, error)
+	Release(session string) error
+	GetRange(prefix string) (value [][]byte, err error)
+	DeleteRange(prefix string) error
+	WatchRange(prefix string, filter string, heartbeat uint64, callback WatchCallback) error
+	CancelWatchRange(prefix string) error
+	AtomicPut(key string, oldValue []byte, newValue []byte, index uint64) (bool, error)
+	AtomicDelete(key string, oldValue []byte, index uint64) (bool, error)
+}
+```
+
+To be eligible as a **discovery backend** only, a K/V store implementation needs to offer at least `Get`, `Put`, `WatchRange`, and `GetRange`.
+
+You can take inspiration from the existing backends to create a new one. This interface may change over time to improve the experience of using the library and contributing a new backend.

+ 301 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/consul.go

@@ -0,0 +1,301 @@
+package store
+
+import (
+	"crypto/tls"
+	"errors"
+	"net/http"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	api "github.com/hashicorp/consul/api"
+)
+
+var (
+	// ErrSessionUndefined is exported
+	ErrSessionUndefined = errors.New("Session does not exist")
+)
+
+// Consul embeds the client and watches/lock sessions
+type Consul struct {
+	config   *api.Config
+	client   *api.Client
+	sessions map[string]*api.Session
+	watches  map[string]*Watch
+}
+
+// Watch holds the last index seen for a watched key and the
+// refresh interval
+type Watch struct {
+	LastIndex uint64
+	Interval  time.Duration
+}
+
+// InitializeConsul creates a new Consul client given
+// a list of endpoints and optional tls config
+func InitializeConsul(endpoints []string, options Config) (Store, error) {
+	s := &Consul{}
+	s.sessions = make(map[string]*api.Session)
+	s.watches = make(map[string]*Watch)
+
+	// Create Consul client
+	config := api.DefaultConfig()
+	s.config = config
+	config.HttpClient = http.DefaultClient
+	config.Address = endpoints[0]
+	config.Scheme = "http"
+
+	if options.TLS != nil {
+		s.setTLS(options.TLS)
+	}
+
+	if options.Timeout != 0 {
+		s.setTimeout(options.Timeout)
+	}
+
+	// Creates a new client
+	client, err := api.NewClient(config)
+	if err != nil {
+		log.Errorf("Couldn't initialize consul client..")
+		return nil, err
+	}
+	s.client = client
+
+	return s, nil
+}
+
+// SetTLS sets Consul TLS options
+func (s *Consul) setTLS(tls *tls.Config) {
+	s.config.HttpClient.Transport = &http.Transport{
+		TLSClientConfig: tls,
+	}
+	s.config.Scheme = "https"
+}
+
+// SetTimeout sets the timeout for connecting to Consul
+func (s *Consul) setTimeout(time time.Duration) {
+	s.config.WaitTime = time
+}
+
+// Get the value at "key", returns the last modified index
+// to use in conjunction with CAS calls
+func (s *Consul) Get(key string) (value []byte, lastIndex uint64, err error) {
+	pair, meta, err := s.client.KV().Get(partialFormat(key), nil)
+	if err != nil {
+		return nil, 0, err
+	}
+	if pair == nil {
+		return nil, 0, ErrKeyNotFound
+	}
+	return pair.Value, meta.LastIndex, nil
+}
+
+// Put a value at "key"
+func (s *Consul) Put(key string, value []byte) error {
+	p := &api.KVPair{Key: partialFormat(key), Value: value}
+	if s.client == nil {
+		log.Error("Error initializing client")
+	}
+	_, err := s.client.KV().Put(p, nil)
+	return err
+}
+
+// Delete a value at "key"
+func (s *Consul) Delete(key string) error {
+	_, err := s.client.KV().Delete(partialFormat(key), nil)
+	return err
+}
+
+// Exists checks that the key exists inside the store
+func (s *Consul) Exists(key string) (bool, error) {
+	_, _, err := s.Get(key)
+	if err != nil && err == ErrKeyNotFound {
+		return false, err
+	}
+	return true, nil
+}
+
+// GetRange gets a range of values at "directory"
+func (s *Consul) GetRange(prefix string) (kvi []KVEntry, err error) {
+	pairs, _, err := s.client.KV().List(partialFormat(prefix), nil)
+	if err != nil {
+		return nil, err
+	}
+	if len(pairs) == 0 {
+		return nil, ErrKeyNotFound
+	}
+	for _, pair := range pairs {
+		if pair.Key == prefix {
+			continue
+		}
+		kvi = append(kvi, &kviTuple{pair.Key, pair.Value, pair.ModifyIndex})
+	}
+	return kvi, nil
+}
+
+// DeleteRange deletes a range of values at "directory"
+func (s *Consul) DeleteRange(prefix string) error {
+	_, err := s.client.KV().DeleteTree(partialFormat(prefix), nil)
+	return err
+}
+
+// Watch a single key for modifications
+func (s *Consul) Watch(key string, heartbeat time.Duration, callback WatchCallback) error {
+	fkey := partialFormat(key)
+
+	// We get the last index first
+	_, meta, err := s.client.KV().Get(fkey, nil)
+	if err != nil {
+		return err
+	}
+
+	// Add watch to map
+	s.watches[fkey] = &Watch{LastIndex: meta.LastIndex, Interval: heartbeat}
+	eventChan := s.waitForChange(fkey)
+
+	for _ = range eventChan {
+		log.WithField("name", "consul").Debug("Key watch triggered")
+		entry, index, err := s.Get(key)
+		if err != nil {
+			log.Error("Cannot refresh the key: ", fkey, ", cancelling watch")
+			s.watches[fkey] = nil
+			return err
+		}
+
+		value := []KVEntry{&kviTuple{key, entry, index}}
+		callback(value)
+	}
+
+	return nil
+}
+
+// CancelWatch cancels a watch, sends a signal to the appropriate
+// stop channel
+func (s *Consul) CancelWatch(key string) error {
+	key = partialFormat(key)
+	if _, ok := s.watches[key]; !ok {
+		log.Error("Chan does not exist for key: ", key)
+		return ErrWatchDoesNotExist
+	}
+	s.watches[key] = nil
+	return nil
+}
+
+// Internal function to check if a key has changed
+func (s *Consul) waitForChange(key string) <-chan uint64 {
+	ch := make(chan uint64)
+	kv := s.client.KV()
+	go func() {
+		for {
+			watch, ok := s.watches[key]
+			if !ok {
+				log.Error("Cannot access last index for key: ", key, " closing channel")
+				break
+			}
+			option := &api.QueryOptions{
+				WaitIndex: watch.LastIndex,
+				WaitTime:  watch.Interval,
+			}
+			_, meta, err := kv.List(key, option)
+			if err != nil {
+				log.WithField("name", "consul").Errorf("Discovery error: %v", err)
+				break
+			}
+			watch.LastIndex = meta.LastIndex
+			ch <- watch.LastIndex
+		}
+		close(ch)
+	}()
+	return ch
+}
+
+// WatchRange triggers a watch on a range of values at "directory"
+func (s *Consul) WatchRange(prefix string, filter string, heartbeat time.Duration, callback WatchCallback) error {
+	fprefix := partialFormat(prefix)
+
+	// We get the last index first
+	_, meta, err := s.client.KV().Get(prefix, nil)
+	if err != nil {
+		return err
+	}
+
+	// Add watch to map
+	s.watches[fprefix] = &Watch{LastIndex: meta.LastIndex, Interval: heartbeat}
+	eventChan := s.waitForChange(fprefix)
+
+	for _ = range eventChan {
+		log.WithField("name", "consul").Debug("Key watch triggered")
+		kvi, err := s.GetRange(prefix)
+		if err != nil {
+			log.Error("Cannot refresh keys with prefix: ", fprefix, ", cancelling watch")
+			s.watches[fprefix] = nil
+			return err
+		}
+		callback(kvi)
+	}
+
+	return nil
+}
+
+// CancelWatchRange stops the watch on the range of values, sends
+// a signal to the appropriate stop channel
+func (s *Consul) CancelWatchRange(prefix string) error {
+	return s.CancelWatch(prefix)
+}
+
+// Acquire the lock for "key"/"directory"
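+// A sketch of the intended usage (assuming store is an initialized *Consul;
+// the returned id is the session to pass to Release later):
+//
+//	id, err := store.Acquire("swarm/lock", []byte("host1"))
+//	if err == nil {
+//		defer store.Release(id)
+//		// ... critical section ...
+//	}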
+func (s *Consul) Acquire(key string, value []byte) (string, error) {
+	key = partialFormat(key)
+	session := s.client.Session()
+	id, _, err := session.CreateNoChecks(nil, nil)
+	if err != nil {
+		return "", err
+	}
+
+	// Add session to map
+	s.sessions[id] = session
+
+	p := &api.KVPair{Key: key, Value: value, Session: id}
+	if work, _, err := s.client.KV().Acquire(p, nil); err != nil {
+		return "", err
+	} else if !work {
+		return "", ErrCannotLock
+	}
+
+	return id, nil
+}
+
+// Release the lock for "key"/"directory"
+func (s *Consul) Release(id string) error {
+	if _, ok := s.sessions[id]; !ok {
+		log.Error("Lock session does not exist")
+		return ErrSessionUndefined
+	}
+	session := s.sessions[id]
+	session.Destroy(id, nil)
+	s.sessions[id] = nil
+	return nil
+}
+
+// AtomicPut puts a value at "key" if the key has not been
+// modified in the meantime; it returns an error if it has been
+func (s *Consul) AtomicPut(key string, _ []byte, newValue []byte, index uint64) (bool, error) {
+	p := &api.KVPair{Key: partialFormat(key), Value: newValue, ModifyIndex: index}
+	if work, _, err := s.client.KV().CAS(p, nil); err != nil {
+		return false, err
+	} else if !work {
+		return false, ErrKeyModified
+	}
+	return true, nil
+}
+
+// AtomicDelete deletes a value at "key" if the key has not
+// been modified in the meantime; it returns an error if it has been
+func (s *Consul) AtomicDelete(key string, oldValue []byte, index uint64) (bool, error) {
+	p := &api.KVPair{Key: partialFormat(key), ModifyIndex: index}
+	if work, _, err := s.client.KV().DeleteCAS(p, nil); err != nil {
+		return false, err
+	} else if !work {
+		return false, ErrKeyModified
+	}
+	return true, nil
+}

+ 264 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/etcd.go

@@ -0,0 +1,264 @@
+package store
+
+import (
+	"crypto/tls"
+	"net"
+	"net/http"
+	"strings"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	etcd "github.com/coreos/go-etcd/etcd"
+)
+
+// Etcd embeds the client
+type Etcd struct {
+	client  *etcd.Client
+	watches map[string]chan<- bool
+}
+
+// InitializeEtcd creates a new Etcd client given
+// a list of endpoints and optional tls config
+func InitializeEtcd(addrs []string, options Config) (Store, error) {
+	s := &Etcd{}
+	s.watches = make(map[string]chan<- bool)
+
+	entries := createEndpoints(addrs, "http")
+	s.client = etcd.NewClient(entries)
+
+	if options.TLS != nil {
+		s.setTLS(options.TLS)
+	}
+
+	if options.Timeout != 0 {
+		s.setTimeout(options.Timeout)
+	}
+
+	return s, nil
+}
+
+// SetTLS sets the tls configuration given the path
+// of certificate files
+func (s *Etcd) setTLS(tls *tls.Config) {
+	// Change to https scheme
+	var addrs []string
+	entries := s.client.GetCluster()
+	for _, entry := range entries {
+		addrs = append(addrs, strings.Replace(entry, "http", "https", -1))
+	}
+	s.client.SetCluster(addrs)
+
+	// Set transport
+	t := http.Transport{
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second, // default timeout
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     tls,
+	}
+	s.client.SetTransport(&t)
+}
+
+// setTimeout sets the timeout used for connecting to the store
+func (s *Etcd) setTimeout(time time.Duration) {
+	s.client.SetDialTimeout(time)
+}
+
+// Create the entire path for a directory that does not exist
+func (s *Etcd) createDirectory(path string) error {
+	// TODO Handle TTL at key/dir creation -> use K/V struct for key infos?
+	if _, err := s.client.CreateDir(format(path), 10); err != nil {
+		if etcdError, ok := err.(*etcd.EtcdError); ok {
+			if etcdError.ErrorCode != 105 { // Skip key already exists
+				return err
+			}
+		} else {
+			return err
+		}
+	}
+	return nil
+}
+
+// Get the value at "key", returns the last modified index
+// to use in conjunction with CAS calls
+func (s *Etcd) Get(key string) (value []byte, lastIndex uint64, err error) {
+	result, err := s.client.Get(format(key), false, false)
+	if err != nil {
+		if etcdError, ok := err.(*etcd.EtcdError); ok {
+			// Not a file or not a directory
+			if etcdError.ErrorCode == 102 || etcdError.ErrorCode == 104 {
+				return nil, 0, ErrKeyNotFound
+			}
+		}
+		return nil, 0, err
+	}
+	return []byte(result.Node.Value), result.Node.ModifiedIndex, nil
+}
+
+// Put a value at "key"
+func (s *Etcd) Put(key string, value []byte) error {
+	if _, err := s.client.Set(format(key), string(value), 0); err != nil {
+		if etcdError, ok := err.(*etcd.EtcdError); ok {
+			if etcdError.ErrorCode == 104 { // Not a directory
+				// Remove the last element (the actual key) and set the prefix as a dir
+				if err := s.createDirectory(getDir(key)); err != nil {
+					return err
+				}
+				if _, err := s.client.Set(format(key), string(value), 0); err != nil {
+					return err
+				}
+				return nil
+			}
+		}
+		return err
+	}
+	return nil
+}
+
+// Delete a value at "key"
+func (s *Etcd) Delete(key string) error {
+	if _, err := s.client.Delete(format(key), false); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Exists checks if the key exists inside the store
+func (s *Etcd) Exists(key string) (bool, error) {
+	value, _, err := s.Get(key)
+	if err != nil {
+		if err == ErrKeyNotFound || value == nil {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+// Watch a single key for modifications
+func (s *Etcd) Watch(key string, _ time.Duration, callback WatchCallback) error {
+	key = format(key)
+	watchChan := make(chan *etcd.Response)
+	stopChan := make(chan bool)
+
+	// Create new Watch entry
+	s.watches[key] = stopChan
+
+	// Start watch
+	go s.client.Watch(key, 0, false, watchChan, stopChan)
+
+	for range watchChan {
+		log.WithField("name", "etcd").Debug("Discovery watch triggered")
+		entry, index, err := s.Get(key)
+		if err != nil {
+			log.Error("Cannot refresh the key: ", key, ", cancelling watch")
+			delete(s.watches, key)
+			return err
+		}
+		kvi := []KVEntry{&kviTuple{key, entry, index}}
+		callback(kvi)
+	}
+	return nil
+}
+
+// CancelWatch cancels a watch and sends a signal to the
+// appropriate stop channel
+func (s *Etcd) CancelWatch(key string) error {
+	key = format(key)
+	if _, ok := s.watches[key]; !ok {
+		log.Error("Chan does not exist for key: ", key)
+		return ErrWatchDoesNotExist
+	}
+	// Send stop signal to event chan
+	s.watches[key] <- true
+	delete(s.watches, key)
+	return nil
+}
+
+// AtomicPut puts a value at "key" if the key has not been
+// modified in the meantime, and returns an error otherwise
+func (s *Etcd) AtomicPut(key string, oldValue []byte, newValue []byte, index uint64) (bool, error) {
+	fkey := format(key)
+	resp, err := s.client.CompareAndSwap(fkey, string(newValue), 5, string(oldValue), 0)
+	if err != nil {
+		return false, err
+	}
+	if !(resp.Node.Value == string(newValue) && resp.Node.Key == fkey && resp.Node.TTL == 5) {
+		return false, ErrKeyModified
+	}
+	if !(resp.PrevNode.Value == string(oldValue) && resp.PrevNode.Key == fkey) {
+		return false, ErrKeyModified
+	}
+	return true, nil
+}
+
+// AtomicDelete deletes a value at "key" if the key has not
+// been modified in the meantime, and returns an error otherwise
+func (s *Etcd) AtomicDelete(key string, oldValue []byte, index uint64) (bool, error) {
+	fkey := format(key)
+	resp, err := s.client.CompareAndDelete(fkey, string(oldValue), 0)
+	if err != nil {
+		return false, err
+	}
+	if !(resp.PrevNode.Value == string(oldValue) && resp.PrevNode.Key == fkey) {
+		return false, ErrKeyModified
+	}
+	return true, nil
+}
+
+// GetRange gets a range of values at "directory"
+func (s *Etcd) GetRange(prefix string) ([]KVEntry, error) {
+	resp, err := s.client.Get(format(prefix), true, true)
+	if err != nil {
+		return nil, err
+	}
+	kvi := make([]KVEntry, len(resp.Node.Nodes))
+	for i, n := range resp.Node.Nodes {
+		kvi[i] = &kviTuple{n.Key, []byte(n.Value), n.ModifiedIndex}
+	}
+	return kvi, nil
+}
+
+// DeleteRange deletes a range of values at "directory"
+func (s *Etcd) DeleteRange(prefix string) error {
+	if _, err := s.client.Delete(format(prefix), true); err != nil {
+		return err
+	}
+	return nil
+}
+
+// WatchRange triggers a watch on a range of values at "directory"
+func (s *Etcd) WatchRange(prefix string, filter string, _ time.Duration, callback WatchCallback) error {
+	prefix = format(prefix)
+	watchChan := make(chan *etcd.Response)
+	stopChan := make(chan bool)
+
+	// Create new Watch entry
+	s.watches[prefix] = stopChan
+
+	// Start watch
+	go s.client.Watch(prefix, 0, true, watchChan, stopChan)
+	for range watchChan {
+		log.WithField("name", "etcd").Debug("Discovery watch triggered")
+		kvi, err := s.GetRange(prefix)
+		if err != nil {
+			log.Error("Cannot refresh the keys with prefix: ", prefix, ", cancelling watch")
+			delete(s.watches, prefix)
+			return err
+		}
+		callback(kvi)
+	}
+	return nil
+}
+
+// CancelWatchRange stops the watch on the range of values and sends
+// a signal to the appropriate stop channel
+func (s *Etcd) CancelWatchRange(prefix string) error {
+	return s.CancelWatch(format(prefix))
+}
+
+// Acquire the lock for "key"/"directory"
+func (s *Etcd) Acquire(key string, value []byte) (string, error) {
+	return "", ErrNotImplemented
+}
+
+// Release the lock for "key"/"directory"
+func (s *Etcd) Release(session string) error {
+	return ErrNotImplemented
+}
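Watch and WatchRange block on the watch channel until the watch fails or is cancelled, so callers are expected to run them in a separate goroutine and stop them with CancelWatch. A sketch under that assumption; the endpoint and key are illustrative:

```go
package main

import (
	"log"

	"github.com/docker/swarm/pkg/store"
)

func main() {
	kv, err := store.InitializeEtcd([]string{"127.0.0.1:4001"}, store.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// Watch blocks until it fails or is cancelled, so run it aside
	go func() {
		_ = kv.Watch("nodes/n1", 0, func(entries []store.KVEntry) {
			for _, e := range entries {
				log.Printf("changed: %s -> %s", e.Key(), e.Value())
			}
		})
	}()

	// ... later, tear the watch down
	if err := kv.CancelWatch("nodes/n1"); err != nil {
		log.Fatal(err)
	}
}
```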

+ 51 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/helpers.go

@@ -0,0 +1,51 @@
+package store
+
+import (
+	"strings"
+)
+
+// Creates a list of endpoints given the right scheme
+func createEndpoints(addrs []string, scheme string) (entries []string) {
+	for _, addr := range addrs {
+		entries = append(entries, scheme+"://"+addr)
+	}
+	return entries
+}
+
+// Formats the key
+func format(key string) string {
+	return fullpath(splitKey(key))
+}
+
+// Formats the key partially (omits the first '/')
+func partialFormat(key string) string {
+	return partialpath(splitKey(key))
+}
+
+// Get the full directory part of the key
+func getDir(key string) string {
+	parts := splitKey(key)
+	parts = parts[:len(parts)-1]
+	return fullpath(parts)
+}
+
+// splitKey splits the key to extract path information
+func splitKey(key string) (path []string) {
+	if strings.Contains(key, "/") {
+		path = strings.Split(key, "/")
+	} else {
+		path = []string{key}
+	}
+	return path
+}
+
+// Get the full correct path representation of a split key/directory
+func fullpath(path []string) string {
+	return "/" + strings.Join(path, "/")
+}
+
+// Get the partial correct path representation of a split key/directory
+// Omits the first '/'
+func partialpath(path []string) string {
+	return strings.Join(path, "/")
+}
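To make the normalization concrete, a small in-package example of what these helpers return (outputs in comments, traced from the code above):

```go
package store

import "fmt"

// Example outputs for the key helpers above
func ExampleKeyHelpers() {
	fmt.Println(format("a/b/c"))         // /a/b/c
	fmt.Println(partialFormat("a/b/c"))  // a/b/c
	fmt.Println(getDir("a/b/c"))         // /a/b
	fmt.Println(fullpath(splitKey("a"))) // /a
}
```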

+ 92 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/store.go

@@ -0,0 +1,92 @@
+package store
+
+import (
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+)
+
+// WatchCallback is used for watch methods on keys
+// and is triggered on key change
+type WatchCallback func(kviTuple []KVEntry)
+
+// Initialize is the signature of functions that create a new Store and initialize the client
+type Initialize func(addrs []string, options Config) (Store, error)
+
+// Store represents the backend K/V storage.
+// Each store must support every call listed
+// here; otherwise it cannot serve as a K/V
+// backend for libkv
+type Store interface {
+	// Put a value at the specified key
+	Put(key string, value []byte) error
+
+	// Get a value given its key
+	Get(key string) (value []byte, lastIndex uint64, err error)
+
+	// Delete the value at the specified key
+	Delete(key string) error
+
+	// Verify if a Key exists in the store
+	Exists(key string) (bool, error)
+
+	// Watch changes on a key
+	Watch(key string, heartbeat time.Duration, callback WatchCallback) error
+
+	// Cancel watch key
+	CancelWatch(key string) error
+
+	// Acquire the lock at key
+	Acquire(key string, value []byte) (string, error)
+
+	// Release the lock at key
+	Release(session string) error
+
+	// Get range of keys based on prefix
+	GetRange(prefix string) ([]KVEntry, error)
+
+	// Delete range of keys based on prefix
+	DeleteRange(prefix string) error
+
+	// Watch key namespaces
+	WatchRange(prefix string, filter string, heartbeat time.Duration, callback WatchCallback) error
+
+	// Cancel watch key range
+	CancelWatchRange(prefix string) error
+
+	// Atomic operation on a single value
+	AtomicPut(key string, oldValue []byte, newValue []byte, index uint64) (bool, error)
+
+	// Atomic delete of a single value
+	AtomicDelete(key string, oldValue []byte, index uint64) (bool, error)
+}
+
+// KVEntry represents a {Key, Value, LastIndex} tuple
+type KVEntry interface {
+	Key() string
+	Value() []byte
+	LastIndex() uint64
+}
+
+var (
+	// List of Store services
+	stores map[string]Initialize
+)
+
+func init() {
+	stores = make(map[string]Initialize)
+	stores["consul"] = InitializeConsul
+	stores["etcd"] = InitializeEtcd
+	stores["zk"] = InitializeZookeeper
+}
+
+// CreateStore creates an instance of the requested store
+func CreateStore(store string, addrs []string, options Config) (Store, error) {
+	if init, exists := stores[store]; exists {
+		log.WithFields(log.Fields{"store": store}).Debug("Initializing store service")
+		return init(addrs, options)
+	}
+
+	return nil, ErrNotSupported
+}
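CreateStore is the entry point callers are expected to use: it resolves a backend by name and returns a Store. A hedged usage sketch; the backend name, address, and key are illustrative:

```go
package main

import (
	"log"
	"time"

	"github.com/docker/swarm/pkg/store"
)

func main() {
	kv, err := store.CreateStore("etcd", []string{"127.0.0.1:4001"}, store.Config{
		Timeout: 3 * time.Second,
	})
	if err != nil {
		log.Fatal(err) // ErrNotSupported for unregistered backend names
	}

	if err := kv.Put("cluster/leader", []byte("node-1")); err != nil {
		log.Fatal(err)
	}
	value, lastIndex, err := kv.Get("cluster/leader")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("value=%s lastIndex=%d", value, lastIndex)
}
```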

+ 60 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/structs.go

@@ -0,0 +1,60 @@
+package store
+
+import (
+	"crypto/tls"
+	"errors"
+	"time"
+)
+
+var (
+	// ErrNotSupported is exported
+	ErrNotSupported = errors.New("Backend storage not supported yet, please choose another one")
+	// ErrNotImplemented is exported
+	ErrNotImplemented = errors.New("Call not implemented in current backend")
+	// ErrNotReachable is exported
+	ErrNotReachable = errors.New("Api not reachable")
+	// ErrCannotLock is exported
+	ErrCannotLock = errors.New("Error acquiring the lock")
+	// ErrWatchDoesNotExist is exported
+	ErrWatchDoesNotExist = errors.New("No watch found for specified key")
+	// ErrKeyModified is exported
+	ErrKeyModified = errors.New("Unable to complete atomic operation, key modified")
+	// ErrKeyNotFound is exported
+	ErrKeyNotFound = errors.New("Key not found in store")
+)
+
+// KV represents the different supported K/V
+type KV string
+
+const (
+	// CONSUL is exported
+	CONSUL KV = "consul"
+	// ETCD is exported
+	ETCD = "etcd"
+	// ZOOKEEPER is exported
+	ZOOKEEPER = "zookeeper"
+)
+
+// Config contains the options for a storage client
+type Config struct {
+	TLS     *tls.Config
+	Timeout time.Duration
+}
+
+type kviTuple struct {
+	key       string
+	value     []byte
+	lastIndex uint64
+}
+
+func (kvi *kviTuple) Key() string {
+	return kvi.key
+}
+
+func (kvi *kviTuple) Value() []byte {
+	return kvi.value
+}
+
+func (kvi *kviTuple) LastIndex() uint64 {
+	return kvi.lastIndex
+}

+ 213 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/swarm/pkg/store/zookeeper.go

@@ -0,0 +1,213 @@
+package store
+
+import (
+	"strings"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	zk "github.com/samuel/go-zookeeper/zk"
+)
+
+// Zookeeper embeds the zookeeper client
+// and list of watches
+type Zookeeper struct {
+	timeout time.Duration
+	client  *zk.Conn
+	watches map[string]<-chan zk.Event
+}
+
+// InitializeZookeeper creates a new Zookeeper client
+// given a list of endpoints and options
+func InitializeZookeeper(endpoints []string, options Config) (Store, error) {
+	s := &Zookeeper{}
+	s.watches = make(map[string]<-chan zk.Event)
+	s.timeout = 5 * time.Second // default timeout
+
+	if options.Timeout != 0 {
+		s.setTimeout(options.Timeout)
+	}
+
+	conn, _, err := zk.Connect(endpoints, s.timeout)
+	if err != nil {
+		log.Error(err)
+		return nil, err
+	}
+	s.client = conn
+	return s, nil
+}
+
+// setTimeout sets the timeout for connecting to Zookeeper
+func (s *Zookeeper) setTimeout(time time.Duration) {
+	s.timeout = time
+}
+
+// Get the value at "key", returns the last modified index
+// to use in conjunction with CAS calls
+func (s *Zookeeper) Get(key string) (value []byte, lastIndex uint64, err error) {
+	resp, meta, err := s.client.Get(format(key))
+	if err != nil {
+		return nil, 0, err
+	}
+	if resp == nil {
+		return nil, 0, ErrKeyNotFound
+	}
+	return resp, uint64(meta.Mzxid), nil
+}
+
+// Create the entire path for a directory that does not exist
+func (s *Zookeeper) createFullpath(path []string) error {
+	for i := 1; i <= len(path); i++ {
+		newpath := "/" + strings.Join(path[:i], "/")
+		_, err := s.client.Create(newpath, []byte{1}, 0, zk.WorldACL(zk.PermAll))
+		if err != nil {
+			// Skip if node already exists
+			if err != zk.ErrNodeExists {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Put a value at "key"
+func (s *Zookeeper) Put(key string, value []byte) error {
+	fkey := format(key)
+	exists, err := s.Exists(key)
+	if err != nil {
+		return err
+	}
+	if !exists {
+		if err := s.createFullpath(splitKey(key)); err != nil {
+			return err
+		}
+	}
+	_, err = s.client.Set(fkey, value, -1)
+	return err
+}
+
+// Delete a value at "key"
+func (s *Zookeeper) Delete(key string) error {
+	err := s.client.Delete(format(key), -1)
+	return err
+}
+
+// Exists checks if the key exists inside the store
+func (s *Zookeeper) Exists(key string) (bool, error) {
+	exists, _, err := s.client.Exists(format(key))
+	if err != nil {
+		return false, err
+	}
+	return exists, nil
+}
+
+// Watch a single key for modifications
+func (s *Zookeeper) Watch(key string, _ time.Duration, callback WatchCallback) error {
+	fkey := format(key)
+	_, _, eventChan, err := s.client.GetW(fkey)
+	if err != nil {
+		return err
+	}
+
+	// Create a new Watch entry with eventChan
+	s.watches[fkey] = eventChan
+
+	for e := range eventChan {
+		if e.Type == zk.EventNodeDataChanged {
+			log.WithField("name", "zk").Debug("Discovery watch triggered")
+			entry, index, err := s.Get(key)
+			if err == nil {
+				callback([]KVEntry{&kviTuple{key, entry, index}})
+			}
+		}
+	}
+
+	return nil
+}
+
+// CancelWatch cancels a watch by removing its entry
+// from the watch map
+func (s *Zookeeper) CancelWatch(key string) error {
+	key = format(key)
+	if _, ok := s.watches[key]; !ok {
+		log.Error("Chan does not exist for key: ", key)
+		return ErrWatchDoesNotExist
+	}
+	// Just remove the watch entry for the key
+	delete(s.watches, key)
+	return nil
+}
+
+// GetRange gets a range of values at "directory"
+func (s *Zookeeper) GetRange(prefix string) (kvi []KVEntry, err error) {
+	prefix = format(prefix)
+	entries, stat, err := s.client.Children(prefix)
+	if err != nil {
+		log.Error("Cannot fetch range of keys beginning with prefix: ", prefix)
+		return nil, err
+	}
+	for _, item := range entries {
+		kvi = append(kvi, &kviTuple{prefix, []byte(item), uint64(stat.Mzxid)})
+	}
+	return kvi, err
+}
+
+// DeleteRange deletes a range of values at "directory"
+func (s *Zookeeper) DeleteRange(prefix string) error {
+	err := s.client.Delete(format(prefix), -1)
+	return err
+}
+
+// WatchRange triggers a watch on a range of values at "directory"
+func (s *Zookeeper) WatchRange(prefix string, filter string, _ time.Duration, callback WatchCallback) error {
+	fprefix := format(prefix)
+	_, _, eventChan, err := s.client.ChildrenW(fprefix)
+	if err != nil {
+		return err
+	}
+
+	// Create a new Watch entry with eventChan
+	s.watches[fprefix] = eventChan
+
+	for e := range eventChan {
+		if e.Type == zk.EventNodeChildrenChanged {
+			log.WithField("name", "zk").Debug("Discovery watch triggered")
+			kvi, err := s.GetRange(prefix)
+			if err == nil {
+				callback(kvi)
+			}
+		}
+	}
+
+	return nil
+}
+
+// CancelWatchRange stops the watch on the range of values by
+// removing its entry from the watch map
+func (s *Zookeeper) CancelWatchRange(prefix string) error {
+	return s.CancelWatch(prefix)
+}
+
+// AtomicPut puts a value at "key" if the key has not been
+// modified in the meantime, and returns an error otherwise
+func (s *Zookeeper) AtomicPut(key string, oldValue []byte, newValue []byte, index uint64) (bool, error) {
+	// Use index of Set method to implement CAS
+	return false, ErrNotImplemented
+}
+
+// AtomicDelete deletes a value at "key" if the key has not
+// been modified in the meantime, and returns an error otherwise
+func (s *Zookeeper) AtomicDelete(key string, oldValue []byte, index uint64) (bool, error) {
+	return false, ErrNotImplemented
+}
+
+// Acquire the lock for "key"/"directory"
+func (s *Zookeeper) Acquire(path string, value []byte) (string, error) {
+	// lock := zk.NewLock(s.client, path, nil)
+	// locks[path] = lock
+	// lock.Lock()
+	return "", ErrNotImplemented
+}
+
+// Release the lock for "key"/"directory"
+func (s *Zookeeper) Release(session string) error {
+	return ErrNotImplemented
+}
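Because Put creates the full path for missing keys, a deep key can be written in one call. A sketch assuming a local Zookeeper on the default port; the key and value are illustrative:

```go
package main

import (
	"log"
	"time"

	"github.com/docker/swarm/pkg/store"
)

func main() {
	kv, err := store.InitializeZookeeper([]string{"127.0.0.1:2181"}, store.Config{
		Timeout: 10 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Put creates /docker, /docker/nodes and /docker/nodes/n1 as needed
	if err := kv.Put("docker/nodes/n1", []byte("192.168.0.1:2375")); err != nil {
		log.Fatal(err)
	}
}
```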

+ 39 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md

@@ -0,0 +1,39 @@
+Consul API client
+=================
+
+This is the `api` package, which provides programmatic
+access to the full Consul API.
+
+Currently, all of the Consul APIs included in version 0.3 are supported.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/consul/api)
+
+Usage
+=====
+
+Below is an example of using the Consul client:
+
+```go
+// Get a new client, with KV endpoints
+client, _ := api.NewClient(api.DefaultConfig())
+kv := client.KV()
+
+// PUT a new KV pair
+p := &api.KVPair{Key: "foo", Value: []byte("test")}
+_, err := kv.Put(p, nil)
+if err != nil {
+    panic(err)
+}
+
+// Lookup the pair
+pair, _, err := kv.Get("foo", nil)
+if err != nil {
+    panic(err)
+}
+fmt.Printf("KV: %v", pair)
+
+```
+

+ 140 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go

@@ -0,0 +1,140 @@
+package api
+
+const (
+	// ACLClientType is the client type token
+	ACLClientType = "client"
+
+	// ACLManagementType is the management type token
+	ACLManagementType = "management"
+)
+
+// ACLEntry is used to represent an ACL entry
+type ACLEntry struct {
+	CreateIndex uint64
+	ModifyIndex uint64
+	ID          string
+	Name        string
+	Type        string
+	Rules       string
+}
+
+// ACL can be used to query the ACL endpoints
+type ACL struct {
+	c *Client
+}
+
+// ACL returns a handle to the ACL endpoints
+func (c *Client) ACL() *ACL {
+	return &ACL{c}
+}
+
+// Create is used to generate a new token with the given parameters
+func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/create")
+	r.setWriteOptions(q)
+	r.obj = acl
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Update is used to update the rules of an existing token
+func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/update")
+	r.setWriteOptions(q)
+	r.obj = acl
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// Destroy is used to destroy a given ACL token ID
+func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// Clone is used to return a new token cloned from an existing one
+func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Info is used to query for information about an ACL token
+func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/info/"+id)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// List is used to get all the ACL tokens
+func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/list")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
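A hedged sketch of the token lifecycle these endpoints implement, assuming the agent's default token has management rights; the name and rule text are illustrative:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	acl := client.ACL()

	id, _, err := acl.Create(&api.ACLEntry{
		Name:  "read-only", // name and rules are illustrative
		Type:  api.ACLClientType,
		Rules: `key "" { policy = "read" }`,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := acl.Destroy(id, nil); err != nil {
		log.Fatal(err)
	}
}
```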

+ 148 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl_test.go

@@ -0,0 +1,148 @@
+package api
+
+import (
+	"os"
+	"testing"
+)
+
+// CONSUL_ROOT is a management token for the tests
+var CONSUL_ROOT string
+
+func init() {
+	CONSUL_ROOT = os.Getenv("CONSUL_ROOT")
+}
+
+func TestACL_CreateDestroy(t *testing.T) {
+	if CONSUL_ROOT == "" {
+		t.SkipNow()
+	}
+	c, s := makeClient(t)
+	defer s.stop()
+
+	c.config.Token = CONSUL_ROOT
+	acl := c.ACL()
+
+	ae := ACLEntry{
+		Name:  "API test",
+		Type:  ACLClientType,
+		Rules: `key "" { policy = "deny" }`,
+	}
+
+	id, wm, err := acl.Create(&ae, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if wm.RequestTime == 0 {
+		t.Fatalf("bad: %v", wm)
+	}
+
+	if id == "" {
+		t.Fatalf("invalid: %v", id)
+	}
+
+	ae2, _, err := acl.Info(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules {
+		t.Fatalf("Bad: %#v", ae2)
+	}
+
+	wm, err = acl.Destroy(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if wm.RequestTime == 0 {
+		t.Fatalf("bad: %v", wm)
+	}
+}
+
+func TestACL_CloneDestroy(t *testing.T) {
+	if CONSUL_ROOT == "" {
+		t.SkipNow()
+	}
+	c, s := makeClient(t)
+	defer s.stop()
+
+	c.config.Token = CONSUL_ROOT
+	acl := c.ACL()
+
+	id, wm, err := acl.Clone(CONSUL_ROOT, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if wm.RequestTime == 0 {
+		t.Fatalf("bad: %v", wm)
+	}
+
+	if id == "" {
+		t.Fatalf("invalid: %v", id)
+	}
+
+	wm, err = acl.Destroy(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if wm.RequestTime == 0 {
+		t.Fatalf("bad: %v", wm)
+	}
+}
+
+func TestACL_Info(t *testing.T) {
+	if CONSUL_ROOT == "" {
+		t.SkipNow()
+	}
+	c, s := makeClient(t)
+	defer s.stop()
+
+	c.config.Token = CONSUL_ROOT
+	acl := c.ACL()
+
+	ae, qm, err := acl.Info(CONSUL_ROOT, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if qm.LastIndex == 0 {
+		t.Fatalf("bad: %v", qm)
+	}
+	if !qm.KnownLeader {
+		t.Fatalf("bad: %v", qm)
+	}
+
+	if ae == nil || ae.ID != CONSUL_ROOT || ae.Type != ACLManagementType {
+		t.Fatalf("bad: %#v", ae)
+	}
+}
+
+func TestACL_List(t *testing.T) {
+	if CONSUL_ROOT == "" {
+		t.SkipNow()
+	}
+	c, s := makeClient(t)
+	defer s.stop()
+
+	c.config.Token = CONSUL_ROOT
+	acl := c.ACL()
+
+	acls, qm, err := acl.List(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if len(acls) < 2 {
+		t.Fatalf("bad: %v", acls)
+	}
+
+	if qm.LastIndex == 0 {
+		t.Fatalf("bad: %v", qm)
+	}
+	if !qm.KnownLeader {
+		t.Fatalf("bad: %v", qm)
+	}
+}

+ 331 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go

@@ -0,0 +1,331 @@
+package api
+
+import (
+	"fmt"
+)
+
+// AgentCheck represents a check known to the agent
+type AgentCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+}
+
+// AgentService represents a service known to the agent
+type AgentService struct {
+	ID      string
+	Service string
+	Tags    []string
+	Port    int
+	Address string
+}
+
+// AgentMember represents a cluster member known to the agent
+type AgentMember struct {
+	Name        string
+	Addr        string
+	Port        uint16
+	Tags        map[string]string
+	Status      int
+	ProtocolMin uint8
+	ProtocolMax uint8
+	ProtocolCur uint8
+	DelegateMin uint8
+	DelegateMax uint8
+	DelegateCur uint8
+}
+
+// AgentServiceRegistration is used to register a new service
+type AgentServiceRegistration struct {
+	ID      string   `json:",omitempty"`
+	Name    string   `json:",omitempty"`
+	Tags    []string `json:",omitempty"`
+	Port    int      `json:",omitempty"`
+	Address string   `json:",omitempty"`
+	Check   *AgentServiceCheck
+	Checks  AgentServiceChecks
+}
+
+// AgentCheckRegistration is used to register a new check
+type AgentCheckRegistration struct {
+	ID        string `json:",omitempty"`
+	Name      string `json:",omitempty"`
+	Notes     string `json:",omitempty"`
+	ServiceID string `json:",omitempty"`
+	AgentServiceCheck
+}
+
+// AgentServiceCheck is used to create an associated
+// check for a service
+type AgentServiceCheck struct {
+	Script   string `json:",omitempty"`
+	Interval string `json:",omitempty"`
+	TTL      string `json:",omitempty"`
+}
+type AgentServiceChecks []*AgentServiceCheck
+
+// Agent can be used to query the Agent endpoints
+type Agent struct {
+	c *Client
+
+	// cache the node name
+	nodeName string
+}
+
+// Agent returns a handle to the agent endpoints
+func (c *Client) Agent() *Agent {
+	return &Agent{c: c}
+}
+
+// Self is used to query the agent we are speaking to for
+// information about itself
+func (a *Agent) Self() (map[string]map[string]interface{}, error) {
+	r := a.c.newRequest("GET", "/v1/agent/self")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]map[string]interface{}
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// NodeName is used to get the node name of the agent
+func (a *Agent) NodeName() (string, error) {
+	if a.nodeName != "" {
+		return a.nodeName, nil
+	}
+	info, err := a.Self()
+	if err != nil {
+		return "", err
+	}
+	name := info["Config"]["NodeName"].(string)
+	a.nodeName = name
+	return name, nil
+}
+
+// Checks returns the locally registered checks
+func (a *Agent) Checks() (map[string]*AgentCheck, error) {
+	r := a.c.newRequest("GET", "/v1/agent/checks")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]*AgentCheck
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Services returns the locally registered services
+func (a *Agent) Services() (map[string]*AgentService, error) {
+	r := a.c.newRequest("GET", "/v1/agent/services")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]*AgentService
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Members returns the known gossip members. The WAN
+// flag can be used to query a server for WAN members.
+func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
+	r := a.c.newRequest("GET", "/v1/agent/members")
+	if wan {
+		r.params.Set("wan", "1")
+	}
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*AgentMember
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// ServiceRegister is used to register a new service with
+// the local agent
+func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/register")
+	r.obj = service
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// ServiceDeregister is used to deregister a service with
+// the local agent
+func (a *Agent) ServiceDeregister(serviceID string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// PassTTL is used to set a TTL check to the passing state
+func (a *Agent) PassTTL(checkID, note string) error {
+	return a.UpdateTTL(checkID, note, "pass")
+}
+
+// WarnTTL is used to set a TTL check to the warning state
+func (a *Agent) WarnTTL(checkID, note string) error {
+	return a.UpdateTTL(checkID, note, "warn")
+}
+
+// FailTTL is used to set a TTL check to the failing state
+func (a *Agent) FailTTL(checkID, note string) error {
+	return a.UpdateTTL(checkID, note, "fail")
+}
+
+// UpdateTTL is used to update the TTL of a check
+func (a *Agent) UpdateTTL(checkID, note, status string) error {
+	switch status {
+	case "pass":
+	case "warn":
+	case "fail":
+	default:
+		return fmt.Errorf("Invalid status: %s", status)
+	}
+	endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
+	r := a.c.newRequest("PUT", endpoint)
+	r.params.Set("note", note)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// CheckRegister is used to register a new check with
+// the local agent
+func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
+	r := a.c.newRequest("PUT", "/v1/agent/check/register")
+	r.obj = check
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// CheckDeregister is used to deregister a check with
+// the local agent
+func (a *Agent) CheckDeregister(checkID string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// Join is used to instruct the agent to attempt a join to
+// another cluster member
+func (a *Agent) Join(addr string, wan bool) error {
+	r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
+	if wan {
+		r.params.Set("wan", "1")
+	}
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// ForceLeave is used to have the agent eject a failed node
+func (a *Agent) ForceLeave(node string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// EnableServiceMaintenance toggles service maintenance mode on
+// for the given service ID.
+func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
+	r.params.Set("enable", "true")
+	r.params.Set("reason", reason)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// DisableServiceMaintenance toggles service maintenance mode off
+// for the given service ID.
+func (a *Agent) DisableServiceMaintenance(serviceID string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
+	r.params.Set("enable", "false")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// EnableNodeMaintenance toggles node maintenance mode on for the
+// agent we are connected to.
+func (a *Agent) EnableNodeMaintenance(reason string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/maintenance")
+	r.params.Set("enable", "true")
+	r.params.Set("reason", reason)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// DisableNodeMaintenance toggles node maintenance mode off for the
+// agent we are connected to.
+func (a *Agent) DisableNodeMaintenance() error {
+	r := a.c.newRequest("PUT", "/v1/agent/maintenance")
+	r.params.Set("enable", "false")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}

+ 404 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent_test.go

@@ -0,0 +1,404 @@
+package api
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestAgent_Self(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	info, err := agent.Self()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	name := info["Config"]["NodeName"]
+	if name == "" {
+		t.Fatalf("bad: %v", info)
+	}
+}
+
+func TestAgent_Members(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	members, err := agent.Members(false)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if len(members) != 1 {
+		t.Fatalf("bad: %v", members)
+	}
+}
+
+func TestAgent_Services(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	reg := &AgentServiceRegistration{
+		Name: "foo",
+		Tags: []string{"bar", "baz"},
+		Port: 8000,
+		Check: &AgentServiceCheck{
+			TTL: "15s",
+		},
+	}
+	if err := agent.ServiceRegister(reg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	services, err := agent.Services()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if _, ok := services["foo"]; !ok {
+		t.Fatalf("missing service: %v", services)
+	}
+
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if _, ok := checks["service:foo"]; !ok {
+		t.Fatalf("missing check: %v", checks)
+	}
+
+	if err := agent.ServiceDeregister("foo"); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestAgent_ServiceAddress(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	reg1 := &AgentServiceRegistration{
+		Name:    "foo1",
+		Port:    8000,
+		Address: "192.168.0.42",
+	}
+	reg2 := &AgentServiceRegistration{
+		Name: "foo2",
+		Port: 8000,
+	}
+	if err := agent.ServiceRegister(reg1); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if err := agent.ServiceRegister(reg2); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	services, err := agent.Services()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if _, ok := services["foo1"]; !ok {
+		t.Fatalf("missing service: %v", services)
+	}
+	if _, ok := services["foo2"]; !ok {
+		t.Fatalf("missing service: %v", services)
+	}
+
+	if services["foo1"].Address != "192.168.0.42" {
+		t.Fatalf("missing Address field in service foo1: %v", services)
+	}
+	if services["foo2"].Address != "" {
+		t.Fatalf("missing Address field in service foo2: %v", services)
+	}
+
+	if err := agent.ServiceDeregister("foo1"); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if err := agent.ServiceDeregister("foo2"); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestAgent_Services_MultipleChecks(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	reg := &AgentServiceRegistration{
+		Name: "foo",
+		Tags: []string{"bar", "baz"},
+		Port: 8000,
+		Checks: AgentServiceChecks{
+			&AgentServiceCheck{
+				TTL: "15s",
+			},
+			&AgentServiceCheck{
+				TTL: "30s",
+			},
+		},
+	}
+	if err := agent.ServiceRegister(reg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	services, err := agent.Services()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if _, ok := services["foo"]; !ok {
+		t.Fatalf("missing service: %v", services)
+	}
+
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if _, ok := checks["service:foo:1"]; !ok {
+		t.Fatalf("missing check: %v", checks)
+	}
+	if _, ok := checks["service:foo:2"]; !ok {
+		t.Fatalf("missing check: %v", checks)
+	}
+}
+
+func TestAgent_SetTTLStatus(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	reg := &AgentServiceRegistration{
+		Name: "foo",
+		Check: &AgentServiceCheck{
+			TTL: "15s",
+		},
+	}
+	if err := agent.ServiceRegister(reg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if err := agent.WarnTTL("service:foo", "test"); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	chk, ok := checks["service:foo"]
+	if !ok {
+		t.Fatalf("missing check: %v", checks)
+	}
+	if chk.Status != "warning" {
+		t.Fatalf("Bad: %#v", chk)
+	}
+	if chk.Output != "test" {
+		t.Fatalf("Bad: %#v", chk)
+	}
+
+	if err := agent.ServiceDeregister("foo"); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestAgent_Checks(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	reg := &AgentCheckRegistration{
+		Name: "foo",
+	}
+	reg.TTL = "15s"
+	if err := agent.CheckRegister(reg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if _, ok := checks["foo"]; !ok {
+		t.Fatalf("missing check: %v", checks)
+	}
+
+	if err := agent.CheckDeregister("foo"); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestAgent_Checks_serviceBound(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	// First register a service
+	serviceReg := &AgentServiceRegistration{
+		Name: "redis",
+	}
+	if err := agent.ServiceRegister(serviceReg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Register a check bound to the service
+	reg := &AgentCheckRegistration{
+		Name:      "redischeck",
+		ServiceID: "redis",
+	}
+	reg.TTL = "15s"
+	if err := agent.CheckRegister(reg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	check, ok := checks["redischeck"]
+	if !ok {
+		t.Fatalf("missing check: %v", checks)
+	}
+	if check.ServiceID != "redis" {
+		t.Fatalf("missing service association for check: %v", check)
+	}
+}
+
+func TestAgent_Join(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	info, err := agent.Self()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Join ourself
+	addr := info["Config"]["AdvertiseAddr"].(string)
+	err = agent.Join(addr, false)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestAgent_ForceLeave(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	// Eject somebody
+	err := agent.ForceLeave("foo")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestServiceMaintenance(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	// First register a service
+	serviceReg := &AgentServiceRegistration{
+		Name: "redis",
+	}
+	if err := agent.ServiceRegister(serviceReg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Enable maintenance mode
+	if err := agent.EnableServiceMaintenance("redis", "broken"); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Ensure a critical check was added
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	found := false
+	for _, check := range checks {
+		if strings.Contains(check.CheckID, "maintenance") {
+			found = true
+			if check.Status != "critical" || check.Notes != "broken" {
+				t.Fatalf("bad: %#v", checks)
+			}
+		}
+	}
+	if !found {
+		t.Fatalf("bad: %#v", checks)
+	}
+
+	// Disable maintenance mode
+	if err := agent.DisableServiceMaintenance("redis"); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Ensure the critical health check was removed
+	checks, err = agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	for _, check := range checks {
+		if strings.Contains(check.CheckID, "maintenance") {
+			t.Fatalf("should have removed health check")
+		}
+	}
+}
+
+func TestNodeMaintenance(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+
+	// Enable maintenance mode
+	if err := agent.EnableNodeMaintenance("broken"); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Check that a critical check was added
+	checks, err := agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	found := false
+	for _, check := range checks {
+		if strings.Contains(check.CheckID, "maintenance") {
+			found = true
+			if check.Status != "critical" || check.Notes != "broken" {
+				t.Fatalf("bad: %#v", checks)
+			}
+		}
+	}
+	if !found {
+		t.Fatalf("bad: %#v", checks)
+	}
+
+	// Disable maintenance mode
+	if err := agent.DisableNodeMaintenance(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Ensure the check was removed
+	checks, err = agent.Checks()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	for _, check := range checks {
+		if strings.Contains(check.CheckID, "maintenance") {
+			t.Fatalf("should have removed health check")
+		}
+	}
+}

+ 351 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go

@@ -0,0 +1,351 @@
+package api
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// QueryOptions are used to parameterize a query
+type QueryOptions struct {
+	// Providing a datacenter overrides the DC provided
+	// by the Config
+	Datacenter string
+
+	// AllowStale allows any Consul server (non-leader) to service
+	// a read. This allows for lower latency and higher throughput
+	AllowStale bool
+
+	// RequireConsistent forces the read to be fully consistent.
+	// This is more expensive but prevents ever performing a stale
+	// read.
+	RequireConsistent bool
+
+	// WaitIndex is used to enable a blocking query. Waits
+	// until the timeout or the next index is reached
+	WaitIndex uint64
+
+	// WaitTime is used to bound the duration of a wait.
+	// Defaults to that of the Config, but can be overridden.
+	WaitTime time.Duration
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+}
+
+// WriteOptions are used to parameterize a write
+type WriteOptions struct {
+	// Providing a datacenter overrides the DC provided
+	// by the Config
+	Datacenter string
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+}
+
+// QueryMeta is used to return meta data about a query
+type QueryMeta struct {
+	// LastIndex. This can be used as a WaitIndex to perform
+	// a blocking query
+	LastIndex uint64
+
+	// Time of last contact from the leader for the
+	// server servicing the request
+	LastContact time.Duration
+
+	// Is there a known leader
+	KnownLeader bool
+
+	// How long did the request take
+	RequestTime time.Duration
+}
+
+// WriteMeta is used to return meta data about a write
+type WriteMeta struct {
+	// How long did the request take
+	RequestTime time.Duration
+}
+
+// HttpBasicAuth is used to authenticate an HTTP client with HTTP Basic Authentication
+type HttpBasicAuth struct {
+	// Username to use for HTTP Basic Authentication
+	Username string
+
+	// Password to use for HTTP Basic Authentication
+	Password string
+}
+
+// Config is used to configure the creation of a client
+type Config struct {
+	// Address is the address of the Consul server
+	Address string
+
+	// Scheme is the URI scheme for the Consul server
+	Scheme string
+
+	// Datacenter to use. If not provided, the default agent datacenter is used.
+	Datacenter string
+
+	// HttpClient is the client to use. Default will be
+	// used if not provided.
+	HttpClient *http.Client
+
+	// HttpAuth is the auth info to use for http access.
+	HttpAuth *HttpBasicAuth
+
+	// WaitTime limits how long a Watch will block. If not provided,
+	// the agent default values will be used.
+	WaitTime time.Duration
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+}
+
+// DefaultConfig returns a default configuration for the client
+func DefaultConfig() *Config {
+	config := &Config{
+		Address:    "127.0.0.1:8500",
+		Scheme:     "http",
+		HttpClient: http.DefaultClient,
+	}
+
+	if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" {
+		config.Address = addr
+	}
+
+	return config
+}
+
+// Client provides a client to the Consul API
+type Client struct {
+	config Config
+}
+
+// NewClient returns a new client
+func NewClient(config *Config) (*Client, error) {
+	// bootstrap the config
+	defConfig := DefaultConfig()
+
+	if len(config.Address) == 0 {
+		config.Address = defConfig.Address
+	}
+
+	if len(config.Scheme) == 0 {
+		config.Scheme = defConfig.Scheme
+	}
+
+	if config.HttpClient == nil {
+		config.HttpClient = defConfig.HttpClient
+	}
+
+	if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 {
+		config.HttpClient = &http.Client{
+			Transport: &http.Transport{
+				Dial: func(_, _ string) (net.Conn, error) {
+					return net.Dial("unix", parts[1])
+				},
+			},
+		}
+		config.Address = parts[1]
+	}
+
+	client := &Client{
+		config: *config,
+	}
+	return client, nil
+}
+
+// request is used to help build up a request
+type request struct {
+	config *Config
+	method string
+	url    *url.URL
+	params url.Values
+	body   io.Reader
+	obj    interface{}
+}
+
+// setQueryOptions is used to annotate the request with
+// additional query options
+func (r *request) setQueryOptions(q *QueryOptions) {
+	if q == nil {
+		return
+	}
+	if q.Datacenter != "" {
+		r.params.Set("dc", q.Datacenter)
+	}
+	if q.AllowStale {
+		r.params.Set("stale", "")
+	}
+	if q.RequireConsistent {
+		r.params.Set("consistent", "")
+	}
+	if q.WaitIndex != 0 {
+		r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
+	}
+	if q.WaitTime != 0 {
+		r.params.Set("wait", durToMsec(q.WaitTime))
+	}
+	if q.Token != "" {
+		r.params.Set("token", q.Token)
+	}
+}
+
+// durToMsec converts a duration to a string in milliseconds
+func durToMsec(dur time.Duration) string {
+	return fmt.Sprintf("%dms", dur/time.Millisecond)
+}
+
+// setWriteOptions is used to annotate the request with
+// additional write options
+func (r *request) setWriteOptions(q *WriteOptions) {
+	if q == nil {
+		return
+	}
+	if q.Datacenter != "" {
+		r.params.Set("dc", q.Datacenter)
+	}
+	if q.Token != "" {
+		r.params.Set("token", q.Token)
+	}
+}
+
+// toHTTP converts the request to an HTTP request
+func (r *request) toHTTP() (*http.Request, error) {
+	// Encode the query parameters
+	r.url.RawQuery = r.params.Encode()
+
+	// Check if we should encode the body
+	if r.body == nil && r.obj != nil {
+		if b, err := encodeBody(r.obj); err != nil {
+			return nil, err
+		} else {
+			r.body = b
+		}
+	}
+
+	// Create the HTTP request
+	req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
+	if err != nil {
+		return nil, err
+	}
+
+	req.URL.Host = r.url.Host
+	req.URL.Scheme = r.url.Scheme
+	req.Host = r.url.Host
+
+	// Setup auth
+	if r.config.HttpAuth != nil {
+		req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
+	}
+
+	return req, nil
+}
+
+// newRequest is used to create a new request
+func (c *Client) newRequest(method, path string) *request {
+	r := &request{
+		config: &c.config,
+		method: method,
+		url: &url.URL{
+			Scheme: c.config.Scheme,
+			Host:   c.config.Address,
+			Path:   path,
+		},
+		params: make(map[string][]string),
+	}
+	if c.config.Datacenter != "" {
+		r.params.Set("dc", c.config.Datacenter)
+	}
+	if c.config.WaitTime != 0 {
+		r.params.Set("wait", durToMsec(r.config.WaitTime))
+	}
+	if c.config.Token != "" {
+		r.params.Set("token", r.config.Token)
+	}
+	return r
+}
+
+// doRequest runs a request with our client
+func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
+	req, err := r.toHTTP()
+	if err != nil {
+		return 0, nil, err
+	}
+	start := time.Now()
+	resp, err := c.config.HttpClient.Do(req)
+	diff := time.Since(start)
+	return diff, resp, err
+}
+
+// parseQueryMeta is used to help parse query meta-data
+func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
+	header := resp.Header
+
+	// Parse the X-Consul-Index
+	index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64)
+	if err != nil {
+		return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
+	}
+	q.LastIndex = index
+
+	// Parse the X-Consul-LastContact
+	last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
+	if err != nil {
+		return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
+	}
+	q.LastContact = time.Duration(last) * time.Millisecond
+
+	// Parse the X-Consul-KnownLeader
+	switch header.Get("X-Consul-KnownLeader") {
+	case "true":
+		q.KnownLeader = true
+	default:
+		q.KnownLeader = false
+	}
+	return nil
+}
+
+// decodeBody is used to JSON decode a body
+func decodeBody(resp *http.Response, out interface{}) error {
+	dec := json.NewDecoder(resp.Body)
+	return dec.Decode(out)
+}
+
+// encodeBody is used to encode a request body
+func encodeBody(obj interface{}) (io.Reader, error) {
+	buf := bytes.NewBuffer(nil)
+	enc := json.NewEncoder(buf)
+	if err := enc.Encode(obj); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+// requireOK is used to wrap doRequest and check for a 200
+func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
+	if e != nil {
+		if resp != nil {
+			resp.Body.Close()
+		}
+		return d, nil, e
+	}
+	if resp.StatusCode != 200 {
+		var buf bytes.Buffer
+		io.Copy(&buf, resp.Body)
+		resp.Body.Close()
+		return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+	}
+	return d, resp, nil
+}
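QueryOptions.WaitIndex and QueryMeta.LastIndex together implement Consul's blocking queries. A sketch of the resulting long-poll loop, using the KV endpoint shown in the README above; the key name is illustrative:

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	var waitIndex uint64
	for {
		// With WaitIndex set, the request blocks until the index
		// advances past it or WaitTime elapses
		pair, meta, err := kv.Get("foo", &api.QueryOptions{
			WaitIndex: waitIndex,
			WaitTime:  30 * time.Second,
		})
		if err != nil {
			log.Fatal(err)
		}
		if pair != nil && meta.LastIndex != waitIndex {
			log.Printf("foo changed: %s", pair.Value)
		}
		waitIndex = meta.LastIndex
	}
}
```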

+ 339 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/api_test.go

@@ -0,0 +1,339 @@
+package api
+
+import (
+	crand "crypto/rand"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/consul/testutil"
+)
+
+var consulConfig = `{
+	"ports": {
+		"dns": 19000,
+		"http": 18800,
+		"rpc": 18600,
+		"serf_lan": 18200,
+		"serf_wan": 18400,
+		"server": 18000
+	},
+	"bind_addr": "127.0.0.1",
+	"data_dir": "%s",
+	"bootstrap": true,
+	"log_level": "debug",
+	"server": true
+}`
+
+type testServer struct {
+	pid        int
+	dataDir    string
+	configFile string
+}
+
+type testPortConfig struct {
+	DNS     int `json:"dns,omitempty"`
+	HTTP    int `json:"http,omitempty"`
+	RPC     int `json:"rpc,omitempty"`
+	SerfLan int `json:"serf_lan,omitempty"`
+	SerfWan int `json:"serf_wan,omitempty"`
+	Server  int `json:"server,omitempty"`
+}
+
+type testAddressConfig struct {
+	HTTP string `json:"http,omitempty"`
+}
+
+type testServerConfig struct {
+	Bootstrap bool               `json:"bootstrap,omitempty"`
+	Server    bool               `json:"server,omitempty"`
+	DataDir   string             `json:"data_dir,omitempty"`
+	LogLevel  string             `json:"log_level,omitempty"`
+	Addresses *testAddressConfig `json:"addresses,omitempty"`
+	Ports     testPortConfig     `json:"ports,omitempty"`
+}
+
+// Callback functions for modifying config
+type configCallback func(c *Config)
+type serverConfigCallback func(c *testServerConfig)
+
+func defaultConfig() *testServerConfig {
+	return &testServerConfig{
+		Bootstrap: true,
+		Server:    true,
+		LogLevel:  "debug",
+		Ports: testPortConfig{
+			DNS:     19000,
+			HTTP:    18800,
+			RPC:     18600,
+			SerfLan: 18200,
+			SerfWan: 18400,
+			Server:  18000,
+		},
+	}
+}
+
+func (s *testServer) stop() {
+	defer os.RemoveAll(s.dataDir)
+	defer os.RemoveAll(s.configFile)
+
+	cmd := exec.Command("kill", "-9", fmt.Sprintf("%d", s.pid))
+	if err := cmd.Run(); err != nil {
+		panic(err)
+	}
+}
+
+func newTestServer(t *testing.T) *testServer {
+	return newTestServerWithConfig(t, func(c *testServerConfig) {})
+}
+
+func newTestServerWithConfig(t *testing.T, cb serverConfigCallback) *testServer {
+	if path, err := exec.LookPath("consul"); err != nil || path == "" {
+		t.Log("consul not found on $PATH, skipping")
+		t.SkipNow()
+	}
+
+	pidFile, err := ioutil.TempFile("", "consul")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	pidFile.Close()
+	os.Remove(pidFile.Name())
+
+	dataDir, err := ioutil.TempDir("", "consul")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	configFile, err := ioutil.TempFile("", "consul")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	consulConfig := defaultConfig()
+	consulConfig.DataDir = dataDir
+
+	cb(consulConfig)
+
+	configContent, err := json.Marshal(consulConfig)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if _, err := configFile.Write(configContent); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	configFile.Close()
+
+	// Start the server
+	cmd := exec.Command("consul", "agent", "-config-file", configFile.Name())
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Start(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	return &testServer{
+		pid:        cmd.Process.Pid,
+		dataDir:    dataDir,
+		configFile: configFile.Name(),
+	}
+}
+
+func makeClient(t *testing.T) (*Client, *testServer) {
+	return makeClientWithConfig(t, func(c *Config) {
+		c.Address = "127.0.0.1:18800"
+	}, func(c *testServerConfig) {})
+}
+
+func makeClientWithConfig(t *testing.T, cb1 configCallback, cb2 serverConfigCallback) (*Client, *testServer) {
+	// Make client config
+	conf := DefaultConfig()
+	cb1(conf)
+
+	// Create client
+	client, err := NewClient(conf)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Create server
+	server := newTestServerWithConfig(t, cb2)
+
+	// Allow the server some time to start, and verify we have a leader.
+	testutil.WaitForResult(func() (bool, error) {
+		req := client.newRequest("GET", "/v1/catalog/nodes")
+		_, resp, err := client.doRequest(req)
+		if err != nil {
+			return false, err
+		}
+		resp.Body.Close()
+
+		// Ensure we have a leader and a node registration
+		if leader := resp.Header.Get("X-Consul-KnownLeader"); leader != "true" {
+			return false, fmt.Errorf("Consul leader status: %#v", leader)
+		}
+		if resp.Header.Get("X-Consul-Index") == "0" {
+			return false, fmt.Errorf("Consul index is 0")
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+
+	return client, server
+}
+
+func testKey() string {
+	buf := make([]byte, 16)
+	if _, err := crand.Read(buf); err != nil {
+		panic(fmt.Errorf("Failed to read random bytes: %v", err))
+	}
+
+	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+		buf[0:4],
+		buf[4:6],
+		buf[6:8],
+		buf[8:10],
+		buf[10:16])
+}
+
+func TestSetQueryOptions(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	r := c.newRequest("GET", "/v1/kv/foo")
+	q := &QueryOptions{
+		Datacenter:        "foo",
+		AllowStale:        true,
+		RequireConsistent: true,
+		WaitIndex:         1000,
+		WaitTime:          100 * time.Second,
+		Token:             "12345",
+	}
+	r.setQueryOptions(q)
+
+	if r.params.Get("dc") != "foo" {
+		t.Fatalf("bad: %v", r.params)
+	}
+	if _, ok := r.params["stale"]; !ok {
+		t.Fatalf("bad: %v", r.params)
+	}
+	if _, ok := r.params["consistent"]; !ok {
+		t.Fatalf("bad: %v", r.params)
+	}
+	if r.params.Get("index") != "1000" {
+		t.Fatalf("bad: %v", r.params)
+	}
+	if r.params.Get("wait") != "100000ms" {
+		t.Fatalf("bad: %v", r.params)
+	}
+	if r.params.Get("token") != "12345" {
+		t.Fatalf("bad: %v", r.params)
+	}
+}
+
+func TestSetWriteOptions(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	r := c.newRequest("GET", "/v1/kv/foo")
+	q := &WriteOptions{
+		Datacenter: "foo",
+		Token:      "23456",
+	}
+	r.setWriteOptions(q)
+
+	if r.params.Get("dc") != "foo" {
+		t.Fatalf("bad: %v", r.params)
+	}
+	if r.params.Get("token") != "23456" {
+		t.Fatalf("bad: %v", r.params)
+	}
+}
+
+func TestRequestToHTTP(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	r := c.newRequest("DELETE", "/v1/kv/foo")
+	q := &QueryOptions{
+		Datacenter: "foo",
+	}
+	r.setQueryOptions(q)
+	req, err := r.toHTTP()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if req.Method != "DELETE" {
+		t.Fatalf("bad: %v", req)
+	}
+	if req.URL.RequestURI() != "/v1/kv/foo?dc=foo" {
+		t.Fatalf("bad: %v", req)
+	}
+}
+
+func TestParseQueryMeta(t *testing.T) {
+	resp := &http.Response{
+		Header: make(map[string][]string),
+	}
+	resp.Header.Set("X-Consul-Index", "12345")
+	resp.Header.Set("X-Consul-LastContact", "80")
+	resp.Header.Set("X-Consul-KnownLeader", "true")
+
+	qm := &QueryMeta{}
+	if err := parseQueryMeta(resp, qm); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if qm.LastIndex != 12345 {
+		t.Fatalf("Bad: %v", qm)
+	}
+	if qm.LastContact != 80*time.Millisecond {
+		t.Fatalf("Bad: %v", qm)
+	}
+	if !qm.KnownLeader {
+		t.Fatalf("Bad: %v", qm)
+	}
+}
+
+func TestAPI_UnixSocket(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.SkipNow()
+	}
+
+	tempDir, err := ioutil.TempDir("", "consul")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	defer os.RemoveAll(tempDir)
+	socket := filepath.Join(tempDir, "test.sock")
+
+	c, s := makeClientWithConfig(t, func(c *Config) {
+		c.Address = "unix://" + socket
+	}, func(c *testServerConfig) {
+		c.Addresses = &testAddressConfig{
+			HTTP: "unix://" + socket,
+		}
+	})
+	defer s.stop()
+
+	agent := c.Agent()
+
+	info, err := agent.Self()
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if info["Config"]["NodeName"] == "" {
+		t.Fatalf("bad: %v", info)
+	}
+}
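
For orientation, a minimal sketch of constructing a client against a non-default agent address, using only DefaultConfig, NewClient, and Agent().Self() from this package; the address below is a placeholder mirroring the one used in makeClient:

package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	conf := api.DefaultConfig()
	conf.Address = "127.0.0.1:18800" // placeholder agent address

	client, err := api.NewClient(conf)
	if err != nil {
		panic(err)
	}

	// Self returns the local agent's configuration, as used in the tests.
	info, err := client.Agent().Self()
	if err != nil {
		panic(err)
	}
	fmt.Println(info["Config"]["NodeName"])
}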

+ 181 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go

@@ -0,0 +1,181 @@
+package api
+
+type Node struct {
+	Node    string
+	Address string
+}
+
+type CatalogService struct {
+	Node        string
+	Address     string
+	ServiceID   string
+	ServiceName string
+	ServiceTags []string
+	ServicePort int
+}
+
+type CatalogNode struct {
+	Node     *Node
+	Services map[string]*AgentService
+}
+
+type CatalogRegistration struct {
+	Node       string
+	Address    string
+	Datacenter string
+	Service    *AgentService
+	Check      *AgentCheck
+}
+
+type CatalogDeregistration struct {
+	Node       string
+	Address    string
+	Datacenter string
+	ServiceID  string
+	CheckID    string
+}
+
+// Catalog can be used to query the Catalog endpoints
+type Catalog struct {
+	c *Client
+}
+
+// Catalog returns a handle to the catalog endpoints
+func (c *Client) Catalog() *Catalog {
+	return &Catalog{c}
+}
+
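+// Register is used to register a new catalog entry.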
+func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/catalog/register")
+	r.setWriteOptions(q)
+	r.obj = reg
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
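+// Deregister is used to remove a catalog entry.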
+func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/catalog/deregister")
+	r.setWriteOptions(q)
+	r.obj = dereg
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+// Datacenters is used to query for all the known datacenters
+func (c *Catalog) Datacenters() ([]string, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/datacenters")
+	_, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []string
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Nodes is used to query all the known nodes
+func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/nodes")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*Node
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Services is used to query for all known services
+func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/services")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out map[string][]string
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query catalog entries for a given service
+func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/service/"+service)
+	r.setQueryOptions(q)
+	if tag != "" {
+		r.params.Set("tag", tag)
+	}
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CatalogService
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Node is used to query for service information about a single node
+func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out *CatalogNode
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
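
A usage sketch of the catalog endpoints above; the helper name is hypothetical, and the datacenter, node, address, and service values are illustrative only:

package example

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// exampleCatalog shows the read paths (Datacenters, Nodes) and the write
// path (Register) against a client built as in the tests.
func exampleCatalog(c *api.Client) error {
	catalog := c.Catalog()

	dcs, err := catalog.Datacenters()
	if err != nil {
		return err
	}
	fmt.Println("datacenters:", dcs)

	nodes, meta, err := catalog.Nodes(nil)
	if err != nil {
		return err
	}
	fmt.Printf("%d nodes at index %d\n", len(nodes), meta.LastIndex)

	// Register an external service against an illustrative node.
	reg := &api.CatalogRegistration{
		Datacenter: "dc1",
		Node:       "db1",
		Address:    "10.0.0.5",
		Service:    &api.AgentService{ID: "pg1", Service: "postgres", Port: 5432},
	}
	_, err = catalog.Register(reg, nil)
	return err
}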

+ 273 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog_test.go

@@ -0,0 +1,273 @@
+package api
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/consul/testutil"
+)
+
+func TestCatalog_Datacenters(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	catalog := c.Catalog()
+
+	testutil.WaitForResult(func() (bool, error) {
+		datacenters, err := catalog.Datacenters()
+		if err != nil {
+			return false, err
+		}
+
+		if len(datacenters) == 0 {
+			return false, fmt.Errorf("Bad: %v", datacenters)
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}
+
+func TestCatalog_Nodes(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	catalog := c.Catalog()
+
+	testutil.WaitForResult(func() (bool, error) {
+		nodes, meta, err := catalog.Nodes(nil)
+		if err != nil {
+			return false, err
+		}
+
+		if meta.LastIndex == 0 {
+			return false, fmt.Errorf("Bad: %v", meta)
+		}
+
+		if len(nodes) == 0 {
+			return false, fmt.Errorf("Bad: %v", nodes)
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}
+
+func TestCatalog_Services(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	catalog := c.Catalog()
+
+	testutil.WaitForResult(func() (bool, error) {
+		services, meta, err := catalog.Services(nil)
+		if err != nil {
+			return false, err
+		}
+
+		if meta.LastIndex == 0 {
+			return false, fmt.Errorf("Bad: %v", meta)
+		}
+
+		if len(services) == 0 {
+			return false, fmt.Errorf("Bad: %v", services)
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}
+
+func TestCatalog_Service(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	catalog := c.Catalog()
+
+	testutil.WaitForResult(func() (bool, error) {
+		services, meta, err := catalog.Service("consul", "", nil)
+		if err != nil {
+			return false, err
+		}
+
+		if meta.LastIndex == 0 {
+			return false, fmt.Errorf("Bad: %v", meta)
+		}
+
+		if len(services) == 0 {
+			return false, fmt.Errorf("Bad: %v", services)
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}
+
+func TestCatalog_Node(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	catalog := c.Catalog()
+	name, _ := c.Agent().NodeName()
+
+	testutil.WaitForResult(func() (bool, error) {
+		info, meta, err := catalog.Node(name, nil)
+		if err != nil {
+			return false, err
+		}
+
+		if meta.LastIndex == 0 {
+			return false, fmt.Errorf("Bad: %v", meta)
+		}
+		if len(info.Services) == 0 {
+			return false, fmt.Errorf("Bad: %v", info)
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}
+
+func TestCatalog_Registration(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	catalog := c.Catalog()
+
+	service := &AgentService{
+		ID:      "redis1",
+		Service: "redis",
+		Tags:    []string{"master", "v1"},
+		Port:    8000,
+	}
+
+	check := &AgentCheck{
+		Node:      "foobar",
+		CheckID:   "service:redis1",
+		Name:      "Redis health check",
+		Notes:     "Script based health check",
+		Status:    "passing",
+		ServiceID: "redis1",
+	}
+
+	reg := &CatalogRegistration{
+		Datacenter: "dc1",
+		Node:       "foobar",
+		Address:    "192.168.10.10",
+		Service:    service,
+		Check:      check,
+	}
+
+	testutil.WaitForResult(func() (bool, error) {
+		if _, err := catalog.Register(reg, nil); err != nil {
+			return false, err
+		}
+
+		node, _, err := catalog.Node("foobar", nil)
+		if err != nil {
+			return false, err
+		}
+
+		if _, ok := node.Services["redis1"]; !ok {
+			return false, fmt.Errorf("missing service: redis1")
+		}
+
+		health, _, err := c.Health().Node("foobar", nil)
+		if err != nil {
+			return false, err
+		}
+
+		if health[0].CheckID != "service:redis1" {
+			return false, fmt.Errorf("missing checkid service:redis1")
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+
+	// Test catalog deregistration of the previously registered service
+	dereg := &CatalogDeregistration{
+		Datacenter: "dc1",
+		Node:       "foobar",
+		Address:    "192.168.10.10",
+		ServiceID:  "redis1",
+	}
+
+	if _, err := catalog.Deregister(dereg, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	testutil.WaitForResult(func() (bool, error) {
+		node, _, err := catalog.Node("foobar", nil)
+		if err != nil {
+			return false, err
+		}
+
+		if _, ok := node.Services["redis1"]; ok {
+			return false, fmt.Errorf("ServiceID:redis1 is not deregistered")
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+
+	// Test deregistration of the previously registered check
+	dereg = &CatalogDeregistration{
+		Datacenter: "dc1",
+		Node:       "foobar",
+		Address:    "192.168.10.10",
+		CheckID:    "service:redis1",
+	}
+
+	if _, err := catalog.Deregister(dereg, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	testutil.WaitForResult(func() (bool, error) {
+		health, _, err := c.Health().Node("foobar", nil)
+		if err != nil {
+			return false, err
+		}
+
+		if len(health) != 0 {
+			return false, fmt.Errorf("CheckID:service:redis1 is not deregistered")
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+
+	// Test node deregistration of the previously registered node
+	dereg = &CatalogDeregistration{
+		Datacenter: "dc1",
+		Node:       "foobar",
+		Address:    "192.168.10.10",
+	}
+
+	if _, err := catalog.Deregister(dereg, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	testutil.WaitForResult(func() (bool, error) {
+		node, _, err := catalog.Node("foobar", nil)
+		if err != nil {
+			return false, err
+		}
+
+		if node != nil {
+			return false, fmt.Errorf("node is not deregistered: %v", node)
+		}
+
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}

+ 104 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go

@@ -0,0 +1,104 @@
+package api
+
+import (
+	"bytes"
+	"strconv"
+)
+
+// Event can be used to query the Event endpoints
+type Event struct {
+	c *Client
+}
+
+// UserEvent represents an event that was fired by the user
+type UserEvent struct {
+	ID            string
+	Name          string
+	Payload       []byte
+	NodeFilter    string
+	ServiceFilter string
+	TagFilter     string
+	Version       int
+	LTime         uint64
+}
+
+// Event returns a handle to the event endpoints
+func (c *Client) Event() *Event {
+	return &Event{c}
+}
+
+// Fire is used to fire a new user event. Only the Name, Payload and Filters
+// are respected. This returns the ID or an associated error. Cross DC requests
+// are supported.
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
+	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
+	r.setWriteOptions(q)
+	if params.NodeFilter != "" {
+		r.params.Set("node", params.NodeFilter)
+	}
+	if params.ServiceFilter != "" {
+		r.params.Set("service", params.ServiceFilter)
+	}
+	if params.TagFilter != "" {
+		r.params.Set("tag", params.TagFilter)
+	}
+	if params.Payload != nil {
+		r.body = bytes.NewReader(params.Payload)
+	}
+
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out UserEvent
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// List is used to get the most recent events an agent has received.
+// This list can be optionally filtered by the name. This endpoint supports
+// quasi-blocking queries. The index is not monotonic, nor does it provide
+// LastContact or KnownLeader.
+func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
+	r := e.c.newRequest("GET", "/v1/event/list")
+	r.setQueryOptions(q)
+	if name != "" {
+		r.params.Set("name", name)
+	}
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*UserEvent
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// IDToIndex is a bit of a hack. This simulates the index generation to
+// convert an event ID into a WaitIndex.
+func (e *Event) IDToIndex(uuid string) uint64 {
+	lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
+	upper := uuid[19:23] + uuid[24:36]
+	lowVal, err := strconv.ParseUint(lower, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + lower)
+	}
+	highVal, err := strconv.ParseUint(upper, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + upper)
+	}
+	return lowVal ^ highVal
+}
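
A sketch of the fire-then-list pattern these endpoints and IDToIndex support, mirroring TestEvent_FireList below; the helper name is hypothetical and the event name is illustrative:

package example

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// exampleEvent fires a user event and checks that the list index has
// caught up with the index derived from the event ID.
func exampleEvent(c *api.Client) error {
	e := c.Event()

	id, _, err := e.Fire(&api.UserEvent{Name: "deploy"}, nil)
	if err != nil {
		return err
	}

	events, qm, err := e.List("deploy", nil)
	if err != nil {
		return err
	}
	fmt.Printf("%d events, caught up: %v\n", len(events), qm.LastIndex == e.IDToIndex(id))
	return nil
}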

+ 39 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/event_test.go

@@ -0,0 +1,39 @@
+package api
+
+import (
+	"testing"
+)
+
+func TestEvent_FireList(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	event := c.Event()
+
+	params := &UserEvent{Name: "foo"}
+	id, meta, err := event.Fire(params, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	if id == "" {
+		t.Fatalf("invalid: %v", id)
+	}
+
+	events, qm, err := event.List("", nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if qm.LastIndex != event.IDToIndex(id) {
+		t.Fatalf("Bad: %#v", qm)
+	}
+
+	if events[len(events)-1].ID != id {
+		t.Fatalf("bad: %#v", events)
+	}
+}

+ 136 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go

@@ -0,0 +1,136 @@
+package api
+
+import (
+	"fmt"
+)
+
+// HealthCheck is used to represent a single check
+type HealthCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+}
+
+// ServiceEntry is used for the health service endpoint
+type ServiceEntry struct {
+	Node    *Node
+	Service *AgentService
+	Checks  []*HealthCheck
+}
+
+// Health can be used to query the Health endpoints
+type Health struct {
+	c *Client
+}
+
+// Health returns a handle to the health endpoints
+func (c *Client) Health() *Health {
+	return &Health{c}
+}
+
+// Node is used to query for checks belonging to a given node
+func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*HealthCheck
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Checks is used to return the checks associated with a service
+func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/checks/"+service)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*HealthCheck
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query health information along with service info
+// for a given service. It can optionally do server-side filtering on a tag,
+// or restrict the results to nodes whose health checks are passing.
+func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/service/"+service)
+	r.setQueryOptions(q)
+	if tag != "" {
+		r.params.Set("tag", tag)
+	}
+	if passingOnly {
+		r.params.Set("passing", "1")
+	}
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*ServiceEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// State is used to retrieve all the checks in a given state.
+// The wildcard "any" state can also be used for all checks.
+func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+	switch state {
+	case "any":
+	case "warning":
+	case "critical":
+	case "passing":
+	case "unknown":
+	default:
+		return nil, nil, fmt.Errorf("Unsupported state: %v", state)
+	}
+	r := h.c.newRequest("GET", "/v1/health/state/"+state)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*HealthCheck
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
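
A sketch of the health read paths above; the helper name is hypothetical and the service name is illustrative:

package example

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// exampleHealth lists passing instances of a service, then all checks
// currently in the critical state.
func exampleHealth(c *api.Client) error {
	health := c.Health()

	// passingOnly=true filters to nodes whose checks are passing.
	entries, _, err := health.Service("redis", "", true, nil)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		fmt.Println(entry.Node.Node, entry.Service.Port)
	}

	checks, _, err := health.State("critical", nil)
	if err != nil {
		return err
	}
	fmt.Printf("%d critical checks\n", len(checks))
	return nil
}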

+ 121 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/health_test.go

@@ -0,0 +1,121 @@
+package api
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/consul/testutil"
+)
+
+func TestHealth_Node(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+	health := c.Health()
+
+	info, err := agent.Self()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	name := info["Config"]["NodeName"].(string)
+
+	testutil.WaitForResult(func() (bool, error) {
+		checks, meta, err := health.Node(name, nil)
+		if err != nil {
+			return false, err
+		}
+		if meta.LastIndex == 0 {
+			return false, fmt.Errorf("bad: %v", meta)
+		}
+		if len(checks) == 0 {
+			return false, fmt.Errorf("bad: %v", checks)
+		}
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}
+
+func TestHealth_Checks(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	agent := c.Agent()
+	health := c.Health()
+
+	// Make a service with a check
+	reg := &AgentServiceRegistration{
+		Name: "foo",
+		Check: &AgentServiceCheck{
+			TTL: "15s",
+		},
+	}
+	if err := agent.ServiceRegister(reg); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	defer agent.ServiceDeregister("foo")
+
+	testutil.WaitForResult(func() (bool, error) {
+		checks, meta, err := health.Checks("foo", nil)
+		if err != nil {
+			return false, err
+		}
+		if meta.LastIndex == 0 {
+			return false, fmt.Errorf("bad: %v", meta)
+		}
+		if len(checks) == 0 {
+			return false, fmt.Errorf("Bad: %v", checks)
+		}
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}
+
+func TestHealth_Service(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	health := c.Health()
+
+	testutil.WaitForResult(func() (bool, error) {
+		// consul service should always exist...
+		checks, meta, err := health.Service("consul", "", true, nil)
+		if err != nil {
+			return false, err
+		}
+		if meta.LastIndex == 0 {
+			return false, fmt.Errorf("bad: %v", meta)
+		}
+		if len(checks) == 0 {
+			return false, fmt.Errorf("Bad: %v", checks)
+		}
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}
+
+func TestHealth_State(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	health := c.Health()
+
+	testutil.WaitForResult(func() (bool, error) {
+		checks, meta, err := health.State("any", nil)
+		if err != nil {
+			return false, err
+		}
+		if meta.LastIndex == 0 {
+			return false, fmt.Errorf("bad: %v", meta)
+		}
+		if len(checks) == 0 {
+			return false, fmt.Errorf("Bad: %v", checks)
+		}
+		return true, nil
+	}, func(err error) {
+		t.Fatalf("err: %s", err)
+	})
+}

+ 236 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go

@@ -0,0 +1,236 @@
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+// KVPair is used to represent a single K/V entry
+type KVPair struct {
+	Key         string
+	CreateIndex uint64
+	ModifyIndex uint64
+	LockIndex   uint64
+	Flags       uint64
+	Value       []byte
+	Session     string
+}
+
+// KVPairs is a list of KVPair objects
+type KVPairs []*KVPair
+
+// KV is used to manipulate the K/V API
+type KV struct {
+	c *Client
+}
+
+// KV is used to return a handle to the K/V APIs
+func (c *Client) KV() *KV {
+	return &KV{c}
+}
+
+// Get is used to lookup a single key
+func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(key, nil, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// List is used to lookup all keys under a prefix
+func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// Keys is used to list all the keys under a prefix. Optionally,
+// a separator can be used to limit the responses.
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
+	params := map[string]string{"keys": ""}
+	if separator != "" {
+		params["separator"] = separator
+	}
+	resp, qm, err := k.getInternal(prefix, params, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []string
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
+	r := k.c.newRequest("GET", "/v1/kv/"+key)
+	r.setQueryOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	rtt, resp, err := k.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == 404 {
+		resp.Body.Close()
+		return nil, qm, nil
+	} else if resp.StatusCode != 200 {
+		resp.Body.Close()
+		return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+	}
+	return resp, qm, nil
+}
+
+// Put is used to write a new value. Only the
+// Key, Flags and Value are respected.
+func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
+	params := make(map[string]string, 1)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	_, wm, err := k.put(p.Key, params, p.Value, q)
+	return wm, err
+}
+
+// CAS is used for a Check-And-Set operation. The Key,
+// ModifyIndex, Flags and Value are respected. Returns true
+// on success or false on failure.
+func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Acquire is used for a lock acquisition operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failure.
+func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["acquire"] = p.Session
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Release is used for a lock release operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failure.
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["release"] = p.Session
+	return k.put(p.Key, params, p.Value, q)
+}
+
+func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
+	r := k.c.newRequest("PUT", "/v1/kv/"+key)
+	r.setWriteOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	r.body = bytes.NewReader(body)
+	rtt, resp, err := requireOK(k.c.doRequest(r))
+	if err != nil {
+		return false, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &WriteMeta{}
+	qm.RequestTime = rtt
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+	return res, qm, nil
+}
+
+// Delete is used to delete a single key
+func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
+	_, qm, err := k.deleteInternal(key, nil, w)
+	return qm, err
+}
+
+// DeleteCAS is used for a Delete Check-And-Set operation. The Key
+// and ModifyIndex are respected. Returns true on success or false on failure.
+func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := map[string]string{
+		"cas": strconv.FormatUint(p.ModifyIndex, 10),
+	}
+	return k.deleteInternal(p.Key, params, q)
+}
+
+// DeleteTree is used to delete all keys under a prefix
+func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
+	_, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w)
+	return qm, err
+}
+
+func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) {
+	r := k.c.newRequest("DELETE", "/v1/kv/"+key)
+	r.setWriteOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	rtt, resp, err := requireOK(k.c.doRequest(r))
+	if err != nil {
+		return false, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &WriteMeta{}
+	qm.RequestTime = rtt
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+	return res, qm, nil
+}
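
A sketch of the plain put/get path followed by a check-and-set update; the helper name is hypothetical, and the key and values are illustrative:

package example

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// exampleKV writes a key, reads it back, and then updates it with CAS,
// which only succeeds if the pair's ModifyIndex is still current.
func exampleKV(c *api.Client) error {
	kv := c.KV()

	if _, err := kv.Put(&api.KVPair{Key: "app/config", Value: []byte("v1")}, nil); err != nil {
		return err
	}

	pair, _, err := kv.Get("app/config", nil)
	if err != nil {
		return err
	}
	if pair == nil {
		return fmt.Errorf("key not found")
	}

	// Get populated pair.ModifyIndex, so this CAS races only against
	// concurrent writers.
	pair.Value = []byte("v2")
	ok, _, err := kv.CAS(pair, nil)
	if err != nil {
		return err
	}
	fmt.Println("cas applied:", ok)
	return nil
}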

+ 431 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv_test.go

@@ -0,0 +1,431 @@
+package api
+
+import (
+	"bytes"
+	"path"
+	"testing"
+	"time"
+)
+
+func TestClientPutGetDelete(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	kv := c.KV()
+
+	// Get on a key that does not exist yet
+	key := testKey()
+	pair, _, err := kv.Get(key, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if pair != nil {
+		t.Fatalf("unexpected value: %#v", pair)
+	}
+
+	// Put the key
+	value := []byte("test")
+	p := &KVPair{Key: key, Flags: 42, Value: value}
+	if _, err := kv.Put(p, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Get should work
+	pair, meta, err := kv.Get(key, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if pair == nil {
+		t.Fatalf("expected value: %#v", pair)
+	}
+	if !bytes.Equal(pair.Value, value) {
+		t.Fatalf("unexpected value: %#v", pair)
+	}
+	if pair.Flags != 42 {
+		t.Fatalf("unexpected value: %#v", pair)
+	}
+	if meta.LastIndex == 0 {
+		t.Fatalf("unexpected value: %#v", meta)
+	}
+
+	// Delete
+	if _, err := kv.Delete(key, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Get should now return nothing
+	pair, _, err = kv.Get(key, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if pair != nil {
+		t.Fatalf("unexpected value: %#v", pair)
+	}
+}
+
+func TestClient_List_DeleteRecurse(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	kv := c.KV()
+
+	// Generate some test keys
+	prefix := testKey()
+	var keys []string
+	for i := 0; i < 100; i++ {
+		keys = append(keys, path.Join(prefix, testKey()))
+	}
+
+	// Set values
+	value := []byte("test")
+	for _, key := range keys {
+		p := &KVPair{Key: key, Value: value}
+		if _, err := kv.Put(p, nil); err != nil {
+			t.Fatalf("err: %v", err)
+		}
+	}
+
+	// List the values
+	pairs, meta, err := kv.List(prefix, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(pairs) != len(keys) {
+		t.Fatalf("got %d keys", len(pairs))
+	}
+	for _, pair := range pairs {
+		if !bytes.Equal(pair.Value, value) {
+			t.Fatalf("unexpected value: %#v", pair)
+		}
+	}
+	if meta.LastIndex == 0 {
+		t.Fatalf("unexpected value: %#v", meta)
+	}
+
+	// Delete all
+	if _, err := kv.DeleteTree(prefix, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// List the values
+	pairs, _, err = kv.List(prefix, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(pairs) != 0 {
+		t.Fatalf("got %d keys", len(pairs))
+	}
+}
+
+func TestClient_DeleteCAS(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	kv := c.KV()
+
+	// Put the key
+	key := testKey()
+	value := []byte("test")
+	p := &KVPair{Key: key, Value: value}
+	if work, _, err := kv.CAS(p, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	} else if !work {
+		t.Fatalf("CAS failure")
+	}
+
+	// Get should work
+	pair, meta, err := kv.Get(key, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if pair == nil {
+		t.Fatalf("expected value: %#v", pair)
+	}
+	if meta.LastIndex == 0 {
+		t.Fatalf("unexpected value: %#v", meta)
+	}
+
+	// CAS update with bad index
+	p.ModifyIndex = 1
+	if work, _, err := kv.DeleteCAS(p, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	} else if work {
+		t.Fatalf("unexpected CAS")
+	}
+
+	// CAS update with valid index
+	p.ModifyIndex = meta.LastIndex
+	if work, _, err := kv.DeleteCAS(p, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	} else if !work {
+		t.Fatalf("unexpected CAS failure")
+	}
+}
+
+func TestClient_CAS(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	kv := c.KV()
+
+	// Put the key
+	key := testKey()
+	value := []byte("test")
+	p := &KVPair{Key: key, Value: value}
+	if work, _, err := kv.CAS(p, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	} else if !work {
+		t.Fatalf("CAS failure")
+	}
+
+	// Get should work
+	pair, meta, err := kv.Get(key, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if pair == nil {
+		t.Fatalf("expected value: %#v", pair)
+	}
+	if meta.LastIndex == 0 {
+		t.Fatalf("unexpected value: %#v", meta)
+	}
+
+	// CAS update with bad index
+	newVal := []byte("foo")
+	p.Value = newVal
+	p.ModifyIndex = 1
+	if work, _, err := kv.CAS(p, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	} else if work {
+		t.Fatalf("unexpected CAS")
+	}
+
+	// CAS update with valid index
+	p.ModifyIndex = meta.LastIndex
+	if work, _, err := kv.CAS(p, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	} else if !work {
+		t.Fatalf("unexpected CAS failure")
+	}
+}
+
+func TestClient_WatchGet(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	kv := c.KV()
+
+	// Get on a key that does not exist yet
+	key := testKey()
+	pair, meta, err := kv.Get(key, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if pair != nil {
+		t.Fatalf("unexpected value: %#v", pair)
+	}
+	if meta.LastIndex == 0 {
+		t.Fatalf("unexpected value: %#v", meta)
+	}
+
+	// Put the key
+	value := []byte("test")
+	go func() {
+		kv := c.KV()
+
+		time.Sleep(100 * time.Millisecond)
+		p := &KVPair{Key: key, Flags: 42, Value: value}
+		if _, err := kv.Put(p, nil); err != nil {
+			t.Fatalf("err: %v", err)
+		}
+	}()
+
+	// Get should work
+	options := &QueryOptions{WaitIndex: meta.LastIndex}
+	pair, meta2, err := kv.Get(key, options)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if pair == nil {
+		t.Fatalf("expected value: %#v", pair)
+	}
+	if !bytes.Equal(pair.Value, value) {
+		t.Fatalf("unexpected value: %#v", pair)
+	}
+	if pair.Flags != 42 {
+		t.Fatalf("unexpected value: %#v", pair)
+	}
+	if meta2.LastIndex <= meta.LastIndex {
+		t.Fatalf("unexpected value: %#v", meta2)
+	}
+}
+
+func TestClient_WatchList(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	kv := c.KV()
+
+	// List on a prefix with no keys yet
+	prefix := testKey()
+	key := path.Join(prefix, testKey())
+	pairs, meta, err := kv.List(prefix, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(pairs) != 0 {
+		t.Fatalf("unexpected value: %#v", pairs)
+	}
+	if meta.LastIndex == 0 {
+		t.Fatalf("unexpected value: %#v", meta)
+	}
+
+	// Put the key
+	value := []byte("test")
+	go func() {
+		kv := c.KV()
+
+		time.Sleep(100 * time.Millisecond)
+		p := &KVPair{Key: key, Flags: 42, Value: value}
+		if _, err := kv.Put(p, nil); err != nil {
+			t.Fatalf("err: %v", err)
+		}
+	}()
+
+	// Get should work
+	options := &QueryOptions{WaitIndex: meta.LastIndex}
+	pairs, meta2, err := kv.List(prefix, options)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(pairs) != 1 {
+		t.Fatalf("expected value: %#v", pairs)
+	}
+	if !bytes.Equal(pairs[0].Value, value) {
+		t.Fatalf("unexpected value: %#v", pairs)
+	}
+	if pairs[0].Flags != 42 {
+		t.Fatalf("unexpected value: %#v", pairs)
+	}
+	if meta2.LastIndex <= meta.LastIndex {
+		t.Fatalf("unexpected value: %#v", meta2)
+	}
+}
+
+func TestClient_Keys_DeleteRecurse(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	kv := c.KV()
+
+	// Generate some test keys
+	prefix := testKey()
+	var keys []string
+	for i := 0; i < 100; i++ {
+		keys = append(keys, path.Join(prefix, testKey()))
+	}
+
+	// Set values
+	value := []byte("test")
+	for _, key := range keys {
+		p := &KVPair{Key: key, Value: value}
+		if _, err := kv.Put(p, nil); err != nil {
+			t.Fatalf("err: %v", err)
+		}
+	}
+
+	// List the values
+	out, meta, err := kv.Keys(prefix, "", nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(out) != len(keys) {
+		t.Fatalf("got %d keys", len(out))
+	}
+	if meta.LastIndex == 0 {
+		t.Fatalf("unexpected value: %#v", meta)
+	}
+
+	// Delete all
+	if _, err := kv.DeleteTree(prefix, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// List the values
+	out, _, err = kv.Keys(prefix, "", nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(out) != 0 {
+		t.Fatalf("got %d keys", len(out))
+	}
+}
+
+func TestClient_AcquireRelease(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	session := c.Session()
+	kv := c.KV()
+
+	// Make a session
+	id, _, err := session.CreateNoChecks(nil, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	defer session.Destroy(id, nil)
+
+	// Acquire the key
+	key := testKey()
+	value := []byte("test")
+	p := &KVPair{Key: key, Value: value, Session: id}
+	if work, _, err := kv.Acquire(p, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	} else if !work {
+		t.Fatalf("Lock failure")
+	}
+
+	// Get should work
+	pair, meta, err := kv.Get(key, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if pair == nil {
+		t.Fatalf("expected value: %#v", pair)
+	}
+	if pair.LockIndex != 1 {
+		t.Fatalf("Expected lock: %v", pair)
+	}
+	if pair.Session != id {
+		t.Fatalf("Expected lock: %v", pair)
+	}
+	if meta.LastIndex == 0 {
+		t.Fatalf("unexpected value: %#v", meta)
+	}
+
+	// Release
+	if work, _, err := kv.Release(p, nil); err != nil {
+		t.Fatalf("err: %v", err)
+	} else if !work {
+		t.Fatalf("Release fail")
+	}
+
+	// Get should work
+	pair, meta, err = kv.Get(key, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if pair == nil {
+		t.Fatalf("expected value: %#v", pair)
+	}
+	if pair.LockIndex != 1 {
+		t.Fatalf("Expected lock: %v", pair)
+	}
+	if pair.Session != "" {
+		t.Fatalf("Expected unlock: %v", pair)
+	}
+	if meta.LastIndex == 0 {
+		t.Fatalf("unexpected value: %#v", meta)
+	}
+}

+ 321 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go

@@ -0,0 +1,321 @@
+package api
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultLockSessionName is the Session Name we assign if none is provided
+	DefaultLockSessionName = "Consul API Lock"
+
+	// DefaultLockSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Lock. This is used because we do not have any
+	// other check to depend upon.
+	DefaultLockSessionTTL = "15s"
+
+	// DefaultLockWaitTime is how long we block for at a time to check if lock
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Lock acquisition.
+	DefaultLockWaitTime = 15 * time.Second
+
+	// DefaultLockRetryTime is how long we wait after a failed lock acquisition
+	// before attempting to do the lock again. This is so that once a lock-delay
+	// is in effect, we do not hot-loop retrying the acquisition.
+	DefaultLockRetryTime = 5 * time.Second
+
+	// LockFlagValue is a magic flag we set to indicate a key
+	// is being used for a lock. It is used to detect a potential
+	// conflict with a semaphore.
+	LockFlagValue = 0x2ddccbc058a50c18
+)
+
+var (
+	// ErrLockHeld is returned if we attempt to double lock
+	ErrLockHeld = fmt.Errorf("Lock already held")
+
+	// ErrLockNotHeld is returned if we attempt to unlock a lock
+	// that we do not hold.
+	ErrLockNotHeld = fmt.Errorf("Lock not held")
+
+	// ErrLockInUse is returned if we attempt to destroy a lock
+	// that is in use.
+	ErrLockInUse = fmt.Errorf("Lock in use")
+
+	// ErrLockConflict is returned if the flags on a key
+	// used for a lock do not match expectation
+	ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
+)
+
+// Lock is used to implement client-side leader election. It follows the
+// algorithm described here: https://consul.io/docs/guides/leader-election.html.
+type Lock struct {
+	c    *Client
+	opts *LockOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// LockOptions is used to parameterize the Lock behavior.
+type LockOptions struct {
+	Key         string // Must be set and have write permissions
+	Value       []byte // Optional, value to associate with the lock
+	Session     string // Optional, created if not specified
+	SessionName string // Optional, defaults to DefaultLockSessionName
+	SessionTTL  string // Optional, defaults to DefaultLockSessionTTL
+}
+
+// LockKey returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockKey(key string) (*Lock, error) {
+	opts := &LockOptions{
+		Key: key,
+	}
+	return c.LockOpts(opts)
+}
+
+// LockOpts returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
+	if opts.Key == "" {
+		return nil, fmt.Errorf("missing key")
+	}
+	if opts.SessionName == "" {
+		opts.SessionName = DefaultLockSessionName
+	}
+	if opts.SessionTTL == "" {
+		opts.SessionTTL = DefaultLockSessionTTL
+	} else {
+		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+		}
+	}
+	l := &Lock{
+		c:    c,
+		opts: opts,
+	}
+	return l, nil
+}
+
+// Lock attempts to acquire the lock and blocks while doing so.
+// Providing a non-nil stopCh can be used to abort the lock attempt.
+// It returns a channel that is closed if the lock is lost, or an error.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the lock is held until Unlock() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the lock being lost.
+func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+	// Hold the lock as we try to acquire
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Check if we already hold the lock
+	if l.isHeld {
+		return nil, ErrLockHeld
+	}
+
+	// Check if we need to create a session first
+	l.lockSession = l.opts.Session
+	if l.lockSession == "" {
+		if s, err := l.createSession(); err != nil {
+			return nil, fmt.Errorf("failed to create session: %v", err)
+		} else {
+			l.sessionRenew = make(chan struct{})
+			l.lockSession = s
+			session := l.c.Session()
+			go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew)
+
+			// If we fail to acquire the lock, cleanup the session
+			defer func() {
+				if !l.isHeld {
+					close(l.sessionRenew)
+					l.sessionRenew = nil
+				}
+			}()
+		}
+	}
+
+	// Setup the query options
+	kv := l.c.KV()
+	qOpts := &QueryOptions{
+		WaitTime: DefaultLockWaitTime,
+	}
+
+WAIT:
+	// Check if we should quit
+	select {
+	case <-stopCh:
+		return nil, nil
+	default:
+	}
+
+	// Look for an existing lock, blocking until not taken
+	pair, meta, err := kv.Get(l.opts.Key, qOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read lock: %v", err)
+	}
+	if pair != nil && pair.Flags != LockFlagValue {
+		return nil, ErrLockConflict
+	}
+	if pair != nil && pair.Session != "" {
+		qOpts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+
+	// Try to acquire the lock
+	lockEnt := l.lockEntry(l.lockSession)
+	locked, _, err := kv.Acquire(lockEnt, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to acquire lock: %v", err)
+	}
+
+	// Handle the case of not getting the lock
+	if !locked {
+		select {
+		case <-time.After(DefaultLockRetryTime):
+			goto WAIT
+		case <-stopCh:
+			return nil, nil
+		}
+	}
+
+	// Watch to ensure we maintain leadership
+	leaderCh := make(chan struct{})
+	go l.monitorLock(l.lockSession, leaderCh)
+
+	// Set that we own the lock
+	l.isHeld = true
+
+	// Locked! All done
+	return leaderCh, nil
+}
+
+// Unlock releases the lock. It is an error to call this
+// if the lock is not currently held.
+func (l *Lock) Unlock() error {
+	// Hold the lock as we try to release
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Ensure the lock is actually held
+	if !l.isHeld {
+		return ErrLockNotHeld
+	}
+
+	// Set that we no longer own the lock
+	l.isHeld = false
+
+	// Stop the session renew
+	if l.sessionRenew != nil {
+		defer func() {
+			close(l.sessionRenew)
+			l.sessionRenew = nil
+		}()
+	}
+
+	// Get the lock entry, and clear the lock session
+	lockEnt := l.lockEntry(l.lockSession)
+	l.lockSession = ""
+
+	// Release the lock explicitly
+	kv := l.c.KV()
+	_, _, err := kv.Release(lockEnt, nil)
+	if err != nil {
+		return fmt.Errorf("failed to release lock: %v", err)
+	}
+	return nil
+}
+
+// Destroy is used to clean up the lock entry. It is not necessary
+// to invoke it. It will fail if the lock is in use.
+func (l *Lock) Destroy() error {
+	// Hold the lock as we try to release
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Check if we already hold the lock
+	if l.isHeld {
+		return ErrLockHeld
+	}
+
+	// Look for an existing lock
+	kv := l.c.KV()
+	pair, _, err := kv.Get(l.opts.Key, nil)
+	if err != nil {
+		return fmt.Errorf("failed to read lock: %v", err)
+	}
+
+	// Nothing to do if the lock does not exist
+	if pair == nil {
+		return nil
+	}
+
+	// Check for possible flag conflict
+	if pair.Flags != LockFlagValue {
+		return ErrLockConflict
+	}
+
+	// Check if it is in use
+	if pair.Session != "" {
+		return ErrLockInUse
+	}
+
+	// Attempt the delete
+	didRemove, _, err := kv.DeleteCAS(pair, nil)
+	if err != nil {
+		return fmt.Errorf("failed to remove lock: %v", err)
+	}
+	if !didRemove {
+		return ErrLockInUse
+	}
+	return nil
+}
+
+// createSession is used to create a new managed session
+func (l *Lock) createSession() (string, error) {
+	session := l.c.Session()
+	se := &SessionEntry{
+		Name: l.opts.SessionName,
+		TTL:  l.opts.SessionTTL,
+	}
+	id, _, err := session.Create(se, nil)
+	if err != nil {
+		return "", err
+	}
+	return id, nil
+}
+
+// lockEntry returns a formatted KVPair for the lock
+func (l *Lock) lockEntry(session string) *KVPair {
+	return &KVPair{
+		Key:     l.opts.Key,
+		Value:   l.opts.Value,
+		Session: session,
+		Flags:   LockFlagValue,
+	}
+}
+
+// monitorLock is a long-running routine that monitors lock ownership.
+// It closes the stopCh if we lose our leadership.
+func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
+	defer close(stopCh)
+	kv := l.c.KV()
+	opts := &QueryOptions{RequireConsistent: true}
+WAIT:
+	pair, meta, err := kv.Get(l.opts.Key, opts)
+	if err != nil {
+		return
+	}
+	if pair != nil && pair.Session == session {
+		opts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+}
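
A sketch of the leader-election pattern this type supports; the helper name is hypothetical, the key is illustrative, and per the Lock doc comment the returned channel may close at any time once the session is invalidated:

package example

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// exampleLeader blocks until it acquires the lock, does leader-only work,
// and releases the lock on return.
func exampleLeader(c *api.Client) error {
	lock, err := c.LockKey("service/my-app/leader")
	if err != nil {
		return err
	}

	// A nil stopCh means we block until the lock is acquired.
	leaderCh, err := lock.Lock(nil)
	if err != nil {
		return err
	}
	defer lock.Unlock()

	// leaderCh closes if leadership is lost (session invalidation,
	// operator intervention, etc.).
	select {
	case <-leaderCh:
		return fmt.Errorf("leadership lost")
	default:
		fmt.Println("acting as leader")
	}
	return nil
}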

+ 289 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock_test.go

@@ -0,0 +1,289 @@
+package api
+
+import (
+	"log"
+	"sync"
+	"testing"
+	"time"
+)
+
+func TestLock_LockUnlock(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	lock, err := c.LockKey("test/lock")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Initial unlock should fail
+	err = lock.Unlock()
+	if err != ErrLockNotHeld {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should work
+	leaderCh, err := lock.Lock(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if leaderCh == nil {
+		t.Fatalf("not leader")
+	}
+
+	// Double lock should fail
+	_, err = lock.Lock(nil)
+	if err != ErrLockHeld {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should be leader
+	select {
+	case <-leaderCh:
+		t.Fatalf("should be leader")
+	default:
+	}
+
+	// Unlock should now work
+	err = lock.Unlock()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Double unlock should fail
+	err = lock.Unlock()
+	if err != ErrLockNotHeld {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should lose leadership
+	select {
+	case <-leaderCh:
+	case <-time.After(time.Second):
+		t.Fatalf("should not be leader")
+	}
+}
+
+func TestLock_ForceInvalidate(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	lock, err := c.LockKey("test/lock")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should work
+	leaderCh, err := lock.Lock(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if leaderCh == nil {
+		t.Fatalf("not leader")
+	}
+	defer lock.Unlock()
+
+	go func() {
+		// Nuke the session, simulating an operator invalidation
+		// or a health check failure
+		session := c.Session()
+		session.Destroy(lock.lockSession, nil)
+	}()
+
+	// Should lose leadership
+	select {
+	case <-leaderCh:
+	case <-time.After(time.Second):
+		t.Fatalf("should not be leader")
+	}
+}
+
+func TestLock_DeleteKey(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	lock, err := c.LockKey("test/lock")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should work
+	leaderCh, err := lock.Lock(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if leaderCh == nil {
+		t.Fatalf("not leader")
+	}
+	defer lock.Unlock()
+
+	go func() {
+		// Nuke the key, simulate an operator intervention
+		kv := c.KV()
+		kv.Delete("test/lock", nil)
+	}()
+
+	// Should lose leadership
+	select {
+	case <-leaderCh:
+	case <-time.After(time.Second):
+		t.Fatalf("should not be leader")
+	}
+}
+
+func TestLock_Contend(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	wg := &sync.WaitGroup{}
+	acquired := make([]bool, 3)
+	for idx := range acquired {
+		wg.Add(1)
+		go func(idx int) {
+			defer wg.Done()
+			lock, err := c.LockKey("test/lock")
+			if err != nil {
+				t.Fatalf("err: %v", err)
+			}
+
+			// Should work eventually, will contend
+			leaderCh, err := lock.Lock(nil)
+			if err != nil {
+				t.Fatalf("err: %v", err)
+			}
+			if leaderCh == nil {
+				t.Fatalf("not leader")
+			}
+			defer lock.Unlock()
+			log.Printf("Contender %d acquired", idx)
+
+			// Set acquired and then leave
+			acquired[idx] = true
+		}(idx)
+	}
+
+	// Wait for termination
+	doneCh := make(chan struct{})
+	go func() {
+		wg.Wait()
+		close(doneCh)
+	}()
+
+	// Wait for everybody to get a turn
+	select {
+	case <-doneCh:
+	case <-time.After(3 * DefaultLockRetryTime):
+		t.Fatalf("timeout")
+	}
+
+	for idx, did := range acquired {
+		if !did {
+			t.Fatalf("contender %d never acquired", idx)
+		}
+	}
+}
+
+func TestLock_Destroy(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	lock, err := c.LockKey("test/lock")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should work
+	leaderCh, err := lock.Lock(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if leaderCh == nil {
+		t.Fatalf("not leader")
+	}
+
+	// Destroy should fail
+	if err := lock.Destroy(); err != ErrLockHeld {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should be able to release
+	err = lock.Unlock()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Acquire with a different lock
+	l2, err := c.LockKey("test/lock")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should work
+	leaderCh, err = l2.Lock(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if leaderCh == nil {
+		t.Fatalf("not leader")
+	}
+
+	// Destroy should still fail
+	if err := lock.Destroy(); err != ErrLockInUse {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should release
+	err = l2.Unlock()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Destroy should work
+	err = lock.Destroy()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Double destroy should work
+	err = l2.Destroy()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestLock_Conflict(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	sema, err := c.SemaphorePrefix("test/lock/", 2)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should work
+	lockCh, err := sema.Acquire(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if lockCh == nil {
+		t.Fatalf("not hold")
+	}
+	defer sema.Release()
+
+	lock, err := c.LockKey("test/lock/.lock")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should conflict with semaphore
+	_, err = lock.Lock(nil)
+	if err != ErrLockConflict {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should conflict with semaphore
+	err = lock.Destroy()
+	if err != ErrLockConflict {
+		t.Fatalf("err: %v", err)
+	}
+}

+ 482 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go

@@ -0,0 +1,482 @@
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"path"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
+	DefaultSemaphoreSessionName = "Consul API Semaphore"
+
+	// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Semaphore. This is used because we do not have any
+	// other check to depend upon.
+	DefaultSemaphoreSessionTTL = "15s"
+
+	// DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Semaphore acquisition.
+	DefaultSemaphoreWaitTime = 15 * time.Second
+
+	// DefaultSemaphoreRetryTime is how long we wait after a failed lock acquisition
+	// before attempting to do the lock again. This is so that once a lock-delay
+	// is in effect, we do not hot-loop retrying the acquisition.
+	DefaultSemaphoreRetryTime = 5 * time.Second
+
+	// DefaultSemaphoreKey is the key used within the prefix to
+	// use for coordination between all the contenders.
+	DefaultSemaphoreKey = ".lock"
+
+	// SemaphoreFlagValue is a magic flag we set to indicate a key
+	// is being used for a semaphore. It is used to detect a potential
+	// conflict with a lock.
+	SemaphoreFlagValue = 0xe0f69a2baa414de0
+)
+
+var (
+	// ErrSemaphoreHeld is returned if we attempt to acquire a semaphore we already hold
+	ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
+
+	// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
+	// that we do not hold.
+	ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
+
+	// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
+	// that is in use.
+	ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
+
+	// ErrSemaphoreConflict is returned if the flags on a key
+	// used for a semaphore do not match expectation
+	ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
+)
+
+// Semaphore is used to implement a distributed semaphore
+// using the Consul KV primitives.
+type Semaphore struct {
+	c    *Client
+	opts *SemaphoreOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// SemaphoreOptions is used to parameterize the Semaphore
+type SemaphoreOptions struct {
+	Prefix      string // Must be set and have write permissions
+	Limit       int    // Must be set, and be positive
+	Value       []byte // Optional, value to associate with the contender entry
+	Session     string // Optional, created if not specified
+	SessionName string // Optional, defaults to DefaultLockSessionName
+	SessionTTL  string // Optional, defaults to DefaultLockSessionTTL
+}
+
+// semaphoreLock is written under the DefaultSemaphoreKey and
+// is used to coordinate between all the contenders.
+type semaphoreLock struct {
+	// Limit is the integer limit of holders. This is used to
+	// verify that all the holders agree on the value.
+	Limit int
+
+	// Holders is a list of all the semaphore holders.
+	// It maps the session ID to true; it is effectively used as a set.
+	Holders map[string]bool
+}
+
+// SemaphorePrefix is used to create a Semaphore which will operate
+// at the given KV prefix and uses the given limit for the semaphore.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders.
+func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
+	opts := &SemaphoreOptions{
+		Prefix: prefix,
+		Limit:  limit,
+	}
+	return c.SemaphoreOpts(opts)
+}
+
+// SemaphoreOpts is used to create a Semaphore with the given options.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders. If a Session is not provided, one will be created.
+func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
+	if opts.Prefix == "" {
+		return nil, fmt.Errorf("missing prefix")
+	}
+	if opts.Limit <= 0 {
+		return nil, fmt.Errorf("semaphore limit must be positive")
+	}
+	if opts.SessionName == "" {
+		opts.SessionName = DefaultSemaphoreSessionName
+	}
+	if opts.SessionTTL == "" {
+		opts.SessionTTL = DefaultSemaphoreSessionTTL
+	} else {
+		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+		}
+	}
+	s := &Semaphore{
+		c:    c,
+		opts: opts,
+	}
+	return s, nil
+}
+
+// Acquire attempts to reserve a slot in the semaphore, blocking until
+// success, interruption via the stopCh, or an error is encountered.
+// Providing a non-nil stopCh can be used to abort the attempt.
+// On success, a channel is returned that represents our slot.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the slot is held until Release() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the session being lost.
+func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
+	// Hold the lock as we try to acquire
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	// Check if we already hold the semaphore
+	if s.isHeld {
+		return nil, ErrSemaphoreHeld
+	}
+
+	// Check if we need to create a session first
+	s.lockSession = s.opts.Session
+	if s.lockSession == "" {
+		if sess, err := s.createSession(); err != nil {
+			return nil, fmt.Errorf("failed to create session: %v", err)
+		} else {
+			s.sessionRenew = make(chan struct{})
+			s.lockSession = sess
+			session := s.c.Session()
+			go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew)
+
+			// If we fail to acquire the lock, clean up the session
+			defer func() {
+				if !s.isHeld {
+					close(s.sessionRenew)
+					s.sessionRenew = nil
+				}
+			}()
+		}
+	}
+
+	// Create the contender entry
+	kv := s.c.KV()
+	made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil)
+	if err != nil || !made {
+		return nil, fmt.Errorf("failed to make contender entry: %v", err)
+	}
+
+	// Setup the query options
+	qOpts := &QueryOptions{
+		WaitTime: DefaultSemaphoreWaitTime,
+	}
+
+WAIT:
+	// Check if we should quit
+	select {
+	case <-stopCh:
+		return nil, nil
+	default:
+	}
+
+	// Read the prefix
+	pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read prefix: %v", err)
+	}
+
+	// Decode the lock
+	lockPair := s.findLock(pairs)
+	if lockPair.Flags != SemaphoreFlagValue {
+		return nil, ErrSemaphoreConflict
+	}
+	lock, err := s.decodeLock(lockPair)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify we agree with the limit
+	if lock.Limit != s.opts.Limit {
+		return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)",
+			lock.Limit, s.opts.Limit)
+	}
+
+	// Prune the dead holders
+	s.pruneDeadHolders(lock, pairs)
+
+	// Check if the lock is held
+	if len(lock.Holders) >= lock.Limit {
+		qOpts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+
+	// Create a new lock with us as a holder
+	lock.Holders[s.lockSession] = true
+	newLock, err := s.encodeLock(lock, lockPair.ModifyIndex)
+	if err != nil {
+		return nil, err
+	}
+
+	// Attempt the acquisition
+	didSet, _, err := kv.CAS(newLock, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to update lock: %v", err)
+	}
+	if !didSet {
+		// Update failed, could have been a race with another contender,
+		// retry the operation
+		goto WAIT
+	}
+
+	// Watch to ensure we maintain ownership of the slot
+	lockCh := make(chan struct{})
+	go s.monitorLock(s.lockSession, lockCh)
+
+	// Set that we own the lock
+	s.isHeld = true
+
+	// Acquired! All done
+	return lockCh, nil
+}
+
+// Release is used to voluntarily give up our semaphore slot. It is
+// an error to call this if the semaphore has not been acquired.
+func (s *Semaphore) Release() error {
+	// Hold the lock as we try to release
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	// Ensure the lock is actually held
+	if !s.isHeld {
+		return ErrSemaphoreNotHeld
+	}
+
+	// Set that we no longer own the lock
+	s.isHeld = false
+
+	// Stop the session renew
+	if s.sessionRenew != nil {
+		defer func() {
+			close(s.sessionRenew)
+			s.sessionRenew = nil
+		}()
+	}
+
+	// Get and clear the lock session
+	lockSession := s.lockSession
+	s.lockSession = ""
+
+	// Remove ourselves as a lock holder
+	kv := s.c.KV()
+	key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+READ:
+	pair, _, err := kv.Get(key, nil)
+	if err != nil {
+		return err
+	}
+	if pair == nil {
+		pair = &KVPair{}
+	}
+	lock, err := s.decodeLock(pair)
+	if err != nil {
+		return err
+	}
+
+	// Create a new lock without us as a holder
+	if _, ok := lock.Holders[lockSession]; ok {
+		delete(lock.Holders, lockSession)
+		newLock, err := s.encodeLock(lock, pair.ModifyIndex)
+		if err != nil {
+			return err
+		}
+
+		// Swap the locks
+		didSet, _, err := kv.CAS(newLock, nil)
+		if err != nil {
+			return fmt.Errorf("failed to update lock: %v", err)
+		}
+		if !didSet {
+			goto READ
+		}
+	}
+
+	// Destroy the contender entry
+	contenderKey := path.Join(s.opts.Prefix, lockSession)
+	if _, err := kv.Delete(contenderKey, nil); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Destroy is used to clean up the semaphore entry. It is not necessary
+// to invoke it. Destroy will fail if the semaphore is in use.
+func (s *Semaphore) Destroy() error {
+	// Hold the lock as we try to acquire
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	// Check if we already hold the semaphore
+	if s.isHeld {
+		return ErrSemaphoreHeld
+	}
+
+	// List for the semaphore
+	kv := s.c.KV()
+	pairs, _, err := kv.List(s.opts.Prefix, nil)
+	if err != nil {
+		return fmt.Errorf("failed to read prefix: %v", err)
+	}
+
+	// Find the lock pair, bail if it doesn't exist
+	lockPair := s.findLock(pairs)
+	if lockPair.ModifyIndex == 0 {
+		return nil
+	}
+	if lockPair.Flags != SemaphoreFlagValue {
+		return ErrSemaphoreConflict
+	}
+
+	// Decode the lock
+	lock, err := s.decodeLock(lockPair)
+	if err != nil {
+		return err
+	}
+
+	// Prune the dead holders
+	s.pruneDeadHolders(lock, pairs)
+
+	// Check if there are any holders
+	if len(lock.Holders) > 0 {
+		return ErrSemaphoreInUse
+	}
+
+	// Attempt the delete
+	didRemove, _, err := kv.DeleteCAS(lockPair, nil)
+	if err != nil {
+		return fmt.Errorf("failed to remove semaphore: %v", err)
+	}
+	if !didRemove {
+		return ErrSemaphoreInUse
+	}
+	return nil
+}
+
+// createSession is used to create a new managed session
+func (s *Semaphore) createSession() (string, error) {
+	session := s.c.Session()
+	se := &SessionEntry{
+		Name:     s.opts.SessionName,
+		TTL:      s.opts.SessionTTL,
+		Behavior: SessionBehaviorDelete,
+	}
+	id, _, err := session.Create(se, nil)
+	if err != nil {
+		return "", err
+	}
+	return id, nil
+}
+
+// contenderEntry returns a formatted KVPair for the contender
+func (s *Semaphore) contenderEntry(session string) *KVPair {
+	return &KVPair{
+		Key:     path.Join(s.opts.Prefix, session),
+		Value:   s.opts.Value,
+		Session: session,
+		Flags:   SemaphoreFlagValue,
+	}
+}
+
+// findLock is used to find the KV Pair which is used for coordination
+func (s *Semaphore) findLock(pairs KVPairs) *KVPair {
+	key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+	for _, pair := range pairs {
+		if pair.Key == key {
+			return pair
+		}
+	}
+	return &KVPair{Flags: SemaphoreFlagValue}
+}
+
+// decodeLock is used to decode a semaphoreLock from an
+// entry in Consul
+func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) {
+	// Handle if there is no lock
+	if pair == nil || pair.Value == nil {
+		return &semaphoreLock{
+			Limit:   s.opts.Limit,
+			Holders: make(map[string]bool),
+		}, nil
+	}
+
+	l := &semaphoreLock{}
+	if err := json.Unmarshal(pair.Value, l); err != nil {
+		return nil, fmt.Errorf("lock decoding failed: %v", err)
+	}
+	return l, nil
+}
+
+// encodeLock is used to encode a semaphoreLock into a KVPair
+// that can be PUT
+func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) {
+	enc, err := json.Marshal(l)
+	if err != nil {
+		return nil, fmt.Errorf("lock encoding failed: %v", err)
+	}
+	pair := &KVPair{
+		Key:         path.Join(s.opts.Prefix, DefaultSemaphoreKey),
+		Value:       enc,
+		Flags:       SemaphoreFlagValue,
+		ModifyIndex: oldIndex,
+	}
+	return pair, nil
+}
+
+// pruneDeadHolders is used to remove all the dead lock holders
+func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) {
+	// Gather all the live holders
+	alive := make(map[string]struct{}, len(pairs))
+	for _, pair := range pairs {
+		if pair.Session != "" {
+			alive[pair.Session] = struct{}{}
+		}
+	}
+
+	// Remove any holders that are dead
+	for holder := range lock.Holders {
+		if _, ok := alive[holder]; !ok {
+			delete(lock.Holders, holder)
+		}
+	}
+}
+
+// monitorLock is a long-running routine to monitor semaphore ownership.
+// It closes the stopCh if we lose our slot.
+func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
+	defer close(stopCh)
+	kv := s.c.KV()
+	opts := &QueryOptions{RequireConsistent: true}
+WAIT:
+	pairs, meta, err := kv.List(s.opts.Prefix, opts)
+	if err != nil {
+		return
+	}
+	lockPair := s.findLock(pairs)
+	lock, err := s.decodeLock(lockPair)
+	if err != nil {
+		return
+	}
+	s.pruneDeadHolders(lock, pairs)
+	if _, ok := lock.Holders[session]; ok {
+		opts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+}

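A minimal usage sketch of the semaphore API above (not part of the vendored source; it assumes a Consul agent reachable via the package's default client configuration, and the "service/example/sema" prefix is illustrative):

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// The default config points at a local Consul agent.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Two slots under an illustrative prefix; all contenders must
	// agree on the limit or Acquire returns a conflict error.
	sema, err := client.SemaphorePrefix("service/example/sema", 2)
	if err != nil {
		log.Fatal(err)
	}

	// Acquire blocks until a slot is free. The returned channel is
	// closed if the slot is ever lost (session invalidation, operator
	// intervention, etc.), so guarded work should watch it.
	lostCh, err := sema.Acquire(nil)
	if err != nil {
		log.Fatal(err)
	}

	select {
	case <-lostCh:
		log.Println("slot lost; stop the guarded work")
	default:
		// ... do work while holding the slot ...
	}

	if err := sema.Release(); err != nil {
		log.Fatal(err)
	}
}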
+ 306 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore_test.go

@@ -0,0 +1,306 @@
+package api
+
+import (
+	"log"
+	"sync"
+	"testing"
+	"time"
+)
+
+func TestSemaphore_AcquireRelease(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	sema, err := c.SemaphorePrefix("test/semaphore", 2)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Initial release should fail
+	err = sema.Release()
+	if err != ErrSemaphoreNotHeld {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should work
+	lockCh, err := sema.Acquire(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if lockCh == nil {
+		t.Fatalf("not held")
+	}
+
+	// Double lock should fail
+	_, err = sema.Acquire(nil)
+	if err != ErrSemaphoreHeld {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should be held
+	select {
+	case <-lockCh:
+		t.Fatalf("should be held")
+	default:
+	}
+
+	// Initial release should work
+	err = sema.Release()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Double unlock should fail
+	err = sema.Release()
+	if err != ErrSemaphoreNotHeld {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should lose resource
+	select {
+	case <-lockCh:
+	case <-time.After(time.Second):
+		t.Fatalf("should not be held")
+	}
+}
+
+func TestSemaphore_ForceInvalidate(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	sema, err := c.SemaphorePrefix("test/semaphore", 2)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should work
+	lockCh, err := sema.Acquire(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if lockCh == nil {
+		t.Fatalf("not acquired")
+	}
+	defer sema.Release()
+
+	go func() {
+		// Nuke the session, simulating an operator invalidation
+		// or a health check failure
+		session := c.Session()
+		session.Destroy(sema.lockSession, nil)
+	}()
+
+	// Should lose the slot
+	select {
+	case <-lockCh:
+	case <-time.After(time.Second):
+		t.Fatalf("should not be locked")
+	}
+}
+
+func TestSemaphore_DeleteKey(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	sema, err := c.SemaphorePrefix("test/semaphore", 2)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should work
+	lockCh, err := sema.Acquire(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if lockCh == nil {
+		t.Fatalf("not locked")
+	}
+	defer sema.Release()
+
+	go func() {
+		// Nuke the key, simulating an operator intervention
+		kv := c.KV()
+		kv.DeleteTree("test/semaphore", nil)
+	}()
+
+	// Should lose the slot
+	select {
+	case <-lockCh:
+	case <-time.After(time.Second):
+		t.Fatalf("should not be locked")
+	}
+}
+
+func TestSemaphore_Contend(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	wg := &sync.WaitGroup{}
+	acquired := make([]bool, 4)
+	for idx := range acquired {
+		wg.Add(1)
+		go func(idx int) {
+			defer wg.Done()
+			sema, err := c.SemaphorePrefix("test/semaphore", 2)
+			if err != nil {
+				t.Fatalf("err: %v", err)
+			}
+
+			// Should work eventually, will contend
+			lockCh, err := sema.Acquire(nil)
+			if err != nil {
+				t.Fatalf("err: %v", err)
+			}
+			if lockCh == nil {
+				t.Fatalf("not locked")
+			}
+			defer sema.Release()
+			log.Printf("Contender %d acquired", idx)
+
+			// Set acquired and then leave
+			acquired[idx] = true
+		}(idx)
+	}
+
+	// Wait for termination
+	doneCh := make(chan struct{})
+	go func() {
+		wg.Wait()
+		close(doneCh)
+	}()
+
+	// Wait for everybody to get a turn
+	select {
+	case <-doneCh:
+	case <-time.After(3 * DefaultLockRetryTime):
+		t.Fatalf("timeout")
+	}
+
+	for idx, did := range acquired {
+		if !did {
+			t.Fatalf("contender %d never acquired", idx)
+		}
+	}
+}
+
+func TestSemaphore_BadLimit(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	sema, err := c.SemaphorePrefix("test/semaphore", 0)
+	if err == nil {
+		t.Fatalf("should error")
+	}
+
+	sema, err = c.SemaphorePrefix("test/semaphore", 1)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	_, err = sema.Acquire(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	sema2, err := c.SemaphorePrefix("test/semaphore", 2)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	_, err = sema2.Acquire(nil)
+	if err.Error() != "semaphore limit conflict (lock: 1, local: 2)" {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestSemaphore_Destroy(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	sema, err := c.SemaphorePrefix("test/semaphore", 2)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	sema2, err := c.SemaphorePrefix("test/semaphore", 2)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	_, err = sema.Acquire(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	_, err = sema2.Acquire(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Destroy should fail, still held
+	if err := sema.Destroy(); err != ErrSemaphoreHeld {
+		t.Fatalf("err: %v", err)
+	}
+
+	err = sema.Release()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Destroy should fail, still in use
+	if err := sema.Destroy(); err != ErrSemaphoreInUse {
+		t.Fatalf("err: %v", err)
+	}
+
+	err = sema2.Release()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Destroy should work
+	if err := sema.Destroy(); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Destroy should work
+	if err := sema2.Destroy(); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+}
+
+func TestSemaphore_Conflict(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	lock, err := c.LockKey("test/sema/.lock")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should work
+	leaderCh, err := lock.Lock(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if leaderCh == nil {
+		t.Fatalf("not leader")
+	}
+	defer lock.Unlock()
+
+	sema, err := c.SemaphorePrefix("test/sema/", 2)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should conflict with lock
+	_, err = sema.Acquire(nil)
+	if err != ErrSemaphoreConflict {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Should conflict with lock
+	err = sema.Destroy()
+	if err != ErrSemaphoreConflict {
+		t.Fatalf("err: %v", err)
+	}
+}

+ 245 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go

@@ -0,0 +1,245 @@
+package api
+
+import (
+	"time"
+)
+
+const (
+	// SessionBehaviorRelease is the default behavior and causes
+	// all associated locks to be released on session invalidation.
+	SessionBehaviorRelease = "release"
+
+	// SessionBehaviorDelete is new in Consul 0.5 and changes the
+	// behavior to delete all associated locks on session invalidation.
+	// It can be used in a way similar to Ephemeral Nodes in ZooKeeper.
+	SessionBehaviorDelete = "delete"
+)
+
+// SessionEntry represents a session in consul
+type SessionEntry struct {
+	CreateIndex uint64
+	ID          string
+	Name        string
+	Node        string
+	Checks      []string
+	LockDelay   time.Duration
+	Behavior    string
+	TTL         string
+}
+
+// Session can be used to query the Session endpoints
+type Session struct {
+	c *Client
+}
+
+// Session returns a handle to the session endpoints
+func (c *Client) Session() *Session {
+	return &Session{c}
+}
+
+// CreateNoChecks is like Create but is used specifically to create
+// a session with no associated health checks.
+func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+	body := make(map[string]interface{})
+	body["Checks"] = []string{}
+	if se != nil {
+		if se.Name != "" {
+			body["Name"] = se.Name
+		}
+		if se.Node != "" {
+			body["Node"] = se.Node
+		}
+		if se.LockDelay != 0 {
+			body["LockDelay"] = durToMsec(se.LockDelay)
+		}
+		if se.Behavior != "" {
+			body["Behavior"] = se.Behavior
+		}
+		if se.TTL != "" {
+			body["TTL"] = se.TTL
+		}
+	}
+	return s.create(body, q)
+}
+
+// Create makes a new session. Providing a session entry can
+// customize the session. It can also be nil to use defaults.
+func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+	var obj interface{}
+	if se != nil {
+		body := make(map[string]interface{})
+		obj = body
+		if se.Name != "" {
+			body["Name"] = se.Name
+		}
+		if se.Node != "" {
+			body["Node"] = se.Node
+		}
+		if se.LockDelay != 0 {
+			body["LockDelay"] = durToMsec(se.LockDelay)
+		}
+		if len(se.Checks) > 0 {
+			body["Checks"] = se.Checks
+		}
+		if se.Behavior != "" {
+			body["Behavior"] = se.Behavior
+		}
+		if se.TTL != "" {
+			body["TTL"] = se.TTL
+		}
+	}
+	return s.create(obj, q)
+}
+
+func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
+	r := s.c.newRequest("PUT", "/v1/session/create")
+	r.setWriteOptions(q)
+	r.obj = obj
+	rtt, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Destroy invalidates a given session
+func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+	r := s.c.newRequest("PUT", "/v1/session/destroy/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// Renew renews the TTL on a given session
+func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
+	r := s.c.newRequest("PUT", "/v1/session/renew/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+
+	var entries []*SessionEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, wm, err
+	}
+
+	if len(entries) > 0 {
+		return entries[0], wm, nil
+	}
+	return nil, wm, nil
+}
+
+// RenewPeriodic is used to periodically invoke Session.Renew on a
+// session until a doneCh is closed. This is meant to be used in a
+// long-running goroutine to ensure a session stays valid.
+func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh chan struct{}) error {
+	ttl, err := time.ParseDuration(initialTTL)
+	if err != nil {
+		return err
+	}
+	for {
+		select {
+		case <-time.After(ttl / 2):
+			entry, _, err := s.Renew(id, q)
+			if err != nil {
+				return err
+			}
+			if entry == nil {
+				return nil
+			}
+
+			// Handle the server updating the TTL
+			ttl, _ = time.ParseDuration(entry.TTL)
+
+		case <-doneCh:
+			// Attempt a session destroy
+			s.Destroy(id, q)
+			return nil
+		}
+	}
+}
+
+// Info looks up a single session
+func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
+	r := s.c.newRequest("GET", "/v1/session/info/"+id)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*SessionEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// Node gets sessions for a node
+func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	r := s.c.newRequest("GET", "/v1/session/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*SessionEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// List gets all active sessions
+func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	r := s.c.newRequest("GET", "/v1/session/list")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*SessionEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}

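A short sketch of the managed-session pattern this file enables (again not part of the vendored source; the "15s" TTL and session name are illustrative, and api.NewClient/api.DefaultConfig are assumed from the client package):

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	session := client.Session()

	// Create a TTL session whose locks are deleted on invalidation.
	id, _, err := session.Create(&api.SessionEntry{
		Name:     "example-session",
		TTL:      "15s",
		Behavior: api.SessionBehaviorDelete,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Keep the session alive in the background; RenewPeriodic renews
	// at TTL/2 and destroys the session once doneCh is closed.
	doneCh := make(chan struct{})
	go session.RenewPeriodic("15s", id, nil, doneCh)

	// ... use the session for KV acquires, locks, or semaphores ...

	close(doneCh) // stops renewal and destroys the session
}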
+ 200 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/session_test.go

@@ -0,0 +1,200 @@
+package api
+
+import (
+	"testing"
+)
+
+func TestSession_CreateDestroy(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	session := c.Session()
+
+	id, meta, err := session.Create(nil, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	if id == "" {
+		t.Fatalf("invalid: %v", id)
+	}
+
+	meta, err = session.Destroy(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+}
+
+func TestSession_CreateRenewDestroy(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	session := c.Session()
+
+	se := &SessionEntry{
+		TTL: "10s",
+	}
+
+	id, meta, err := session.Create(se, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	defer session.Destroy(id, nil)
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	if id == "" {
+		t.Fatalf("invalid: %v", id)
+	}
+
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	renew, meta, err := session.Renew(id, nil)
+
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if meta.RequestTime == 0 {
+		t.Fatalf("bad: %v", meta)
+	}
+
+	if renew == nil {
+		t.Fatalf("should get session")
+	}
+
+	if renew.ID != id {
+		t.Fatalf("should have matching id")
+	}
+
+	if renew.TTL != "10s" {
+		t.Fatalf("should get session with TTL")
+	}
+}
+
+func TestSession_Info(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	session := c.Session()
+
+	id, _, err := session.Create(nil, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	defer session.Destroy(id, nil)
+
+	info, qm, err := session.Info(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if qm.LastIndex == 0 {
+		t.Fatalf("bad: %v", qm)
+	}
+	if !qm.KnownLeader {
+		t.Fatalf("bad: %v", qm)
+	}
+
+	if info == nil {
+		t.Fatalf("should get session")
+	}
+	if info.CreateIndex == 0 {
+		t.Fatalf("bad: %v", info)
+	}
+	if info.ID != id {
+		t.Fatalf("bad: %v", info)
+	}
+	if info.Name != "" {
+		t.Fatalf("bad: %v", info)
+	}
+	if info.Node == "" {
+		t.Fatalf("bad: %v", info)
+	}
+	if len(info.Checks) == 0 {
+		t.Fatalf("bad: %v", info)
+	}
+	if info.LockDelay == 0 {
+		t.Fatalf("bad: %v", info)
+	}
+	if info.Behavior != "release" {
+		t.Fatalf("bad: %v", info)
+	}
+	if info.TTL != "" {
+		t.Fatalf("bad: %v", info)
+	}
+}
+
+func TestSession_Node(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	session := c.Session()
+
+	id, _, err := session.Create(nil, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	defer session.Destroy(id, nil)
+
+	info, qm, err := session.Info(id, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	sessions, qm, err := session.Node(info.Node, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if len(sessions) != 1 {
+		t.Fatalf("bad: %v", sessions)
+	}
+
+	if qm.LastIndex == 0 {
+		t.Fatalf("bad: %v", qm)
+	}
+	if !qm.KnownLeader {
+		t.Fatalf("bad: %v", qm)
+	}
+}
+
+func TestSession_List(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	session := c.Session()
+
+	id, _, err := session.Create(nil, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	defer session.Destroy(id, nil)
+
+	sessions, qm, err := session.List(nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	if len(sessions) != 1 {
+		t.Fatalf("bad: %v", sessions)
+	}
+
+	if qm.LastIndex == 0 {
+		t.Fatalf("bad: %v", qm)
+	}
+	if !qm.KnownLeader {
+		t.Fatalf("bad: %v", qm)
+	}
+}

+ 43 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go

@@ -0,0 +1,43 @@
+package api
+
+// Status can be used to query the Status endpoints
+type Status struct {
+	c *Client
+}
+
+// Status returns a handle to the status endpoints
+func (c *Client) Status() *Status {
+	return &Status{c}
+}
+
+// Leader is used to query for a known leader
+func (s *Status) Leader() (string, error) {
+	r := s.c.newRequest("GET", "/v1/status/leader")
+	_, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	var leader string
+	if err := decodeBody(resp, &leader); err != nil {
+		return "", err
+	}
+	return leader, nil
+}
+
+// Peers is used to query for the known raft peers
+func (s *Status) Peers() ([]string, error) {
+	r := s.c.newRequest("GET", "/v1/status/peers")
+	_, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var peers []string
+	if err := decodeBody(resp, &peers); err != nil {
+		return nil, err
+	}
+	return peers, nil
+}

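For completeness, a sketch of exercising the status endpoints above (illustrative, not part of the vendored source; assumes a reachable agent via the default client configuration):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	status := client.Status()

	leader, err := status.Leader()
	if err != nil {
		log.Fatal(err)
	}
	peers, err := status.Peers()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("leader: %s, raft peers: %v\n", leader, peers)
}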
+ 35 - 0
libnetwork/Godeps/_workspace/src/github.com/hashicorp/consul/api/status_test.go

@@ -0,0 +1,35 @@
+package api
+
+import (
+	"testing"
+)
+
+func TestStatusLeader(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	status := c.Status()
+
+	leader, err := status.Leader()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if leader == "" {
+		t.Fatalf("Expected leader")
+	}
+}
+
+func TestStatusPeers(t *testing.T) {
+	c, s := makeClient(t)
+	defer s.stop()
+
+	status := c.Status()
+
+	peers, err := status.Peers()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(peers) == 0 {
+		t.Fatalf("Expected peers")
+	}
+}

+ 166 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/cluster_test.go

@@ -0,0 +1,166 @@
+package zk
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+)
+
+type logWriter struct {
+	t *testing.T
+	p string
+}
+
+func (lw logWriter) Write(b []byte) (int, error) {
+	lw.t.Logf("%s%s", lw.p, string(b))
+	return len(b), nil
+}
+
+func TestBasicCluster(t *testing.T) {
+	ts, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+	zk1, err := ts.Connect(0)
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk1.Close()
+	zk2, err := ts.Connect(1)
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk2.Close()
+
+	time.Sleep(time.Second * 5)
+
+	if _, err := zk1.Create("/gozk-test", []byte("foo-cluster"), 0, WorldACL(PermAll)); err != nil {
+		t.Fatalf("Create failed on node 1: %+v", err)
+	}
+	if by, _, err := zk2.Get("/gozk-test"); err != nil {
+		t.Fatalf("Get failed on node 2: %+v", err)
+	} else if string(by) != "foo-cluster" {
+		t.Fatal("Wrong data for node 2")
+	}
+}
+
+func TestClientClusterFailover(t *testing.T) {
+	ts, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+	zk, evCh, err := ts.ConnectAll()
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk.Close()
+
+	hasSession := make(chan string, 1)
+	go func() {
+		for ev := range evCh {
+			if ev.Type == EventSession && ev.State == StateHasSession {
+				select {
+				case hasSession <- ev.Server:
+				default:
+				}
+			}
+		}
+	}()
+
+	waitSession := func() string {
+		select {
+		case srv := <-hasSession:
+			return srv
+		case <-time.After(time.Second * 8):
+			t.Fatal("Failed to connect and get a session")
+		}
+		return ""
+	}
+
+	srv := waitSession()
+	if _, err := zk.Create("/gozk-test", []byte("foo-cluster"), 0, WorldACL(PermAll)); err != nil {
+		t.Fatalf("Create failed on node 1: %+v", err)
+	}
+
+	stopped := false
+	for _, s := range ts.Servers {
+		if strings.HasSuffix(srv, fmt.Sprintf(":%d", s.Port)) {
+			s.Srv.Stop()
+			stopped = true
+			break
+		}
+	}
+	if !stopped {
+		t.Fatal("Failed to stop server")
+	}
+
+	waitSession()
+	if by, _, err := zk.Get("/gozk-test"); err != nil {
+		t.Fatalf("Get failed on node 2: %+v", err)
+	} else if string(by) != "foo-cluster" {
+		t.Fatal("Wrong data for node 2")
+	}
+}
+
+func TestWaitForClose(t *testing.T) {
+	ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+	zk, err := ts.Connect(0)
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	timeout := time.After(30 * time.Second)
+CONNECTED:
+	for {
+		select {
+		case ev := <-zk.eventChan:
+			if ev.State == StateConnected {
+				break CONNECTED
+			}
+		case <-timeout:
+			zk.Close()
+			t.Fatal("Timeout")
+		}
+	}
+	zk.Close()
+	for {
+		select {
+		case _, ok := <-zk.eventChan:
+			if !ok {
+				return
+			}
+		case <-timeout:
+			t.Fatal("Timeout")
+		}
+	}
+}
+
+func TestBadSession(t *testing.T) {
+	ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+	zk, _, err := ts.ConnectAll()
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk.Close()
+
+	if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
+		t.Fatalf("Delete returned error: %+v", err)
+	}
+
+	zk.conn.Close()
+	time.Sleep(time.Millisecond * 100)
+
+	if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
+		t.Fatalf("Delete returned error: %+v", err)
+	}
+}

+ 844 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/conn.go

@@ -0,0 +1,844 @@
+package zk
+
+/*
+TODO:
+* make sure a ping response comes back in a reasonable time
+
+Possible watcher events:
+* Event{Type: EventNotWatching, State: StateDisconnected, Path: path, Err: err}
+*/
+
+import (
+	"crypto/rand"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+var ErrNoServer = errors.New("zk: could not connect to a server")
+
+const (
+	bufferSize      = 1536 * 1024
+	eventChanSize   = 6
+	sendChanSize    = 16
+	protectedPrefix = "_c_"
+)
+
+type watchType int
+
+const (
+	watchTypeData watchType = iota
+	watchTypeExist
+	watchTypeChild
+)
+
+type watchPathType struct {
+	path  string
+	wType watchType
+}
+
+type Dialer func(network, address string, timeout time.Duration) (net.Conn, error)
+
+type Conn struct {
+	lastZxid  int64
+	sessionID int64
+	state     State // must be 32-bit aligned
+	xid       uint32
+	timeout   int32 // session timeout in milliseconds
+	passwd    []byte
+
+	dialer          Dialer
+	servers         []string
+	serverIndex     int // remember last server that was tried during connect to round-robin attempts to servers
+	lastServerIndex int // index of the last server that was successfully connected to and authenticated with
+	conn            net.Conn
+	eventChan       chan Event
+	shouldQuit      chan struct{}
+	pingInterval    time.Duration
+	recvTimeout     time.Duration
+	connectTimeout  time.Duration
+
+	sendChan     chan *request
+	requests     map[int32]*request // Xid -> pending request
+	requestsLock sync.Mutex
+	watchers     map[watchPathType][]chan Event
+	watchersLock sync.Mutex
+
+	// Debug (used by unit tests)
+	reconnectDelay time.Duration
+}
+
+type request struct {
+	xid        int32
+	opcode     int32
+	pkt        interface{}
+	recvStruct interface{}
+	recvChan   chan response
+
+	// Because sending and receiving happen in separate goroutines, there's
+	// a possible race condition when creating watches from outside the read
+	// loop. We must ensure that a watcher gets added to the list synchronously
+	// with the response from the server on any request that creates a watch.
+	// In order to not hard-code the watch logic for each opcode in the recv
+	// loop, the caller can use recvFunc to insert some synchronous code
+	// after a response.
+	recvFunc func(*request, *responseHeader, error)
+}
+
+type response struct {
+	zxid int64
+	err  error
+}
+
+type Event struct {
+	Type   EventType
+	State  State
+	Path   string // For non-session events, the path of the watched node.
+	Err    error
+	Server string // For connection events
+}
+
+// Connect establishes a new connection to a pool of zookeeper servers
+// using the default net.Dialer. See ConnectWithDialer for further
+// information about session timeout.
+func Connect(servers []string, sessionTimeout time.Duration) (*Conn, <-chan Event, error) {
+	return ConnectWithDialer(servers, sessionTimeout, nil)
+}
+
+// ConnectWithDialer establishes a new connection to a pool of zookeeper
+// servers. The provided session timeout sets the amount of time for which
+// a session is considered valid after losing connection to a server. Within
+// the session timeout it's possible to reestablish a connection to a different
+// server and keep the same session. This means any ephemeral nodes and
+// watches are maintained.
+func ConnectWithDialer(servers []string, sessionTimeout time.Duration, dialer Dialer) (*Conn, <-chan Event, error) {
+	if len(servers) == 0 {
+		return nil, nil, errors.New("zk: server list must not be empty")
+	}
+
+	recvTimeout := sessionTimeout * 2 / 3
+
+	srvs := make([]string, len(servers))
+
+	for i, addr := range servers {
+		if strings.Contains(addr, ":") {
+			srvs[i] = addr
+		} else {
+			srvs[i] = addr + ":" + strconv.Itoa(DefaultPort)
+		}
+	}
+
+	// Randomize the order of the servers to avoid creating hotspots
+	stringShuffle(srvs)
+
+	ec := make(chan Event, eventChanSize)
+	if dialer == nil {
+		dialer = net.DialTimeout
+	}
+	conn := Conn{
+		dialer:          dialer,
+		servers:         srvs,
+		serverIndex:     0,
+		lastServerIndex: -1,
+		conn:            nil,
+		state:           StateDisconnected,
+		eventChan:       ec,
+		shouldQuit:      make(chan struct{}),
+		recvTimeout:     recvTimeout,
+		pingInterval:    recvTimeout / 2,
+		connectTimeout:  1 * time.Second,
+		sendChan:        make(chan *request, sendChanSize),
+		requests:        make(map[int32]*request),
+		watchers:        make(map[watchPathType][]chan Event),
+		passwd:          emptyPassword,
+		timeout:         int32(sessionTimeout.Nanoseconds() / 1e6),
+
+		// Debug
+		reconnectDelay: 0,
+	}
+	go func() {
+		conn.loop()
+		conn.flushRequests(ErrClosing)
+		conn.invalidateWatches(ErrClosing)
+		close(conn.eventChan)
+	}()
+	return &conn, ec, nil
+}
+
+func (c *Conn) Close() {
+	close(c.shouldQuit)
+
+	select {
+	case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil):
+	case <-time.After(time.Second):
+	}
+}
+
+func (c *Conn) State() State {
+	return State(atomic.LoadInt32((*int32)(&c.state)))
+}
+
+func (c *Conn) setState(state State) {
+	atomic.StoreInt32((*int32)(&c.state), int32(state))
+	select {
+	case c.eventChan <- Event{Type: EventSession, State: state, Server: c.servers[c.serverIndex]}:
+	default:
+		// panic("zk: event channel full - it must be monitored and never allowed to be full")
+	}
+}
+
+func (c *Conn) connect() error {
+	c.setState(StateConnecting)
+	for {
+		c.serverIndex = (c.serverIndex + 1) % len(c.servers)
+		if c.serverIndex == c.lastServerIndex {
+			c.flushUnsentRequests(ErrNoServer)
+			select {
+			case <-time.After(time.Second):
+				// pass
+			case <-c.shouldQuit:
+				c.setState(StateDisconnected)
+				c.flushUnsentRequests(ErrClosing)
+				return ErrClosing
+			}
+		} else if c.lastServerIndex < 0 {
+			// lastServerIndex defaults to -1 to avoid a delay on the initial connect
+			c.lastServerIndex = 0
+		}
+
+		zkConn, err := c.dialer("tcp", c.servers[c.serverIndex], c.connectTimeout)
+		if err == nil {
+			c.conn = zkConn
+			c.setState(StateConnected)
+			return nil
+		}
+
+		log.Printf("Failed to connect to %s: %+v", c.servers[c.serverIndex], err)
+	}
+}
+
+func (c *Conn) loop() {
+	for {
+		if err := c.connect(); err != nil {
+			// c.Close() was called
+			return
+		}
+
+		err := c.authenticate()
+		switch {
+		case err == ErrSessionExpired:
+			c.invalidateWatches(err)
+		case err != nil && c.conn != nil:
+			c.conn.Close()
+		case err == nil:
+			c.lastServerIndex = c.serverIndex
+			closeChan := make(chan struct{}) // channel to tell the send loop to stop
+			var wg sync.WaitGroup
+
+			wg.Add(1)
+			go func() {
+				c.sendLoop(c.conn, closeChan)
+				c.conn.Close() // causes recv loop to EOF/exit
+				wg.Done()
+			}()
+
+			wg.Add(1)
+			go func() {
+				err = c.recvLoop(c.conn)
+				if err == nil {
+					panic("zk: recvLoop should never return nil error")
+				}
+				close(closeChan) // tell send loop to exit
+				wg.Done()
+			}()
+
+			wg.Wait()
+		}
+
+		c.setState(StateDisconnected)
+
+		// Yeesh
+		if err != io.EOF && err != ErrSessionExpired && !strings.Contains(err.Error(), "use of closed network connection") {
+			log.Println(err)
+		}
+
+		select {
+		case <-c.shouldQuit:
+			c.flushRequests(ErrClosing)
+			return
+		default:
+		}
+
+		if err != ErrSessionExpired {
+			err = ErrConnectionClosed
+		}
+		c.flushRequests(err)
+
+		if c.reconnectDelay > 0 {
+			select {
+			case <-c.shouldQuit:
+				return
+			case <-time.After(c.reconnectDelay):
+			}
+		}
+	}
+}
+
+func (c *Conn) flushUnsentRequests(err error) {
+	for {
+		select {
+		default:
+			return
+		case req := <-c.sendChan:
+			req.recvChan <- response{-1, err}
+		}
+	}
+}
+
+// Send error to all pending requests and clear request map
+func (c *Conn) flushRequests(err error) {
+	c.requestsLock.Lock()
+	for _, req := range c.requests {
+		req.recvChan <- response{-1, err}
+	}
+	c.requests = make(map[int32]*request)
+	c.requestsLock.Unlock()
+}
+
+// Send error to all watchers and clear watchers map
+func (c *Conn) invalidateWatches(err error) {
+	c.watchersLock.Lock()
+	defer c.watchersLock.Unlock()
+
+	if len(c.watchers) > 0 {
+		for pathType, watchers := range c.watchers {
+			ev := Event{Type: EventNotWatching, State: StateDisconnected, Path: pathType.path, Err: err}
+			for _, ch := range watchers {
+				ch <- ev
+				close(ch)
+			}
+		}
+		c.watchers = make(map[watchPathType][]chan Event)
+	}
+}
+
+func (c *Conn) sendSetWatches() {
+	c.watchersLock.Lock()
+	defer c.watchersLock.Unlock()
+
+	if len(c.watchers) == 0 {
+		return
+	}
+
+	req := &setWatchesRequest{
+		RelativeZxid: c.lastZxid,
+		DataWatches:  make([]string, 0),
+		ExistWatches: make([]string, 0),
+		ChildWatches: make([]string, 0),
+	}
+	n := 0
+	for pathType, watchers := range c.watchers {
+		if len(watchers) == 0 {
+			continue
+		}
+		switch pathType.wType {
+		case watchTypeData:
+			req.DataWatches = append(req.DataWatches, pathType.path)
+		case watchTypeExist:
+			req.ExistWatches = append(req.ExistWatches, pathType.path)
+		case watchTypeChild:
+			req.ChildWatches = append(req.ChildWatches, pathType.path)
+		}
+		n++
+	}
+	if n == 0 {
+		return
+	}
+
+	go func() {
+		res := &setWatchesResponse{}
+		_, err := c.request(opSetWatches, req, res, nil)
+		if err != nil {
+			log.Printf("Failed to set previous watches: %s", err.Error())
+		}
+	}()
+}
+
+func (c *Conn) authenticate() error {
+	buf := make([]byte, 256)
+
+	// connect request
+
+	n, err := encodePacket(buf[4:], &connectRequest{
+		ProtocolVersion: protocolVersion,
+		LastZxidSeen:    c.lastZxid,
+		TimeOut:         c.timeout,
+		SessionID:       c.sessionID,
+		Passwd:          c.passwd,
+	})
+	if err != nil {
+		return err
+	}
+
+	binary.BigEndian.PutUint32(buf[:4], uint32(n))
+
+	c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10))
+	_, err = c.conn.Write(buf[:n+4])
+	c.conn.SetWriteDeadline(time.Time{})
+	if err != nil {
+		return err
+	}
+
+	c.sendSetWatches()
+
+	// connect response
+
+	// packet length
+	c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10))
+	_, err = io.ReadFull(c.conn, buf[:4])
+	c.conn.SetReadDeadline(time.Time{})
+	if err != nil {
+		// Sometimes zookeeper just drops the connection on invalid session data;
+		// we prefer to drop the session and start from scratch when that
+		// happens instead of dropping into a loop of connect/disconnect attempts
+		c.sessionID = 0
+		c.passwd = emptyPassword
+		c.lastZxid = 0
+		c.setState(StateExpired)
+		return ErrSessionExpired
+	}
+
+	blen := int(binary.BigEndian.Uint32(buf[:4]))
+	if cap(buf) < blen {
+		buf = make([]byte, blen)
+	}
+
+	_, err = io.ReadFull(c.conn, buf[:blen])
+	if err != nil {
+		return err
+	}
+
+	r := connectResponse{}
+	_, err = decodePacket(buf[:blen], &r)
+	if err != nil {
+		return err
+	}
+	if r.SessionID == 0 {
+		c.sessionID = 0
+		c.passwd = emptyPassword
+		c.lastZxid = 0
+		c.setState(StateExpired)
+		return ErrSessionExpired
+	}
+
+	if c.sessionID != r.SessionID {
+		atomic.StoreUint32(&c.xid, 0)
+	}
+	c.timeout = r.TimeOut
+	c.sessionID = r.SessionID
+	c.passwd = r.Passwd
+	c.setState(StateHasSession)
+
+	return nil
+}
+
+func (c *Conn) sendLoop(conn net.Conn, closeChan <-chan struct{}) error {
+	pingTicker := time.NewTicker(c.pingInterval)
+	defer pingTicker.Stop()
+
+	buf := make([]byte, bufferSize)
+	for {
+		select {
+		case req := <-c.sendChan:
+			header := &requestHeader{req.xid, req.opcode}
+			n, err := encodePacket(buf[4:], header)
+			if err != nil {
+				req.recvChan <- response{-1, err}
+				continue
+			}
+
+			n2, err := encodePacket(buf[4+n:], req.pkt)
+			if err != nil {
+				req.recvChan <- response{-1, err}
+				continue
+			}
+
+			n += n2
+
+			binary.BigEndian.PutUint32(buf[:4], uint32(n))
+
+			c.requestsLock.Lock()
+			select {
+			case <-closeChan:
+				req.recvChan <- response{-1, ErrConnectionClosed}
+				c.requestsLock.Unlock()
+				return ErrConnectionClosed
+			default:
+			}
+			c.requests[req.xid] = req
+			c.requestsLock.Unlock()
+
+			conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
+			_, err = conn.Write(buf[:n+4])
+			conn.SetWriteDeadline(time.Time{})
+			if err != nil {
+				req.recvChan <- response{-1, err}
+				conn.Close()
+				return err
+			}
+		case <-pingTicker.C:
+			n, err := encodePacket(buf[4:], &requestHeader{Xid: -2, Opcode: opPing})
+			if err != nil {
+				panic("zk: opPing should never fail to serialize")
+			}
+
+			binary.BigEndian.PutUint32(buf[:4], uint32(n))
+
+			conn.SetWriteDeadline(time.Now().Add(c.recvTimeout))
+			_, err = conn.Write(buf[:n+4])
+			conn.SetWriteDeadline(time.Time{})
+			if err != nil {
+				conn.Close()
+				return err
+			}
+		case <-closeChan:
+			return nil
+		}
+	}
+}
+
+func (c *Conn) recvLoop(conn net.Conn) error {
+	buf := make([]byte, bufferSize)
+	for {
+		// packet length
+		conn.SetReadDeadline(time.Now().Add(c.recvTimeout))
+		_, err := io.ReadFull(conn, buf[:4])
+		if err != nil {
+			return err
+		}
+
+		blen := int(binary.BigEndian.Uint32(buf[:4]))
+		if cap(buf) < blen {
+			buf = make([]byte, blen)
+		}
+
+		_, err = io.ReadFull(conn, buf[:blen])
+		conn.SetReadDeadline(time.Time{})
+		if err != nil {
+			return err
+		}
+
+		res := responseHeader{}
+		_, err = decodePacket(buf[:16], &res)
+		if err != nil {
+			return err
+		}
+
+		if res.Xid == -1 {
+			res := &watcherEvent{}
+			_, err := decodePacket(buf[16:16+blen], res)
+			if err != nil {
+				return err
+			}
+			ev := Event{
+				Type:  res.Type,
+				State: res.State,
+				Path:  res.Path,
+				Err:   nil,
+			}
+			select {
+			case c.eventChan <- ev:
+			default:
+			}
+			wTypes := make([]watchType, 0, 2)
+			switch res.Type {
+			case EventNodeCreated:
+				wTypes = append(wTypes, watchTypeExist)
+			case EventNodeDeleted, EventNodeDataChanged:
+				wTypes = append(wTypes, watchTypeExist, watchTypeData, watchTypeChild)
+			case EventNodeChildrenChanged:
+				wTypes = append(wTypes, watchTypeChild)
+			}
+			c.watchersLock.Lock()
+			for _, t := range wTypes {
+				wpt := watchPathType{res.Path, t}
+				if watchers := c.watchers[wpt]; len(watchers) > 0 {
+					for _, ch := range watchers {
+						ch <- ev
+						close(ch)
+					}
+					delete(c.watchers, wpt)
+				}
+			}
+			c.watchersLock.Unlock()
+		} else if res.Xid == -2 {
+			// Ping response. Ignore.
+		} else if res.Xid < 0 {
+			log.Printf("Xid < 0 (%d) but not ping or watcher event", res.Xid)
+		} else {
+			if res.Zxid > 0 {
+				c.lastZxid = res.Zxid
+			}
+
+			c.requestsLock.Lock()
+			req, ok := c.requests[res.Xid]
+			if ok {
+				delete(c.requests, res.Xid)
+			}
+			c.requestsLock.Unlock()
+
+			if !ok {
+				log.Printf("Response for unknown request with xid %d", res.Xid)
+			} else {
+				if res.Err != 0 {
+					err = res.Err.toError()
+				} else {
+					_, err = decodePacket(buf[16:16+blen], req.recvStruct)
+				}
+				if req.recvFunc != nil {
+					req.recvFunc(req, &res, err)
+				}
+				req.recvChan <- response{res.Zxid, err}
+				if req.opcode == opClose {
+					return io.EOF
+				}
+			}
+		}
+	}
+}
+
+func (c *Conn) nextXid() int32 {
+	return int32(atomic.AddUint32(&c.xid, 1) & 0x7fffffff)
+}
+
+func (c *Conn) addWatcher(path string, watchType watchType) <-chan Event {
+	c.watchersLock.Lock()
+	defer c.watchersLock.Unlock()
+
+	ch := make(chan Event, 1)
+	wpt := watchPathType{path, watchType}
+	c.watchers[wpt] = append(c.watchers[wpt], ch)
+	return ch
+}
+
+func (c *Conn) queueRequest(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) <-chan response {
+	rq := &request{
+		xid:        c.nextXid(),
+		opcode:     opcode,
+		pkt:        req,
+		recvStruct: res,
+		recvChan:   make(chan response, 1),
+		recvFunc:   recvFunc,
+	}
+	c.sendChan <- rq
+	return rq.recvChan
+}
+
+func (c *Conn) request(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) (int64, error) {
+	r := <-c.queueRequest(opcode, req, res, recvFunc)
+	return r.zxid, r.err
+}
+
+func (c *Conn) AddAuth(scheme string, auth []byte) error {
+	_, err := c.request(opSetAuth, &setAuthRequest{Type: 0, Scheme: scheme, Auth: auth}, &setAuthResponse{}, nil)
+	return err
+}
+
+func (c *Conn) Children(path string) ([]string, *Stat, error) {
+	res := &getChildren2Response{}
+	_, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: false}, res, nil)
+	return res.Children, &res.Stat, err
+}
+
+func (c *Conn) ChildrenW(path string) ([]string, *Stat, <-chan Event, error) {
+	var ech <-chan Event
+	res := &getChildren2Response{}
+	_, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
+		if err == nil {
+			ech = c.addWatcher(path, watchTypeChild)
+		}
+	})
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	return res.Children, &res.Stat, ech, err
+}
+
+func (c *Conn) Get(path string) ([]byte, *Stat, error) {
+	res := &getDataResponse{}
+	_, err := c.request(opGetData, &getDataRequest{Path: path, Watch: false}, res, nil)
+	return res.Data, &res.Stat, err
+}
+
+// GetW returns the contents of a znode and sets a watch
+func (c *Conn) GetW(path string) ([]byte, *Stat, <-chan Event, error) {
+	var ech <-chan Event
+	res := &getDataResponse{}
+	_, err := c.request(opGetData, &getDataRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
+		if err == nil {
+			ech = c.addWatcher(path, watchTypeData)
+		}
+	})
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	return res.Data, &res.Stat, ech, err
+}
+
+func (c *Conn) Set(path string, data []byte, version int32) (*Stat, error) {
+	res := &setDataResponse{}
+	_, err := c.request(opSetData, &SetDataRequest{path, data, version}, res, nil)
+	return &res.Stat, err
+}
+
+func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string, error) {
+	res := &createResponse{}
+	_, err := c.request(opCreate, &CreateRequest{path, data, acl, flags}, res, nil)
+	return res.Path, err
+}
+
+// CreateProtectedEphemeralSequential guards against a race condition where the
+// server crashes after it creates the node. On reconnect the session may still
+// be valid, so the ephemeral node may still exist. Therefore, on reconnect we
+// need to check whether a node with the GUID generated on create already exists.
+func (c *Conn) CreateProtectedEphemeralSequential(path string, data []byte, acl []ACL) (string, error) {
+	var guid [16]byte
+	_, err := io.ReadFull(rand.Reader, guid[:16])
+	if err != nil {
+		return "", err
+	}
+	guidStr := fmt.Sprintf("%x", guid)
+
+	parts := strings.Split(path, "/")
+	parts[len(parts)-1] = fmt.Sprintf("%s%s-%s", protectedPrefix, guidStr, parts[len(parts)-1])
+	rootPath := strings.Join(parts[:len(parts)-1], "/")
+	protectedPath := strings.Join(parts, "/")
+
+	var newPath string
+	for i := 0; i < 3; i++ {
+		newPath, err = c.Create(protectedPath, data, FlagEphemeral|FlagSequence, acl)
+		switch err {
+		case ErrSessionExpired:
+			// No need to search for the node since it can't exist. Just try again.
+		case ErrConnectionClosed:
+			children, _, err := c.Children(rootPath)
+			if err != nil {
+				return "", err
+			}
+			for _, p := range children {
+				parts := strings.Split(p, "/")
+				if pth := parts[len(parts)-1]; strings.HasPrefix(pth, protectedPrefix) {
+					if g := pth[len(protectedPrefix) : len(protectedPrefix)+32]; g == guidStr {
+						return rootPath + "/" + p, nil
+					}
+				}
+			}
+		case nil:
+			return newPath, nil
+		default:
+			return "", err
+		}
+	}
+	return "", err
+}
+
+func (c *Conn) Delete(path string, version int32) error {
+	_, err := c.request(opDelete, &DeleteRequest{path, version}, &deleteResponse{}, nil)
+	return err
+}
+
+func (c *Conn) Exists(path string) (bool, *Stat, error) {
+	res := &existsResponse{}
+	_, err := c.request(opExists, &existsRequest{Path: path, Watch: false}, res, nil)
+	exists := true
+	if err == ErrNoNode {
+		exists = false
+		err = nil
+	}
+	return exists, &res.Stat, err
+}
+
+func (c *Conn) ExistsW(path string) (bool, *Stat, <-chan Event, error) {
+	var ech <-chan Event
+	res := &existsResponse{}
+	_, err := c.request(opExists, &existsRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
+		if err == nil {
+			ech = c.addWatcher(path, watchTypeData)
+		} else if err == ErrNoNode {
+			ech = c.addWatcher(path, watchTypeExist)
+		}
+	})
+	exists := true
+	if err == ErrNoNode {
+		exists = false
+		err = nil
+	}
+	if err != nil {
+		return false, nil, nil, err
+	}
+	return exists, &res.Stat, ech, err
+}
+
+func (c *Conn) GetACL(path string) ([]ACL, *Stat, error) {
+	res := &getAclResponse{}
+	_, err := c.request(opGetAcl, &getAclRequest{Path: path}, res, nil)
+	return res.Acl, &res.Stat, err
+}
+
+func (c *Conn) SetACL(path string, acl []ACL, version int32) (*Stat, error) {
+	res := &setAclResponse{}
+	_, err := c.request(opSetAcl, &setAclRequest{Path: path, Acl: acl, Version: version}, res, nil)
+	return &res.Stat, err
+}
+
+func (c *Conn) Sync(path string) (string, error) {
+	res := &syncResponse{}
+	_, err := c.request(opSync, &syncRequest{Path: path}, res, nil)
+	return res.Path, err
+}
+
+type MultiResponse struct {
+	Stat   *Stat
+	String string
+}
+
+// Multi executes multiple ZooKeeper operations or none of them. The provided
+// ops must be one of *CreateRequest, *DeleteRequest, *SetDataRequest, or
+// *CheckVersionRequest.
+func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) {
+	req := &multiRequest{
+		Ops:        make([]multiRequestOp, 0, len(ops)),
+		DoneHeader: multiHeader{Type: -1, Done: true, Err: -1},
+	}
+	for _, op := range ops {
+		var opCode int32
+		switch op.(type) {
+		case *CreateRequest:
+			opCode = opCreate
+		case *SetDataRequest:
+			opCode = opSetData
+		case *DeleteRequest:
+			opCode = opDelete
+		case *CheckVersionRequest:
+			opCode = opCheck
+		default:
+			return nil, fmt.Errorf("unknown operation type %T", op)
+		}
+		req.Ops = append(req.Ops, multiRequestOp{multiHeader{opCode, false, -1}, op})
+	}
+	res := &multiResponse{}
+	_, err := c.request(opMulti, req, res, nil)
+	mr := make([]MultiResponse, len(res.Ops))
+	for i, op := range res.Ops {
+		mr[i] = MultiResponse{Stat: op.Stat, String: op.String}
+	}
+	return mr, err
+}

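A minimal sketch of connecting and watching with this package (not part of the vendored source; the server address and the "/example" znode are illustrative):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/samuel/go-zookeeper/zk"
)

func main() {
	// The session timeout governs how long a session survives a
	// disconnect before the server expires it.
	conn, events, err := zk.Connect([]string{"127.0.0.1:2181"}, 10*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Drain session events (StateConnected, StateHasSession, ...) so
	// the event channel never fills up.
	go func() {
		for ev := range events {
			log.Printf("zk event: %v", ev)
		}
	}()

	// Read a znode and set a one-shot data watch on it.
	data, stat, watch, err := conn.GetW("/example")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("data=%q version=%d\n", data, stat.Version)

	// The watch channel delivers a single event on the next change.
	ev := <-watch
	fmt.Printf("node changed: %v\n", ev.Type)
}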
+ 242 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants.go

@@ -0,0 +1,242 @@
+package zk
+
+import (
+	"errors"
+)
+
+const (
+	protocolVersion = 0
+
+	DefaultPort = 2181
+)
+
+const (
+	opNotify       = 0
+	opCreate       = 1
+	opDelete       = 2
+	opExists       = 3
+	opGetData      = 4
+	opSetData      = 5
+	opGetAcl       = 6
+	opSetAcl       = 7
+	opGetChildren  = 8
+	opSync         = 9
+	opPing         = 11
+	opGetChildren2 = 12
+	opCheck        = 13
+	opMulti        = 14
+	opClose        = -11
+	opSetAuth      = 100
+	opSetWatches   = 101
+	// Not in protocol, used internally
+	opWatcherEvent = -2
+)
+
+const (
+	EventNodeCreated         = EventType(1)
+	EventNodeDeleted         = EventType(2)
+	EventNodeDataChanged     = EventType(3)
+	EventNodeChildrenChanged = EventType(4)
+
+	EventSession     = EventType(-1)
+	EventNotWatching = EventType(-2)
+)
+
+var (
+	eventNames = map[EventType]string{
+		EventNodeCreated:         "EventNodeCreated",
+		EventNodeDeleted:         "EventNodeDeleted",
+		EventNodeDataChanged:     "EventNodeDataChanged",
+		EventNodeChildrenChanged: "EventNodeChildrenChanged",
+		EventSession:             "EventSession",
+		EventNotWatching:         "EventNotWatching",
+	}
+)
+
+const (
+	StateUnknown           = State(-1)
+	StateDisconnected      = State(0)
+	StateConnecting        = State(1)
+	StateSyncConnected     = State(3)
+	StateAuthFailed        = State(4)
+	StateConnectedReadOnly = State(5)
+	StateSaslAuthenticated = State(6)
+	StateExpired           = State(-112)
+	// StateAuthFailed        = State(-113)
+
+	StateConnected  = State(100)
+	StateHasSession = State(101)
+)
+
+const (
+	FlagEphemeral = 1
+	FlagSequence  = 2
+)
+
+var (
+	stateNames = map[State]string{
+		StateUnknown:           "StateUnknown",
+		StateDisconnected:      "StateDisconnected",
+		StateSyncConnected:     "StateSyncConnected",
+		StateConnectedReadOnly: "StateConnectedReadOnly",
+		StateSaslAuthenticated: "StateSaslAuthenticated",
+		StateExpired:           "StateExpired",
+		StateAuthFailed:        "StateAuthFailed",
+		StateConnecting:        "StateConnecting",
+		StateConnected:         "StateConnected",
+		StateHasSession:        "StateHasSession",
+	}
+)
+
+type State int32
+
+func (s State) String() string {
+	if name := stateNames[s]; name != "" {
+		return name
+	}
+	return "Unknown"
+}
+
+type ErrCode int32
+
+var (
+	ErrConnectionClosed        = errors.New("zk: connection closed")
+	ErrUnknown                 = errors.New("zk: unknown error")
+	ErrAPIError                = errors.New("zk: api error")
+	ErrNoNode                  = errors.New("zk: node does not exist")
+	ErrNoAuth                  = errors.New("zk: not authenticated")
+	ErrBadVersion              = errors.New("zk: version conflict")
+	ErrNoChildrenForEphemerals = errors.New("zk: ephemeral nodes may not have children")
+	ErrNodeExists              = errors.New("zk: node already exists")
+	ErrNotEmpty                = errors.New("zk: node has children")
+	ErrSessionExpired          = errors.New("zk: session has been expired by the server")
+	ErrInvalidACL              = errors.New("zk: invalid ACL specified")
+	ErrAuthFailed              = errors.New("zk: client authentication failed")
+	ErrClosing                 = errors.New("zk: zookeeper is closing")
+	ErrNothing                 = errors.New("zk: no server responses to process")
+	ErrSessionMoved            = errors.New("zk: session moved to another server, so operation is ignored")
+
+	// ErrInvalidCallback         = errors.New("zk: invalid callback specified")
+	errCodeToError = map[ErrCode]error{
+		0:                          nil,
+		errAPIError:                ErrAPIError,
+		errNoNode:                  ErrNoNode,
+		errNoAuth:                  ErrNoAuth,
+		errBadVersion:              ErrBadVersion,
+		errNoChildrenForEphemerals: ErrNoChildrenForEphemerals,
+		errNodeExists:              ErrNodeExists,
+		errNotEmpty:                ErrNotEmpty,
+		errSessionExpired:          ErrSessionExpired,
+		// errInvalidCallback:         ErrInvalidCallback,
+		errInvalidAcl:   ErrInvalidACL,
+		errAuthFailed:   ErrAuthFailed,
+		errClosing:      ErrClosing,
+		errNothing:      ErrNothing,
+		errSessionMoved: ErrSessionMoved,
+	}
+)
+
+func (e ErrCode) toError() error {
+	if err, ok := errCodeToError[e]; ok {
+		return err
+	}
+	return ErrUnknown
+}
+
+const (
+	errOk = 0
+	// System and server-side errors
+	errSystemError          = -1
+	errRuntimeInconsistency = -2
+	errDataInconsistency    = -3
+	errConnectionLoss       = -4
+	errMarshallingError     = -5
+	errUnimplemented        = -6
+	errOperationTimeout     = -7
+	errBadArguments         = -8
+	errInvalidState         = -9
+	// API errors
+	errAPIError                = ErrCode(-100)
+	errNoNode                  = ErrCode(-101) // *
+	errNoAuth                  = ErrCode(-102)
+	errBadVersion              = ErrCode(-103) // *
+	errNoChildrenForEphemerals = ErrCode(-108)
+	errNodeExists              = ErrCode(-110) // *
+	errNotEmpty                = ErrCode(-111)
+	errSessionExpired          = ErrCode(-112)
+	errInvalidCallback         = ErrCode(-113)
+	errInvalidAcl              = ErrCode(-114)
+	errAuthFailed              = ErrCode(-115)
+	errClosing                 = ErrCode(-116)
+	errNothing                 = ErrCode(-117)
+	errSessionMoved            = ErrCode(-118)
+)
+
+// Constants for ACL permissions
+const (
+	PermRead = 1 << iota
+	PermWrite
+	PermCreate
+	PermDelete
+	PermAdmin
+	PermAll = 0x1f
+)
+
+var (
+	emptyPassword = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+	opNames       = map[int32]string{
+		opNotify:       "notify",
+		opCreate:       "create",
+		opDelete:       "delete",
+		opExists:       "exists",
+		opGetData:      "getData",
+		opSetData:      "setData",
+		opGetAcl:       "getACL",
+		opSetAcl:       "setACL",
+		opGetChildren:  "getChildren",
+		opSync:         "sync",
+		opPing:         "ping",
+		opGetChildren2: "getChildren2",
+		opCheck:        "check",
+		opMulti:        "multi",
+		opClose:        "close",
+		opSetAuth:      "setAuth",
+		opSetWatches:   "setWatches",
+
+		opWatcherEvent: "watcherEvent",
+	}
+)
+
+type EventType int32
+
+func (t EventType) String() string {
+	if name := eventNames[t]; name != "" {
+		return name
+	}
+	return "Unknown"
+}
+
+// Mode describes the mode a server is running in (leader|follower|standalone).
+type Mode uint8
+
+func (m Mode) String() string {
+	if name := modeNames[m]; name != "" {
+		return name
+	}
+	return "unknown"
+}
+
+const (
+	ModeUnknown Mode = iota
+	ModeLeader
+	ModeFollower
+	ModeStandalone
+)
+
+var (
+	modeNames = map[Mode]string{
+		ModeLeader:     "leader",
+		ModeFollower:   "follower",
+		ModeStandalone: "standalone",
+	}
+)
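
A minimal sketch of how the exported constants above might be consumed from client code (illustrative only, not part of the vendored diff; the import path matches the vendored tree):

	package main

	import (
		"fmt"

		"github.com/samuel/go-zookeeper/zk"
	)

	func main() {
		// The Stringer implementations map raw int32 codes to names.
		fmt.Println(zk.StateConnected) // "StateConnected"
		fmt.Println(zk.ModeLeader)     // "leader"

		// ACL permission bits compose with bitwise OR; PermAll (0x1f)
		// is the union of all five bits.
		readWrite := int32(zk.PermRead | zk.PermWrite)
		fmt.Printf("%#x\n", readWrite) // 0x3
	}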

+ 24 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/constants_test.go

@@ -0,0 +1,24 @@
+package zk
+
+import (
+	"fmt"
+	"testing"
+)
+
+func TestModeString(t *testing.T) {
+	if fmt.Sprintf("%v", ModeUnknown) != "unknown" {
+		t.Errorf("unknown value should be 'unknown'")
+	}
+
+	if fmt.Sprintf("%v", ModeLeader) != "leader" {
+		t.Errorf("leader value should be 'leader'")
+	}
+
+	if fmt.Sprintf("%v", ModeFollower) != "follower" {
+		t.Errorf("follower value should be 'follower'")
+	}
+
+	if fmt.Sprintf("%v", ModeStandalone) != "standalone" {
+		t.Errorf("standlone value should be 'standalone'")
+	}
+}

+ 288 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw.go

@@ -0,0 +1,288 @@
+package zk
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"math/big"
+	"net"
+	"regexp"
+	"strconv"
+	"time"
+)
+
+// FLWSrvr is a FourLetterWord helper function. In particular, this function pulls the srvr output
+// from the zookeeper instances and parses the output. A slice of *ServerStats structs is returned,
+// as well as a boolean value indicating whether this function processed successfully.
+//
+// If the boolean value is false there was a problem. If the *ServerStats slice is empty or nil,
+// then the error happened before we started to obtain 'srvr' values. Otherwise, one of the
+// servers had an issue and the "Error" value in the struct should be inspected to determine
+// which server had the issue.
+func FLWSrvr(servers []string, timeout time.Duration) ([]*ServerStats, bool) {
+	// different parts of the regular expression that are required to parse the srvr output
+	var (
+		zrVer   = `^Zookeeper version: ([A-Za-z0-9\.\-]+), built on (\d\d/\d\d/\d\d\d\d \d\d:\d\d [A-Za-z0-9:\+\-]+)`
+		zrLat   = `^Latency min/avg/max: (\d+)/(\d+)/(\d+)`
+		zrNet   = `^Received: (\d+).*\n^Sent: (\d+).*\n^Connections: (\d+).*\n^Outstanding: (\d+)`
+		zrState = `^Zxid: (0x[A-Za-z0-9]+).*\n^Mode: (\w+).*\n^Node count: (\d+)`
+	)
+
+	// build the regex from the pieces above
+	re, err := regexp.Compile(fmt.Sprintf(`(?m:\A%v.*\n%v.*\n%v.*\n%v)`, zrVer, zrLat, zrNet, zrState))
+
+	if err != nil {
+		return nil, false
+	}
+
+	imOk := true
+	servers = FormatServers(servers)
+	ss := make([]*ServerStats, len(servers))
+
+	for i := range ss {
+		response, err := fourLetterWord(servers[i], "srvr", timeout)
+
+		if err != nil {
+			ss[i] = &ServerStats{Error: err}
+			imOk = false
+			continue
+		}
+
+		matches := re.FindAllStringSubmatch(string(response), -1)
+
+		if matches == nil {
+			err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)")
+			ss[i] = &ServerStats{Error: err}
+			imOk = false
+			continue
+		}
+
+		match := matches[0][1:]
+
+		// determine current server
+		var srvrMode Mode
+		switch match[10] {
+		case "leader":
+			srvrMode = ModeLeader
+		case "follower":
+			srvrMode = ModeFollower
+		case "standalone":
+			srvrMode = ModeStandalone
+		default:
+			srvrMode = ModeUnknown
+		}
+
+		buildTime, err := time.Parse("01/02/2006 15:04 MST", match[1])
+
+		if err != nil {
+			ss[i] = &ServerStats{Error: err}
+			imOk = false
+			continue
+		}
+
+		parsedInt, err := strconv.ParseInt(match[9], 0, 64)
+
+		if err != nil {
+			ss[i] = &ServerStats{Error: err}
+			imOk = false
+			continue
+		}
+
+		// the ZxID value is an int64 with two int32s packed inside
+		// the high int32 is the epoch (i.e., number of leader elections)
+		// the low int32 is the counter
+		epoch := int32(parsedInt >> 32)
+		counter := int32(parsedInt & 0xFFFFFFFF)
+
+		// within the regex above, these values must be numerical
+		// so we can avoid useless checking of the error return value
+		minLatency, _ := strconv.ParseInt(match[2], 0, 64)
+		avgLatency, _ := strconv.ParseInt(match[3], 0, 64)
+		maxLatency, _ := strconv.ParseInt(match[4], 0, 64)
+		recv, _ := strconv.ParseInt(match[5], 0, 64)
+		sent, _ := strconv.ParseInt(match[6], 0, 64)
+		cons, _ := strconv.ParseInt(match[7], 0, 64)
+		outs, _ := strconv.ParseInt(match[8], 0, 64)
+		ncnt, _ := strconv.ParseInt(match[11], 0, 64)
+
+		ss[i] = &ServerStats{
+			Sent:        sent,
+			Received:    recv,
+			NodeCount:   ncnt,
+			MinLatency:  minLatency,
+			AvgLatency:  avgLatency,
+			MaxLatency:  maxLatency,
+			Connections: cons,
+			Outstanding: outs,
+			Epoch:       epoch,
+			Counter:     counter,
+			BuildTime:   buildTime,
+			Mode:        srvrMode,
+			Version:     match[0],
+		}
+	}
+
+	return ss, imOk
+}
+
+// FLWRuok is a FourLetterWord helper function. In particular, this function
+// pulls the ruok output from each server.
+func FLWRuok(servers []string, timeout time.Duration) []bool {
+	servers = FormatServers(servers)
+	oks := make([]bool, len(servers))
+
+	for i := range oks {
+		response, err := fourLetterWord(servers[i], "ruok", timeout)
+
+		if err != nil {
+			continue
+		}
+
+		if len(response) >= 4 && bytes.Equal(response[:4], []byte("imok")) {
+			oks[i] = true
+		}
+	}
+	return oks
+}
+
+// FLWCons is a FourLetterWord helper function. In particular, this function
+// pulls the cons output from each server.
+//
+// As with FLWSrvr, the boolean value indicates whether one of the requests had
+// an issue. The Clients struct has an Error value that can be checked.
+func FLWCons(servers []string, timeout time.Duration) ([]*ServerClients, bool) {
+	var (
+		zrAddr = `^ /((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?):(?:\d+))\[\d+\]`
+		zrPac  = `\(queued=(\d+),recved=(\d+),sent=(\d+),sid=(0x[A-Za-z0-9]+),lop=(\w+),est=(\d+),to=(\d+),`
+		zrSesh = `lcxid=(0x[A-Za-z0-9]+),lzxid=(0x[A-Za-z0-9]+),lresp=(\d+),llat=(\d+),minlat=(\d+),avglat=(\d+),maxlat=(\d+)\)`
+	)
+
+	re, err := regexp.Compile(fmt.Sprintf("%v%v%v", zrAddr, zrPac, zrSesh))
+
+	if err != nil {
+		return nil, false
+	}
+
+	servers = FormatServers(servers)
+	sc := make([]*ServerClients, len(servers))
+	imOk := true
+
+	for i := range sc {
+		response, err := fourLetterWord(servers[i], "cons", timeout)
+
+		if err != nil {
+			sc[i] = &ServerClients{Error: err}
+			imOk = false
+			continue
+		}
+
+		scan := bufio.NewScanner(bytes.NewReader(response))
+
+		var clients []*ServerClient
+
+		for scan.Scan() {
+			line := scan.Bytes()
+
+			if len(line) == 0 {
+				continue
+			}
+
+			m := re.FindAllStringSubmatch(string(line), -1)
+
+			if m == nil {
+				err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)")
+				sc[i] = &ServerClients{Error: err}
+				imOk = false
+				continue
+			}
+
+			match := m[0][1:]
+
+			queued, _ := strconv.ParseInt(match[1], 0, 64)
+			recvd, _ := strconv.ParseInt(match[2], 0, 64)
+			sent, _ := strconv.ParseInt(match[3], 0, 64)
+			sid, _ := strconv.ParseInt(match[4], 0, 64)
+			est, _ := strconv.ParseInt(match[6], 0, 64)
+			timeout, _ := strconv.ParseInt(match[7], 0, 32)
+			lresp, _ := strconv.ParseInt(match[10], 0, 64)
+			llat, _ := strconv.ParseInt(match[11], 0, 32)
+			minlat, _ := strconv.ParseInt(match[12], 0, 32)
+			avglat, _ := strconv.ParseInt(match[13], 0, 32)
+			maxlat, _ := strconv.ParseInt(match[14], 0, 32)
+
+			// zookeeper returns a value, '0xffffffffffffffff', as the
+			// Lzxid for PING requests in the 'cons' output.
+			// unfortunately, in Go that is an invalid int64 and is not represented
+			// as -1.
+			// However, converting the string value to a big.Int and then back to
+			// an int64 properly sets the value to -1.
+			lzxid, ok := new(big.Int).SetString(match[9], 0)
+
+			var errVal error
+
+			if !ok {
+				errVal = fmt.Errorf("failed to convert lzxid value to big.Int")
+				imOk = false
+			}
+
+			lcxid, ok := new(big.Int).SetString(match[8], 0)
+
+			if !ok && errVal == nil {
+				errVal = fmt.Errorf("failed to convert lcxid value to big.Int")
+				imOk = false
+			}
+
+			clients = append(clients, &ServerClient{
+				Queued:        queued,
+				Received:      recvd,
+				Sent:          sent,
+				SessionID:     sid,
+				Lcxid:         lcxid.Int64(),
+				Lzxid:         lzxid.Int64(),
+				Timeout:       int32(timeout),
+				LastLatency:   int32(llat),
+				MinLatency:    int32(minlat),
+				AvgLatency:    int32(avglat),
+				MaxLatency:    int32(maxlat),
+				Established:   time.Unix(est, 0),
+				LastResponse:  time.Unix(lresp, 0),
+				Addr:          match[0],
+				LastOperation: match[5],
+				Error:         errVal,
+			})
+		}
+
+		// don't overwrite an error recorded while scanning this server's output
+		if sc[i] == nil {
+			sc[i] = &ServerClients{Clients: clients}
+		}
+	}
+
+	return sc, imOk
+}
+
+func fourLetterWord(server, command string, timeout time.Duration) ([]byte, error) {
+	conn, err := net.DialTimeout("tcp", server, timeout)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// the zookeeper server should automatically close this socket
+	// once the command has been processed, but better safe than sorry
+	defer conn.Close()
+
+	conn.SetWriteDeadline(time.Now().Add(timeout))
+
+	_, err = conn.Write([]byte(command))
+
+	if err != nil {
+		return nil, err
+	}
+
+	conn.SetReadDeadline(time.Now().Add(timeout))
+
+	resp, err := ioutil.ReadAll(conn)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
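
A hedged usage sketch for the four-letter-word helpers above (assumes a ZooKeeper member answering on 127.0.0.1:2181; not part of the vendored diff):

	package main

	import (
		"fmt"
		"time"

		"github.com/samuel/go-zookeeper/zk"
	)

	func main() {
		servers := []string{"127.0.0.1:2181"}

		// One bool per server; false means the server did not answer "imok".
		oks := zk.FLWRuok(servers, 5*time.Second)
		fmt.Println("ruok:", oks)

		// Parsed srvr stats plus an overall ok flag; per-server failures
		// are reported via the Error field of each *ServerStats.
		stats, ok := zk.FLWSrvr(servers, 5*time.Second)
		if !ok {
			for _, s := range stats {
				if s.Error != nil {
					fmt.Println("srvr error:", s.Error)
				}
			}
			return
		}
		fmt.Printf("mode=%v epoch=%d counter=%d\n",
			stats[0].Mode, stats[0].Epoch, stats[0].Counter)
	}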

+ 367 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/flw_test.go

@@ -0,0 +1,367 @@
+package zk
+
+import (
+	"net"
+	"testing"
+	"time"
+)
+
+var (
+	zkSrvrOut = `Zookeeper version: 3.4.6-1569965, built on 02/20/2014 09:09 GMT
+Latency min/avg/max: 0/1/10
+Received: 4207
+Sent: 4220
+Connections: 81
+Outstanding: 1
+Zxid: 0x110a7a8f37
+Mode: leader
+Node count: 306
+`
+	zkConsOut = ` /10.42.45.231:45361[1](queued=0,recved=9435,sent=9457,sid=0x94c2989e04716b5,lop=PING,est=1427238717217,to=20001,lcxid=0x55120915,lzxid=0xffffffffffffffff,lresp=1427259255908,llat=0,minlat=0,avglat=1,maxlat=17)
+ /10.55.33.98:34342[1](queued=0,recved=9338,sent=9350,sid=0x94c2989e0471731,lop=PING,est=1427238849319,to=20001,lcxid=0x55120944,lzxid=0xffffffffffffffff,lresp=1427259252294,llat=0,minlat=0,avglat=1,maxlat=18)
+ /10.44.145.114:46556[1](queued=0,recved=109253,sent=109617,sid=0x94c2989e0471709,lop=DELE,est=1427238791305,to=20001,lcxid=0x55139618,lzxid=0x110a7b187d,lresp=1427259257423,llat=2,minlat=0,avglat=1,maxlat=23)
+
+`
+)
+
+func TestFLWRuok(t *testing.T) {
+	l, err := net.Listen("tcp", "127.0.0.1:2181")
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	go tcpServer(l, "")
+
+	var oks []bool
+	var ok bool
+
+	oks = FLWRuok([]string{"127.0.0.1"}, time.Second*10)
+
+	// close the connection, and pause shortly
+	// to cheat around a race condition
+	l.Close()
+	time.Sleep(time.Millisecond * 1)
+
+	if len(oks) == 0 {
+		t.Errorf("no values returned")
+	}
+
+	ok = oks[0]
+
+	if !ok {
+		t.Errorf("instance should be marked as OK")
+	}
+
+	//
+	// Confirm that it also returns false for dead instances
+	//
+	l, err = net.Listen("tcp", "127.0.0.1:2181")
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer l.Close()
+
+	go tcpServer(l, "dead")
+
+	oks = FLWRuok([]string{"127.0.0.1"}, time.Second*10)
+
+	if len(oks) == 0 {
+		t.Errorf("no values returned")
+	}
+
+	ok = oks[0]
+
+	if ok {
+		t.Errorf("instance should be marked as not OK")
+	}
+}
+
+func TestFLWSrvr(t *testing.T) {
+	l, err := net.Listen("tcp", "127.0.0.1:2181")
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer l.Close()
+
+	go tcpServer(l, "")
+
+	var statsSlice []*ServerStats
+	var stats *ServerStats
+	var ok bool
+
+	statsSlice, ok = FLWSrvr([]string{"127.0.0.1:2181"}, time.Second*10)
+
+	if !ok {
+		t.Errorf("failure indicated on 'srvr' parsing")
+	}
+
+	if len(statsSlice) == 0 {
+		t.Errorf("no *ServerStats instances returned")
+	}
+
+	stats = statsSlice[0]
+
+	if stats.Error != nil {
+		t.Fatalf("error seen in stats: %v", err.Error())
+	}
+
+	if stats.Sent != 4220 {
+		t.Errorf("Sent != 4220")
+	}
+
+	if stats.Received != 4207 {
+		t.Errorf("Received != 4207")
+	}
+
+	if stats.NodeCount != 306 {
+		t.Errorf("NodeCount != 306")
+	}
+
+	if stats.MinLatency != 0 {
+		t.Errorf("MinLatency != 0")
+	}
+
+	if stats.AvgLatency != 1 {
+		t.Errorf("AvgLatency != 1")
+	}
+
+	if stats.MaxLatency != 10 {
+		t.Errorf("MaxLatency != 10")
+	}
+
+	if stats.Connections != 81 {
+		t.Errorf("Connection != 81")
+	}
+
+	if stats.Outstanding != 1 {
+		t.Errorf("Outstanding != 1")
+	}
+
+	if stats.Epoch != 17 {
+		t.Errorf("Epoch != 17")
+	}
+
+	if stats.Counter != 175804215 {
+		t.Errorf("Counter != 175804215")
+	}
+
+	if stats.Mode != ModeLeader {
+		t.Errorf("Mode != ModeLeader")
+	}
+
+	if stats.Version != "3.4.6-1569965" {
+		t.Errorf("Version expected: 3.4.6-1569965")
+	}
+
+	buildTime, err := time.Parse("01/02/2006 15:04 MST", "02/20/2014 09:09 GMT")
+
+	if err != nil {
+		t.Fatalf("unable to parse expected build time: %v", err)
+	}
+
+	if !stats.BuildTime.Equal(buildTime) {
+		t.Errorf("BuildTime expected %v instead of %v", buildTime, stats.BuildTime)
+	}
+}
+
+func TestFLWCons(t *testing.T) {
+	l, err := net.Listen("tcp", "127.0.0.1:2181")
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer l.Close()
+
+	go tcpServer(l, "")
+
+	var clients []*ServerClients
+	var ok bool
+
+	clients, ok = FLWCons([]string{"127.0.0.1"}, time.Second*10)
+
+	if !ok {
+		t.Errorf("failure indicated on 'cons' parsing")
+	}
+
+	if len(clients) == 0 {
+		t.Errorf("no *ServerClients instances returned")
+	}
+
+	results := []*ServerClient{
+		&ServerClient{
+			Queued:        0,
+			Received:      9435,
+			Sent:          9457,
+			SessionID:     669956116721374901,
+			LastOperation: "PING",
+			Established:   time.Unix(1427238717217, 0),
+			Timeout:       20001,
+			Lcxid:         1427245333,
+			Lzxid:         -1,
+			LastResponse:  time.Unix(1427259255908, 0),
+			LastLatency:   0,
+			MinLatency:    0,
+			AvgLatency:    1,
+			MaxLatency:    17,
+			Addr:          "10.42.45.231:45361",
+		},
+		&ServerClient{
+			Queued:        0,
+			Received:      9338,
+			Sent:          9350,
+			SessionID:     669956116721375025,
+			LastOperation: "PING",
+			Established:   time.Unix(1427238849319, 0),
+			Timeout:       20001,
+			Lcxid:         1427245380,
+			Lzxid:         -1,
+			LastResponse:  time.Unix(1427259252294, 0),
+			LastLatency:   0,
+			MinLatency:    0,
+			AvgLatency:    1,
+			MaxLatency:    18,
+			Addr:          "10.55.33.98:34342",
+		},
+		&ServerClient{
+			Queued:        0,
+			Received:      109253,
+			Sent:          109617,
+			SessionID:     669956116721374985,
+			LastOperation: "DELE",
+			Established:   time.Unix(1427238791305, 0),
+			Timeout:       20001,
+			Lcxid:         1427346968,
+			Lzxid:         73190283389,
+			LastResponse:  time.Unix(1427259257423, 0),
+			LastLatency:   2,
+			MinLatency:    0,
+			AvgLatency:    1,
+			MaxLatency:    23,
+			Addr:          "10.44.145.114:46556",
+		},
+	}
+
+	for _, z := range clients {
+		if z.Error != nil {
+			t.Errorf("error seen: %v", err.Error())
+		}
+
+		for i, v := range z.Clients {
+			c := results[i]
+
+			if v.Error != nil {
+				t.Errorf("client error seen: %v", err.Error())
+			}
+
+			if v.Queued != c.Queued {
+				t.Errorf("Queued value mismatch (%d/%d)", v.Queued, c.Queued)
+			}
+
+			if v.Received != c.Received {
+				t.Errorf("Received value mismatch (%d/%d)", v.Received, c.Received)
+			}
+
+			if v.Sent != c.Sent {
+				t.Errorf("Sent value mismatch (%d/%d)", v.Sent, c.Sent)
+			}
+
+			if v.SessionID != c.SessionID {
+				t.Errorf("SessionID value mismatch (%d/%d)", v.SessionID, c.SessionID)
+			}
+
+			if v.LastOperation != c.LastOperation {
+				t.Errorf("LastOperation value mismatch ('%v'/'%v')", v.LastOperation, c.LastOperation)
+			}
+
+			if v.Timeout != c.Timeout {
+				t.Errorf("Timeout value mismatch (%d/%d)", v.Timeout, c.Timeout)
+			}
+
+			if v.Lcxid != c.Lcxid {
+				t.Errorf("Lcxid value mismatch (%d/%d)", v.Lcxid, c.Lcxid)
+			}
+
+			if v.Lzxid != c.Lzxid {
+				t.Errorf("Lzxid value mismatch (%d/%d)", v.Lzxid, c.Lzxid)
+			}
+
+			if v.LastLatency != c.LastLatency {
+				t.Errorf("LastLatency value mismatch (%d/%d)", v.LastLatency, c.LastLatency)
+			}
+
+			if v.MinLatency != c.MinLatency {
+				t.Errorf("MinLatency value mismatch (%d/%d)", v.MinLatency, c.MinLatency)
+			}
+
+			if v.AvgLatency != c.AvgLatency {
+				t.Errorf("AvgLatency value mismatch (%d/%d)", v.AvgLatency, c.AvgLatency)
+			}
+
+			if v.MaxLatency != c.MaxLatency {
+				t.Errorf("MaxLatency value mismatch (%d/%d)", v.MaxLatency, c.MaxLatency)
+			}
+
+			if v.Addr != c.Addr {
+				t.Errorf("Addr value mismatch ('%v'/'%v')", v.Addr, c.Addr)
+			}
+
+			if !c.Established.Equal(v.Established) {
+				t.Errorf("Established value mismatch (%v/%v)", c.Established, v.Established)
+			}
+
+			if !c.LastResponse.Equal(v.LastResponse) {
+				t.Errorf("Established value mismatch (%v/%v)", c.LastResponse, v.LastResponse)
+			}
+		}
+	}
+}
+
+func tcpServer(listener net.Listener, thing string) {
+	for {
+		conn, err := listener.Accept()
+		if err != nil {
+			return
+		}
+		go connHandler(conn, thing)
+	}
+}
+
+func connHandler(conn net.Conn, thing string) {
+	defer conn.Close()
+
+	data := make([]byte, 4)
+
+	_, err := conn.Read(data)
+
+	if err != nil {
+		return
+	}
+
+	switch string(data) {
+	case "ruok":
+		switch thing {
+		case "dead":
+			return
+		default:
+			conn.Write([]byte("imok"))
+		}
+	case "srvr":
+		switch thing {
+		case "dead":
+			return
+		default:
+			conn.Write([]byte(zkSrvrOut))
+		}
+	case "cons":
+		switch thing {
+		case "dead":
+			return
+		default:
+			conn.Write([]byte(zkConsOut))
+		}
+	default:
+		conn.Write([]byte("This ZooKeeper instance is not currently serving requests."))
+	}
+}

+ 131 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock.go

@@ -0,0 +1,131 @@
+package zk
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+var (
+	ErrDeadlock  = errors.New("zk: trying to acquire a lock twice")
+	ErrNotLocked = errors.New("zk: not locked")
+)
+
+type Lock struct {
+	c        *Conn
+	path     string
+	acl      []ACL
+	lockPath string
+	seq      int
+}
+
+func NewLock(c *Conn, path string, acl []ACL) *Lock {
+	return &Lock{
+		c:    c,
+		path: path,
+		acl:  acl,
+	}
+}
+
+func parseSeq(path string) (int, error) {
+	parts := strings.Split(path, "-")
+	return strconv.Atoi(parts[len(parts)-1])
+}
+
+func (l *Lock) Lock() error {
+	if l.lockPath != "" {
+		return ErrDeadlock
+	}
+
+	prefix := fmt.Sprintf("%s/lock-", l.path)
+
+	path := ""
+	var err error
+	for i := 0; i < 3; i++ {
+		path, err = l.c.CreateProtectedEphemeralSequential(prefix, []byte{}, l.acl)
+		if err == ErrNoNode {
+			// Create parent node.
+			parts := strings.Split(l.path, "/")
+			pth := ""
+			for _, p := range parts[1:] {
+				pth += "/" + p
+				_, err := l.c.Create(pth, []byte{}, 0, l.acl)
+				if err != nil && err != ErrNodeExists {
+					return err
+				}
+			}
+		} else if err == nil {
+			break
+		} else {
+			return err
+		}
+	}
+	if err != nil {
+		return err
+	}
+
+	seq, err := parseSeq(path)
+	if err != nil {
+		return err
+	}
+
+	for {
+		children, _, err := l.c.Children(l.path)
+		if err != nil {
+			return err
+		}
+
+		lowestSeq := seq
+		prevSeq := 0
+		prevSeqPath := ""
+		for _, p := range children {
+			s, err := parseSeq(p)
+			if err != nil {
+				return err
+			}
+			if s < lowestSeq {
+				lowestSeq = s
+			}
+			if s < seq && s > prevSeq {
+				prevSeq = s
+				prevSeqPath = p
+			}
+		}
+
+		if seq == lowestSeq {
+			// Acquired the lock
+			break
+		}
+
+		// Wait on the node next in line for the lock
+		_, _, ch, err := l.c.GetW(l.path + "/" + prevSeqPath)
+		if err == ErrNoNode {
+			// the node ahead of us vanished; try again
+			continue
+		} else if err != nil {
+			return err
+		}
+
+		ev := <-ch
+		if ev.Err != nil {
+			return ev.Err
+		}
+	}
+
+	l.seq = seq
+	l.lockPath = path
+	return nil
+}
+
+func (l *Lock) Unlock() error {
+	if l.lockPath == "" {
+		return ErrNotLocked
+	}
+	if err := l.c.Delete(l.lockPath, -1); err != nil {
+		return err
+	}
+	l.lockPath = ""
+	l.seq = 0
+	return nil
+}
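
A short sketch of the lock recipe above (assumes a reachable ensemble; the lock path is hypothetical; illustrative, not part of the vendored diff):

	package main

	import (
		"log"
		"time"

		"github.com/samuel/go-zookeeper/zk"
	)

	func main() {
		conn, _, err := zk.Connect([]string{"127.0.0.1:2181"}, 10*time.Second)
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		// Missing parent nodes are created on demand by Lock().
		l := zk.NewLock(conn, "/my-app/leader", zk.WorldACL(zk.PermAll))
		if err := l.Lock(); err != nil { // blocks until this session holds the lock
			log.Fatal(err)
		}
		// ... critical section ...
		if err := l.Unlock(); err != nil {
			log.Fatal(err)
		}
	}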

+ 94 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/lock_test.go

@@ -0,0 +1,94 @@
+package zk
+
+import (
+	"testing"
+	"time"
+)
+
+func TestLock(t *testing.T) {
+	ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+	zk, _, err := ts.ConnectAll()
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk.Close()
+
+	acls := WorldACL(PermAll)
+
+	l := NewLock(zk, "/test", acls)
+	if err := l.Lock(); err != nil {
+		t.Fatal(err)
+	}
+	if err := l.Unlock(); err != nil {
+		t.Fatal(err)
+	}
+
+	val := make(chan int, 3)
+
+	if err := l.Lock(); err != nil {
+		t.Fatal(err)
+	}
+
+	l2 := NewLock(zk, "/test", acls)
+	go func() {
+		if err := l2.Lock(); err != nil {
+			t.Fatal(err)
+		}
+		val <- 2
+		if err := l2.Unlock(); err != nil {
+			t.Fatal(err)
+		}
+		val <- 3
+	}()
+	time.Sleep(time.Millisecond * 100)
+
+	val <- 1
+	if err := l.Unlock(); err != nil {
+		t.Fatal(err)
+	}
+	if x := <-val; x != 1 {
+		t.Fatalf("Expected 1 instead of %d", x)
+	}
+	if x := <-val; x != 2 {
+		t.Fatalf("Expected 2 instead of %d", x)
+	}
+	if x := <-val; x != 3 {
+		t.Fatalf("Expected 3 instead of %d", x)
+	}
+}
+
+// This tests creating a lock with a path that's more than 1 node deep (e.g. "/test-multi-level/lock"),
+// when a part of that path already exists (i.e. "/test-multi-level" node already exists).
+func TestMultiLevelLock(t *testing.T) {
+	ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+	zk, _, err := ts.ConnectAll()
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk.Close()
+
+	acls := WorldACL(PermAll)
+	path := "/test-multi-level"
+	if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
+		t.Fatalf("Create returned error: %+v", err)
+	} else if p != path {
+		t.Fatalf("Create returned different path '%s' != '%s'", p, path)
+	}
+	l := NewLock(zk, "/test-multi-level/lock", acls)
+	defer zk.Delete("/test-multi-level", -1) // Clean up what we've created for this test
+	defer zk.Delete("/test-multi-level/lock", -1)
+	if err := l.Lock(); err != nil {
+		t.Fatal(err)
+	}
+	if err := l.Unlock(); err != nil {
+		t.Fatal(err)
+	}
+}

+ 119 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/server_help.go

@@ -0,0 +1,119 @@
+package zk
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"path/filepath"
+	"time"
+)
+
+type TestServer struct {
+	Port int
+	Path string
+	Srv  *Server
+}
+
+type TestCluster struct {
+	Path    string
+	Servers []TestServer
+}
+
+func StartTestCluster(size int, stdout, stderr io.Writer) (*TestCluster, error) {
+	tmpPath, err := ioutil.TempDir("", "gozk")
+	if err != nil {
+		return nil, err
+	}
+	success := false
+	startPort := int(rand.Int31n(6000) + 10000)
+	cluster := &TestCluster{Path: tmpPath}
+	defer func() {
+		if !success {
+			cluster.Stop()
+		}
+	}()
+	for serverN := 0; serverN < size; serverN++ {
+		srvPath := filepath.Join(tmpPath, fmt.Sprintf("srv%d", serverN))
+		if err := os.Mkdir(srvPath, 0700); err != nil {
+			return nil, err
+		}
+		port := startPort + serverN*3
+		cfg := ServerConfig{
+			ClientPort: port,
+			DataDir:    srvPath,
+		}
+		for i := 0; i < size; i++ {
+			cfg.Servers = append(cfg.Servers, ServerConfigServer{
+				ID:                 i + 1,
+				Host:               "127.0.0.1",
+				PeerPort:           startPort + i*3 + 1,
+				LeaderElectionPort: startPort + i*3 + 2,
+			})
+		}
+		cfgPath := filepath.Join(srvPath, "zoo.cfg")
+		fi, err := os.Create(cfgPath)
+		if err != nil {
+			return nil, err
+		}
+		err = cfg.Marshall(fi)
+		fi.Close()
+		if err != nil {
+			return nil, err
+		}
+
+		fi, err = os.Create(filepath.Join(srvPath, "myid"))
+		if err != nil {
+			return nil, err
+		}
+		_, err = fmt.Fprintf(fi, "%d\n", serverN+1)
+		fi.Close()
+		if err != nil {
+			return nil, err
+		}
+
+		srv := &Server{
+			ConfigPath: cfgPath,
+			Stdout:     stdout,
+			Stderr:     stderr,
+		}
+		if err := srv.Start(); err != nil {
+			return nil, err
+		}
+		cluster.Servers = append(cluster.Servers, TestServer{
+			Path: srvPath,
+			Port: cfg.ClientPort,
+			Srv:  srv,
+		})
+	}
+	success = true
+	time.Sleep(time.Second) // Give the server time to become active. Should probably actually attempt to connect to verify.
+	return cluster, nil
+}
+
+func (ts *TestCluster) Connect(idx int) (*Conn, error) {
+	zk, _, err := Connect([]string{fmt.Sprintf("127.0.0.1:%d", ts.Servers[idx].Port)}, time.Second*15)
+	return zk, err
+}
+
+func (ts *TestCluster) ConnectAll() (*Conn, <-chan Event, error) {
+	return ts.ConnectAllTimeout(time.Second * 15)
+}
+
+func (ts *TestCluster) ConnectAllTimeout(sessionTimeout time.Duration) (*Conn, <-chan Event, error) {
+	hosts := make([]string, len(ts.Servers))
+	for i, srv := range ts.Servers {
+		hosts[i] = fmt.Sprintf("127.0.0.1:%d", srv.Port)
+	}
+	zk, ch, err := Connect(hosts, sessionTimeout)
+	return zk, ch, err
+}
+
+func (ts *TestCluster) Stop() error {
+	for _, srv := range ts.Servers {
+		srv.Srv.Stop()
+	}
+	defer os.RemoveAll(ts.Path)
+	return nil
+}
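
The test helpers above are typically driven as follows (hedged sketch; relies on a ZooKeeper fatjar being discoverable as described in server_java.go below):

	package main

	import (
		"log"
		"os"

		"github.com/samuel/go-zookeeper/zk"
	)

	func main() {
		ts, err := zk.StartTestCluster(3, nil, os.Stderr) // 3-member ensemble
		if err != nil {
			log.Fatal(err)
		}
		defer ts.Stop()

		conn, events, err := ts.ConnectAll() // session against all members
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()
		_ = events // session state changes arrive on this channel
	}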

+ 136 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/server_java.go

@@ -0,0 +1,136 @@
+package zk
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+)
+
+type ErrMissingServerConfigField string
+
+func (e ErrMissingServerConfigField) Error() string {
+	return fmt.Sprintf("zk: missing server config field '%s'", string(e))
+}
+
+const (
+	DefaultServerTickTime                 = 2000
+	DefaultServerInitLimit                = 10
+	DefaultServerSyncLimit                = 5
+	DefaultServerAutoPurgeSnapRetainCount = 3
+	DefaultPeerPort                       = 2888
+	DefaultLeaderElectionPort             = 3888
+)
+
+type ServerConfigServer struct {
+	ID                 int
+	Host               string
+	PeerPort           int
+	LeaderElectionPort int
+}
+
+type ServerConfig struct {
+	TickTime                 int    // Number of milliseconds of each tick
+	InitLimit                int    // Number of ticks that the initial synchronization phase can take
+	SyncLimit                int    // Number of ticks that can pass between sending a request and getting an acknowledgement
+	DataDir                  string // Directory where the snapshot is stored
+	ClientPort               int    // Port at which clients will connect
+	AutoPurgeSnapRetainCount int    // Number of snapshots to retain in dataDir
+	AutoPurgePurgeInterval   int    // Purge task interval in hours (0 to disable auto purge)
+	Servers                  []ServerConfigServer
+}
+
+func (sc ServerConfig) Marshall(w io.Writer) error {
+	if sc.DataDir == "" {
+		return ErrMissingServerConfigField("dataDir")
+	}
+	fmt.Fprintf(w, "dataDir=%s\n", sc.DataDir)
+	if sc.TickTime <= 0 {
+		sc.TickTime = DefaultServerTickTime
+	}
+	fmt.Fprintf(w, "tickTime=%d\n", sc.TickTime)
+	if sc.InitLimit <= 0 {
+		sc.InitLimit = DefaultServerInitLimit
+	}
+	fmt.Fprintf(w, "initLimit=%d\n", sc.InitLimit)
+	if sc.SyncLimit <= 0 {
+		sc.SyncLimit = DefaultServerSyncLimit
+	}
+	fmt.Fprintf(w, "syncLimit=%d\n", sc.SyncLimit)
+	if sc.ClientPort <= 0 {
+		sc.ClientPort = DefaultPort
+	}
+	fmt.Fprintf(w, "clientPort=%d\n", sc.ClientPort)
+	if sc.AutoPurgePurgeInterval > 0 {
+		if sc.AutoPurgeSnapRetainCount <= 0 {
+			sc.AutoPurgeSnapRetainCount = DefaultServerAutoPurgeSnapRetainCount
+		}
+		fmt.Fprintf(w, "autopurge.snapRetainCount=%d\n", sc.AutoPurgeSnapRetainCount)
+		fmt.Fprintf(w, "autopurge.purgeInterval=%d\n", sc.AutoPurgePurgeInterval)
+	}
+	if len(sc.Servers) > 0 {
+		for _, srv := range sc.Servers {
+			if srv.PeerPort <= 0 {
+				srv.PeerPort = DefaultPeerPort
+			}
+			if srv.LeaderElectionPort <= 0 {
+				srv.LeaderElectionPort = DefaultLeaderElectionPort
+			}
+			fmt.Fprintf(w, "server.%d=%s:%d:%d\n", srv.ID, srv.Host, srv.PeerPort, srv.LeaderElectionPort)
+		}
+	}
+	return nil
+}
+
+var jarSearchPaths = []string{
+	"zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
+	"../zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
+	"/usr/share/java/zookeeper-*.jar",
+	"/usr/local/zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
+	"/usr/local/Cellar/zookeeper/*/libexec/contrib/fatjar/zookeeper-*-fatjar.jar",
+}
+
+func findZookeeperFatJar() string {
+	var paths []string
+	zkPath := os.Getenv("ZOOKEEPER_PATH")
+	if zkPath == "" {
+		paths = jarSearchPaths
+	} else {
+		paths = []string{filepath.Join(zkPath, "contrib/fatjar/zookeeper-*-fatjar.jar")}
+	}
+	for _, path := range paths {
+		matches, _ := filepath.Glob(path)
+		// TODO: could sort by version and pick latest
+		if len(matches) > 0 {
+			return matches[0]
+		}
+	}
+	return ""
+}
+
+type Server struct {
+	JarPath        string
+	ConfigPath     string
+	Stdout, Stderr io.Writer
+
+	cmd *exec.Cmd
+}
+
+func (srv *Server) Start() error {
+	if srv.JarPath == "" {
+		srv.JarPath = findZookeeperFatJar()
+		if srv.JarPath == "" {
+			return fmt.Errorf("zk: unable to find server jar")
+		}
+	}
+	srv.cmd = exec.Command("java", "-jar", srv.JarPath, "server", srv.ConfigPath)
+	srv.cmd.Stdout = srv.Stdout
+	srv.cmd.Stderr = srv.Stderr
+	return srv.cmd.Start()
+}
+
+func (srv *Server) Stop() error {
+	srv.cmd.Process.Signal(os.Kill)
+	return srv.cmd.Wait()
+}
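
A sketch of ServerConfig.Marshall with the defaults above applied (the DataDir value is hypothetical; the expected output follows the method body):

	package main

	import (
		"bytes"
		"fmt"
		"log"

		"github.com/samuel/go-zookeeper/zk"
	)

	func main() {
		cfg := zk.ServerConfig{
			ClientPort: 2181,
			DataDir:    "/tmp/zk-data", // hypothetical directory
			Servers: []zk.ServerConfigServer{
				{ID: 1, Host: "127.0.0.1"}, // peer/election ports default to 2888/3888
			},
		}

		var buf bytes.Buffer
		if err := cfg.Marshall(&buf); err != nil {
			log.Fatal(err)
		}
		fmt.Print(buf.String())
		// dataDir=/tmp/zk-data
		// tickTime=2000
		// initLimit=10
		// syncLimit=5
		// clientPort=2181
		// server.1=127.0.0.1:2888:3888
	}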

+ 633 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs.go

@@ -0,0 +1,633 @@
+package zk
+
+import (
+	"encoding/binary"
+	"errors"
+	"reflect"
+	"runtime"
+	"time"
+)
+
+var (
+	ErrUnhandledFieldType = errors.New("zk: unhandled field type")
+	ErrPtrExpected        = errors.New("zk: encode/decode expect a non-nil pointer to struct")
+	ErrShortBuffer        = errors.New("zk: buffer too small")
+)
+
+type ACL struct {
+	Perms  int32
+	Scheme string
+	ID     string
+}
+
+type Stat struct {
+	Czxid          int64 // The zxid of the change that caused this znode to be created.
+	Mzxid          int64 // The zxid of the change that last modified this znode.
+	Ctime          int64 // The time in milliseconds from epoch when this znode was created.
+	Mtime          int64 // The time in milliseconds from epoch when this znode was last modified.
+	Version        int32 // The number of changes to the data of this znode.
+	Cversion       int32 // The number of changes to the children of this znode.
+	Aversion       int32 // The number of changes to the ACL of this znode.
+	EphemeralOwner int64 // The session id of the owner of this znode if the znode is an ephemeral node. If it is not an ephemeral node, it will be zero.
+	DataLength     int32 // The length of the data field of this znode.
+	NumChildren    int32 // The number of children of this znode.
+	Pzxid          int64 // last modified children
+}
+
+// ServerClient is the information for a single Zookeeper client and its session.
+// This is used to parse/extract the output of the `cons` command.
+type ServerClient struct {
+	Queued        int64
+	Received      int64
+	Sent          int64
+	SessionID     int64
+	Lcxid         int64
+	Lzxid         int64
+	Timeout       int32
+	LastLatency   int32
+	MinLatency    int32
+	AvgLatency    int32
+	MaxLatency    int32
+	Established   time.Time
+	LastResponse  time.Time
+	Addr          string
+	LastOperation string // last operation performed, e.g. "PING"
+	Error         error
+}
+
+// ServerClients is a struct for the FLWCons() function. It's used to provide
+// the list of Clients.
+//
+// This is needed because FLWCons() takes multiple servers.
+type ServerClients struct {
+	Clients []*ServerClient
+	Error   error
+}
+
+// ServerStats is the information pulled from the Zookeeper `stat` command.
+type ServerStats struct {
+	Sent        int64
+	Received    int64
+	NodeCount   int64
+	MinLatency  int64
+	AvgLatency  int64
+	MaxLatency  int64
+	Connections int64
+	Outstanding int64
+	Epoch       int32
+	Counter     int32
+	BuildTime   time.Time
+	Mode        Mode
+	Version     string
+	Error       error
+}
+
+type requestHeader struct {
+	Xid    int32
+	Opcode int32
+}
+
+type responseHeader struct {
+	Xid  int32
+	Zxid int64
+	Err  ErrCode
+}
+
+type multiHeader struct {
+	Type int32
+	Done bool
+	Err  ErrCode
+}
+
+type auth struct {
+	Type   int32
+	Scheme string
+	Auth   []byte
+}
+
+// Generic request structs
+
+type pathRequest struct {
+	Path string
+}
+
+type PathVersionRequest struct {
+	Path    string
+	Version int32
+}
+
+type pathWatchRequest struct {
+	Path  string
+	Watch bool
+}
+
+type pathResponse struct {
+	Path string
+}
+
+type statResponse struct {
+	Stat Stat
+}
+
+//
+
+type CheckVersionRequest PathVersionRequest
+type closeRequest struct{}
+type closeResponse struct{}
+
+type connectRequest struct {
+	ProtocolVersion int32
+	LastZxidSeen    int64
+	TimeOut         int32
+	SessionID       int64
+	Passwd          []byte
+}
+
+type connectResponse struct {
+	ProtocolVersion int32
+	TimeOut         int32
+	SessionID       int64
+	Passwd          []byte
+}
+
+type CreateRequest struct {
+	Path  string
+	Data  []byte
+	Acl   []ACL
+	Flags int32
+}
+
+type createResponse pathResponse
+type DeleteRequest PathVersionRequest
+type deleteResponse struct{}
+
+type errorResponse struct {
+	Err int32
+}
+
+type existsRequest pathWatchRequest
+type existsResponse statResponse
+type getAclRequest pathRequest
+
+type getAclResponse struct {
+	Acl  []ACL
+	Stat Stat
+}
+
+type getChildrenRequest pathRequest
+
+type getChildrenResponse struct {
+	Children []string
+}
+
+type getChildren2Request pathWatchRequest
+
+type getChildren2Response struct {
+	Children []string
+	Stat     Stat
+}
+
+type getDataRequest pathWatchRequest
+
+type getDataResponse struct {
+	Data []byte
+	Stat Stat
+}
+
+type getMaxChildrenRequest pathRequest
+
+type getMaxChildrenResponse struct {
+	Max int32
+}
+
+type getSaslRequest struct {
+	Token []byte
+}
+
+type pingRequest struct{}
+type pingResponse struct{}
+
+type setAclRequest struct {
+	Path    string
+	Acl     []ACL
+	Version int32
+}
+
+type setAclResponse statResponse
+
+type SetDataRequest struct {
+	Path    string
+	Data    []byte
+	Version int32
+}
+
+type setDataResponse statResponse
+
+type setMaxChildren struct {
+	Path string
+	Max  int32
+}
+
+type setSaslRequest struct {
+	Token string
+}
+
+type setSaslResponse struct {
+	Token string
+}
+
+type setWatchesRequest struct {
+	RelativeZxid int64
+	DataWatches  []string
+	ExistWatches []string
+	ChildWatches []string
+}
+
+type setWatchesResponse struct{}
+
+type syncRequest pathRequest
+type syncResponse pathResponse
+
+type setAuthRequest auth
+type setAuthResponse struct{}
+
+type multiRequestOp struct {
+	Header multiHeader
+	Op     interface{}
+}
+type multiRequest struct {
+	Ops        []multiRequestOp
+	DoneHeader multiHeader
+}
+type multiResponseOp struct {
+	Header multiHeader
+	String string
+	Stat   *Stat
+}
+type multiResponse struct {
+	Ops        []multiResponseOp
+	DoneHeader multiHeader
+}
+
+func (r *multiRequest) Encode(buf []byte) (int, error) {
+	total := 0
+	for _, op := range r.Ops {
+		op.Header.Done = false
+		n, err := encodePacketValue(buf[total:], reflect.ValueOf(op))
+		if err != nil {
+			return total, err
+		}
+		total += n
+	}
+	r.DoneHeader.Done = true
+	n, err := encodePacketValue(buf[total:], reflect.ValueOf(r.DoneHeader))
+	if err != nil {
+		return total, err
+	}
+	total += n
+
+	return total, nil
+}
+
+func (r *multiRequest) Decode(buf []byte) (int, error) {
+	r.Ops = make([]multiRequestOp, 0)
+	r.DoneHeader = multiHeader{-1, true, -1}
+	total := 0
+	for {
+		header := &multiHeader{}
+		n, err := decodePacketValue(buf[total:], reflect.ValueOf(header))
+		if err != nil {
+			return total, err
+		}
+		total += n
+		if header.Done {
+			r.DoneHeader = *header
+			break
+		}
+
+		req := requestStructForOp(header.Type)
+		if req == nil {
+			return total, ErrAPIError
+		}
+		n, err = decodePacketValue(buf[total:], reflect.ValueOf(req))
+		if err != nil {
+			return total, err
+		}
+		total += n
+		r.Ops = append(r.Ops, multiRequestOp{*header, req})
+	}
+	return total, nil
+}
+
+func (r *multiResponse) Decode(buf []byte) (int, error) {
+	r.Ops = make([]multiResponseOp, 0)
+	r.DoneHeader = multiHeader{-1, true, -1}
+	total := 0
+	for {
+		header := &multiHeader{}
+		n, err := decodePacketValue(buf[total:], reflect.ValueOf(header))
+		if err != nil {
+			return total, err
+		}
+		total += n
+		if header.Done {
+			r.DoneHeader = *header
+			break
+		}
+
+		res := multiResponseOp{Header: *header}
+		var w reflect.Value
+		switch header.Type {
+		default:
+			return total, ErrAPIError
+		case opCreate:
+			w = reflect.ValueOf(&res.String)
+		case opSetData:
+			res.Stat = new(Stat)
+			w = reflect.ValueOf(res.Stat)
+		case opCheck, opDelete:
+		}
+		if w.IsValid() {
+			n, err := decodePacketValue(buf[total:], w)
+			if err != nil {
+				return total, err
+			}
+			total += n
+		}
+		r.Ops = append(r.Ops, res)
+	}
+	return total, nil
+}
+
+type watcherEvent struct {
+	Type  EventType
+	State State
+	Path  string
+}
+
+type decoder interface {
+	Decode(buf []byte) (int, error)
+}
+
+type encoder interface {
+	Encode(buf []byte) (int, error)
+}
+
+func decodePacket(buf []byte, st interface{}) (n int, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" {
+				err = ErrShortBuffer
+			} else {
+				panic(r)
+			}
+		}
+	}()
+
+	v := reflect.ValueOf(st)
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return 0, ErrPtrExpected
+	}
+	return decodePacketValue(buf, v)
+}
+
+func decodePacketValue(buf []byte, v reflect.Value) (int, error) {
+	rv := v
+	kind := v.Kind()
+	if kind == reflect.Ptr {
+		if v.IsNil() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		v = v.Elem()
+		kind = v.Kind()
+	}
+
+	n := 0
+	switch kind {
+	default:
+		return n, ErrUnhandledFieldType
+	case reflect.Struct:
+		if de, ok := rv.Interface().(decoder); ok {
+			return de.Decode(buf)
+		} else if de, ok := v.Interface().(decoder); ok {
+			return de.Decode(buf)
+		} else {
+			for i := 0; i < v.NumField(); i++ {
+				field := v.Field(i)
+				n2, err := decodePacketValue(buf[n:], field)
+				n += n2
+				if err != nil {
+					return n, err
+				}
+			}
+		}
+	case reflect.Bool:
+		v.SetBool(buf[n] != 0)
+		n++
+	case reflect.Int32:
+		v.SetInt(int64(binary.BigEndian.Uint32(buf[n : n+4])))
+		n += 4
+	case reflect.Int64:
+		v.SetInt(int64(binary.BigEndian.Uint64(buf[n : n+8])))
+		n += 8
+	case reflect.String:
+		ln := int(binary.BigEndian.Uint32(buf[n : n+4]))
+		v.SetString(string(buf[n+4 : n+4+ln]))
+		n += 4 + ln
+	case reflect.Slice:
+		switch v.Type().Elem().Kind() {
+		default:
+			count := int(binary.BigEndian.Uint32(buf[n : n+4]))
+			n += 4
+			values := reflect.MakeSlice(v.Type(), count, count)
+			v.Set(values)
+			for i := 0; i < count; i++ {
+				n2, err := decodePacketValue(buf[n:], values.Index(i))
+				n += n2
+				if err != nil {
+					return n, err
+				}
+			}
+		case reflect.Uint8:
+			ln := int(int32(binary.BigEndian.Uint32(buf[n : n+4])))
+			if ln < 0 {
+				n += 4
+				v.SetBytes(nil)
+			} else {
+				bytes := make([]byte, ln)
+				copy(bytes, buf[n+4:n+4+ln])
+				v.SetBytes(bytes)
+				n += 4 + ln
+			}
+		}
+	}
+	return n, nil
+}
+
+func encodePacket(buf []byte, st interface{}) (n int, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" {
+				err = ErrShortBuffer
+			} else {
+				panic(r)
+			}
+		}
+	}()
+
+	v := reflect.ValueOf(st)
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return 0, ErrPtrExpected
+	}
+	return encodePacketValue(buf, v)
+}
+
+func encodePacketValue(buf []byte, v reflect.Value) (int, error) {
+	rv := v
+	for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface {
+		v = v.Elem()
+	}
+
+	n := 0
+	switch v.Kind() {
+	default:
+		return n, ErrUnhandledFieldType
+	case reflect.Struct:
+		if en, ok := rv.Interface().(encoder); ok {
+			return en.Encode(buf)
+		} else if en, ok := v.Interface().(encoder); ok {
+			return en.Encode(buf)
+		} else {
+			for i := 0; i < v.NumField(); i++ {
+				field := v.Field(i)
+				n2, err := encodePacketValue(buf[n:], field)
+				n += n2
+				if err != nil {
+					return n, err
+				}
+			}
+		}
+	case reflect.Bool:
+		if v.Bool() {
+			buf[n] = 1
+		} else {
+			buf[n] = 0
+		}
+		n++
+	case reflect.Int32:
+		binary.BigEndian.PutUint32(buf[n:n+4], uint32(v.Int()))
+		n += 4
+	case reflect.Int64:
+		binary.BigEndian.PutUint64(buf[n:n+8], uint64(v.Int()))
+		n += 8
+	case reflect.String:
+		str := v.String()
+		binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(str)))
+		copy(buf[n+4:n+4+len(str)], []byte(str))
+		n += 4 + len(str)
+	case reflect.Slice:
+		switch v.Type().Elem().Kind() {
+		default:
+			count := v.Len()
+			startN := n
+			n += 4
+			for i := 0; i < count; i++ {
+				n2, err := encodePacketValue(buf[n:], v.Index(i))
+				n += n2
+				if err != nil {
+					return n, err
+				}
+			}
+			binary.BigEndian.PutUint32(buf[startN:startN+4], uint32(count))
+		case reflect.Uint8:
+			if v.IsNil() {
+				binary.BigEndian.PutUint32(buf[n:n+4], uint32(0xffffffff))
+				n += 4
+			} else {
+				bytes := v.Bytes()
+				binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(bytes)))
+				copy(buf[n+4:n+4+len(bytes)], bytes)
+				n += 4 + len(bytes)
+			}
+		}
+	}
+	return n, nil
+}
+
+func requestStructForOp(op int32) interface{} {
+	switch op {
+	case opClose:
+		return &closeRequest{}
+	case opCreate:
+		return &CreateRequest{}
+	case opDelete:
+		return &DeleteRequest{}
+	case opExists:
+		return &existsRequest{}
+	case opGetAcl:
+		return &getAclRequest{}
+	case opGetChildren:
+		return &getChildrenRequest{}
+	case opGetChildren2:
+		return &getChildren2Request{}
+	case opGetData:
+		return &getDataRequest{}
+	case opPing:
+		return &pingRequest{}
+	case opSetAcl:
+		return &setAclRequest{}
+	case opSetData:
+		return &SetDataRequest{}
+	case opSetWatches:
+		return &setWatchesRequest{}
+	case opSync:
+		return &syncRequest{}
+	case opSetAuth:
+		return &setAuthRequest{}
+	case opCheck:
+		return &CheckVersionRequest{}
+	case opMulti:
+		return &multiRequest{}
+	}
+	return nil
+}
+
+func responseStructForOp(op int32) interface{} {
+	switch op {
+	case opClose:
+		return &closeResponse{}
+	case opCreate:
+		return &createResponse{}
+	case opDelete:
+		return &deleteResponse{}
+	case opExists:
+		return &existsResponse{}
+	case opGetAcl:
+		return &getAclResponse{}
+	case opGetChildren:
+		return &getChildrenResponse{}
+	case opGetChildren2:
+		return &getChildren2Response{}
+	case opGetData:
+		return &getDataResponse{}
+	case opPing:
+		return &pingResponse{}
+	case opSetAcl:
+		return &setAclResponse{}
+	case opSetData:
+		return &setDataResponse{}
+	case opSetWatches:
+		return &setWatchesResponse{}
+	case opSync:
+		return &syncResponse{}
+	case opWatcherEvent:
+		return &watcherEvent{}
+	case opSetAuth:
+		return &setAuthResponse{}
+	// case opCheck:
+	// 	return &checkVersionResponse{}
+	case opMulti:
+		return &multiResponse{}
+	}
+	return nil
+}

+ 60 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/structs_test.go

@@ -0,0 +1,60 @@
+package zk
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestEncodeDecodePacket(t *testing.T) {
+	encodeDecodeTest(t, &requestHeader{-2, 5})
+	encodeDecodeTest(t, &connectResponse{1, 2, 3, nil})
+	encodeDecodeTest(t, &connectResponse{1, 2, 3, []byte{4, 5, 6}})
+	encodeDecodeTest(t, &getAclResponse{[]ACL{{12, "s", "anyone"}}, Stat{}})
+	encodeDecodeTest(t, &getChildrenResponse{[]string{"foo", "bar"}})
+	encodeDecodeTest(t, &pathWatchRequest{"path", true})
+	encodeDecodeTest(t, &pathWatchRequest{"path", false})
+	encodeDecodeTest(t, &CheckVersionRequest{"/", -1})
+	encodeDecodeTest(t, &multiRequest{Ops: []multiRequestOp{{multiHeader{opCheck, false, -1}, &CheckVersionRequest{"/", -1}}}})
+}
+
+func encodeDecodeTest(t *testing.T, r interface{}) {
+	buf := make([]byte, 1024)
+	n, err := encodePacket(buf, r)
+	if err != nil {
+		t.Errorf("encodePacket returned non-nil error %+v\n", err)
+		return
+	}
+	t.Logf("%+v %x", r, buf[:n])
+	r2 := reflect.New(reflect.ValueOf(r).Elem().Type()).Interface()
+	n2, err := decodePacket(buf[:n], r2)
+	if err != nil {
+		t.Errorf("decodePacket returned non-nil error %+v\n", err)
+		return
+	}
+	if n != n2 {
+		t.Errorf("sizes don't match: %d != %d", n, n2)
+		return
+	}
+	if !reflect.DeepEqual(r, r2) {
+		t.Errorf("results don't match: %+v != %+v", r, r2)
+		return
+	}
+}
+
+func TestEncodeShortBuffer(t *testing.T) {
+	buf := make([]byte, 0)
+	_, err := encodePacket(buf, &requestHeader{1, 2})
+	if err != ErrShortBuffer {
+		t.Errorf("encodePacket should return ErrShortBuffer on a short buffer instead of '%+v'", err)
+		return
+	}
+}
+
+func TestDecodeShortBuffer(t *testing.T) {
+	buf := make([]byte, 0)
+	_, err := decodePacket(buf, &responseHeader{})
+	if err != ErrShortBuffer {
+		t.Errorf("decodePacket should return ErrShortBuffer on a short buffer instead of '%+v'", err)
+		return
+	}
+}

+ 148 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/tracer.go

@@ -0,0 +1,148 @@
+package zk
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+)
+
+var (
+	requests     = make(map[int32]int32) // Map of Xid -> Opcode
+	requestsLock = &sync.Mutex{}
+)
+
+func trace(conn1, conn2 net.Conn, client bool) {
+	defer conn1.Close()
+	defer conn2.Close()
+	buf := make([]byte, 10*1024)
+	init := true
+	for {
+		_, err := io.ReadFull(conn1, buf[:4])
+		if err != nil {
+			fmt.Println("1>", client, err)
+			return
+		}
+
+		blen := int(binary.BigEndian.Uint32(buf[:4]))
+
+		_, err = io.ReadFull(conn1, buf[4:4+blen])
+		if err != nil {
+			fmt.Println("2>", client, err)
+			return
+		}
+
+		var cr interface{}
+		opcode := int32(-1)
+		readHeader := true
+		if client {
+			if init {
+				cr = &connectRequest{}
+				readHeader = false
+			} else {
+				xid := int32(binary.BigEndian.Uint32(buf[4:8]))
+				opcode = int32(binary.BigEndian.Uint32(buf[8:12]))
+				requestsLock.Lock()
+				requests[xid] = opcode
+				requestsLock.Unlock()
+				cr = requestStructForOp(opcode)
+				if cr == nil {
+					fmt.Printf("Unknown opcode %d\n", opcode)
+				}
+			}
+		} else {
+			if init {
+				cr = &connectResponse{}
+				readHeader = false
+			} else {
+				xid := int32(binary.BigEndian.Uint32(buf[4:8]))
+				zxid := int64(binary.BigEndian.Uint64(buf[8:16]))
+				errnum := int32(binary.BigEndian.Uint32(buf[16:20]))
+				if xid != -1 || zxid != -1 {
+					requestsLock.Lock()
+					found := false
+					opcode, found = requests[xid]
+					if !found {
+						opcode = 0
+					}
+					delete(requests, xid)
+					requestsLock.Unlock()
+				} else {
+					opcode = opWatcherEvent
+				}
+				cr = responseStructForOp(opcode)
+				if cr == nil {
+					fmt.Printf("Unknown opcode %d\n", opcode)
+				}
+				if errnum != 0 {
+					cr = &struct{}{}
+				}
+			}
+		}
+		opname := "."
+		if opcode != -1 {
+			opname = opNames[opcode]
+		}
+		if cr == nil {
+			fmt.Printf("%+v %s %+v\n", client, opname, buf[4:4+blen])
+		} else {
+			n := 4
+			hdrStr := ""
+			if readHeader {
+				var hdr interface{}
+				if client {
+					hdr = &requestHeader{}
+				} else {
+					hdr = &responseHeader{}
+				}
+				if n2, err := decodePacket(buf[n:n+blen], hdr); err != nil {
+					fmt.Println(err)
+				} else {
+					n += n2
+				}
+				hdrStr = fmt.Sprintf(" %+v", hdr)
+			}
+			if _, err := decodePacket(buf[n:n+blen], cr); err != nil {
+				fmt.Println(err)
+			}
+			fmt.Printf("%+v %s%s %+v\n", client, opname, hdrStr, cr)
+		}
+
+		init = false
+
+		written, err := conn2.Write(buf[:4+blen])
+		if err != nil {
+			fmt.Println("3>", client, err)
+			return
+		} else if written != 4+blen {
+			fmt.Printf("Written != read: %d != %d\n", written, blen)
+			return
+		}
+	}
+}
+
+func handleConnection(addr string, conn net.Conn) {
+	zkConn, err := net.Dial("tcp", addr)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	go trace(conn, zkConn, true)
+	trace(zkConn, conn, false)
+}
+
+func StartTracer(listenAddr, serverAddr string) {
+	ln, err := net.Listen("tcp", listenAddr)
+	if err != nil {
+		panic(err)
+	}
+	for {
+		conn, err := ln.Accept()
+		if err != nil {
+			fmt.Println(err)
+			continue
+		}
+		go handleConnection(serverAddr, conn)
+	}
+}
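
The tracer above is a debugging proxy: point a client at the listen address and it prints decoded requests and responses while forwarding to the real server. A hedged sketch (both addresses are assumptions):

	package main

	import (
		"log"
		"time"

		"github.com/samuel/go-zookeeper/zk"
	)

	func main() {
		// Proxy 127.0.0.1:2182 -> 127.0.0.1:2181, printing decoded packets.
		go zk.StartTracer("127.0.0.1:2182", "127.0.0.1:2181")
		time.Sleep(100 * time.Millisecond) // let the listener come up

		// Point the client at the proxy instead of the real server.
		conn, _, err := zk.Connect([]string{"127.0.0.1:2182"}, 10*time.Second)
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()
		time.Sleep(time.Second) // let a few traced packets print
	}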

+ 54 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util.go

@@ -0,0 +1,54 @@
+package zk
+
+import (
+	"crypto/sha1"
+	"encoding/base64"
+	"fmt"
+	"math/rand"
+	"strconv"
+	"strings"
+)
+
+// AuthACL produces an ACL list containing a single ACL which uses the
+// provided permissions, with the scheme "auth", and ID "", which is used
+// by ZooKeeper to represent any authenticated user.
+func AuthACL(perms int32) []ACL {
+	return []ACL{{perms, "auth", ""}}
+}
+
+// WorldACL produces an ACL list containing a single ACL which uses the
+// provided permissions, with the scheme "world", and ID "anyone", which
+// is used by ZooKeeper to represent any user at all.
+func WorldACL(perms int32) []ACL {
+	return []ACL{{perms, "world", "anyone"}}
+}
+
+func DigestACL(perms int32, user, password string) []ACL {
+	userPass := []byte(fmt.Sprintf("%s:%s", user, password))
+	h := sha1.New()
+	if n, err := h.Write(userPass); err != nil || n != len(userPass) {
+		panic("SHA1 failed")
+	}
+	digest := base64.StdEncoding.EncodeToString(h.Sum(nil))
+	return []ACL{{perms, "digest", fmt.Sprintf("%s:%s", user, digest)}}
+}
+
+// FormatServers takes a slice of addresses, and makes sure they are in a format
+// that resembles <addr>:<port>. If the server has no port provided, the
+// DefaultPort constant is added to the end.
+func FormatServers(servers []string) []string {
+	for i := range servers {
+		if !strings.Contains(servers[i], ":") {
+			servers[i] = servers[i] + ":" + strconv.Itoa(DefaultPort)
+		}
+	}
+	return servers
+}
+
+// stringShuffle performs a Fisher-Yates shuffle on a slice of strings
+func stringShuffle(s []string) {
+	for i := len(s) - 1; i > 0; i-- {
+		j := rand.Intn(i + 1)
+		s[i], s[j] = s[j], s[i]
+	}
+}
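
A brief sketch of the ACL and server-formatting helpers above (illustrative only):

	package main

	import (
		"fmt"

		"github.com/samuel/go-zookeeper/zk"
	)

	func main() {
		// Missing ports are filled in with the package's DefaultPort (2181).
		fmt.Println(zk.FormatServers([]string{"10.0.0.1", "10.0.0.2:2888"}))
		// [10.0.0.1:2181 10.0.0.2:2888]

		// DigestACL hashes "user:password" with SHA-1 and base64-encodes it.
		acl := zk.DigestACL(zk.PermAll, "user", "password")
		fmt.Println(acl[0].Scheme) // "digest"
	}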

+ 17 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/util_test.go

@@ -0,0 +1,17 @@
+package zk
+
+import "testing"
+
+func TestFormatServers(t *testing.T) {
+	servers := []string{"127.0.0.1:2181", "127.0.0.42", "127.0.42.1:8811"}
+	r := []string{"127.0.0.1:2181", "127.0.0.42:2181", "127.0.42.1:8811"}
+
+	var s []string
+	s = FormatServers(servers)
+
+	for i := range s {
+		if s[i] != r[i] {
+			t.Errorf("%v should equal %v", s[i], r[i])
+		}
+	}
+}

+ 518 - 0
libnetwork/Godeps/_workspace/src/github.com/samuel/go-zookeeper/zk/zk_test.go

@@ -0,0 +1,518 @@
+package zk
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"testing"
+	"time"
+
+	"camlistore.org/pkg/throttle"
+)
+
+func TestCreate(t *testing.T) {
+	ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+	zk, _, err := ts.ConnectAll()
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk.Close()
+
+	path := "/gozk-test"
+
+	if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
+		t.Fatalf("Delete returned error: %+v", err)
+	}
+	if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
+		t.Fatalf("Create returned error: %+v", err)
+	} else if p != path {
+		t.Fatalf("Create returned different path '%s' != '%s'", p, path)
+	}
+	if data, stat, err := zk.Get(path); err != nil {
+		t.Fatalf("Get returned error: %+v", err)
+	} else if stat == nil {
+		t.Fatal("Get returned nil stat")
+	} else if len(data) < 4 {
+		t.Fatal("Get returned wrong size data")
+	}
+}
+
+func TestMulti(t *testing.T) {
+	ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+	zk, _, err := ts.ConnectAll()
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk.Close()
+
+	path := "/gozk-test"
+
+	if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
+		t.Fatalf("Delete returned error: %+v", err)
+	}
+	ops := []interface{}{
+		&CreateRequest{Path: path, Data: []byte{1, 2, 3, 4}, Acl: WorldACL(PermAll)},
+		&SetDataRequest{Path: path, Data: []byte{1, 2, 3, 4}, Version: -1},
+	}
+	if res, err := zk.Multi(ops...); err != nil {
+		t.Fatalf("Multi returned error: %+v", err)
+	} else if len(res) != 2 {
+		t.Fatalf("Expected 2 responses got %d", len(res))
+	} else {
+		t.Logf("%+v", res)
+	}
+	if data, stat, err := zk.Get(path); err != nil {
+		t.Fatalf("Get returned error: %+v", err)
+	} else if stat == nil {
+		t.Fatal("Get returned nil stat")
+	} else if len(data) < 4 {
+		t.Fatal("Get returned wrong size data")
+	}
+}
+
+func TestGetSetACL(t *testing.T) {
+	ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+	zk, _, err := ts.ConnectAll()
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk.Close()
+
+	if err := zk.AddAuth("digest", []byte("blah")); err != nil {
+		t.Fatalf("AddAuth returned error %+v", err)
+	}
+
+	path := "/gozk-test"
+
+	if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
+		t.Fatalf("Delete returned error: %+v", err)
+	}
+	if path, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
+		t.Fatalf("Create returned error: %+v", err)
+	} else if path != "/gozk-test" {
+		t.Fatalf("Create returned different path '%s' != '/gozk-test'", path)
+	}
+
+	expected := WorldACL(PermAll)
+
+	if acl, stat, err := zk.GetACL(path); err != nil {
+		t.Fatalf("GetACL returned error %+v", err)
+	} else if stat == nil {
+		t.Fatalf("GetACL returned nil Stat")
+	} else if len(acl) != 1 || expected[0] != acl[0] {
+		t.Fatalf("GetACL mismatch expected %+v instead of %+v", expected, acl)
+	}
+
+	expected = []ACL{{PermAll, "ip", "127.0.0.1"}}
+
+	if stat, err := zk.SetACL(path, expected, -1); err != nil {
+		t.Fatalf("SetACL returned error %+v", err)
+	} else if stat == nil {
+		t.Fatalf("SetACL returned nil Stat")
+	}
+
+	if acl, stat, err := zk.GetACL(path); err != nil {
+		t.Fatalf("GetACL returned error %+v", err)
+	} else if stat == nil {
+		t.Fatalf("GetACL returned nil Stat")
+	} else if len(acl) != 1 || expected[0] != acl[0] {
+		t.Fatalf("GetACL mismatch expected %+v instead of %+v", expected, acl)
+	}
+}
+
+func TestAuth(t *testing.T) {
+	ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+	zk, _, err := ts.ConnectAll()
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk.Close()
+
+	path := "/gozk-digest-test"
+	if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
+		t.Fatalf("Delete returned error: %+v", err)
+	}
+
+	acl := DigestACL(PermAll, "user", "password")
+
+	if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, acl); err != nil {
+		t.Fatalf("Create returned error: %+v", err)
+	} else if p != path {
+		t.Fatalf("Create returned different path '%s' != '%s'", p, path)
+	}
+
+	if a, stat, err := zk.GetACL(path); err != nil {
+		t.Fatalf("GetACL returned error %+v", err)
+	} else if stat == nil {
+		t.Fatalf("GetACL returned nil Stat")
+	} else if len(a) != 1 || acl[0] != a[0] {
+		t.Fatalf("GetACL mismatch expected %+v instead of %+v", acl, a)
+	}
+
+	if _, _, err := zk.Get(path); err != ErrNoAuth {
+		t.Fatalf("Get returned error %+v instead of ErrNoAuth", err)
+	}
+
+	if err := zk.AddAuth("digest", []byte("user:password")); err != nil {
+		t.Fatalf("AddAuth returned error %+v", err)
+	}
+
+	if data, stat, err := zk.Get(path); err != nil {
+		t.Fatalf("Get returned error %+v", err)
+	} else if stat == nil {
+		t.Fatalf("Get returned nil Stat")
+	} else if len(data) != 4 {
+		t.Fatalf("Get returned wrong data length")
+	}
+}
+
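+// TestChildWatch verifies that a watch set with ChildrenW fires when a
+// child is created under the watched node, and again when the watched
+// node itself is deleted.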
+func TestChildWatch(t *testing.T) {
+	ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+	zk, _, err := ts.ConnectAll()
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk.Close()
+
+	if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
+		t.Fatalf("Delete returned error: %+v", err)
+	}
+
+	children, stat, childCh, err := zk.ChildrenW("/")
+	if err != nil {
+		t.Fatalf("Children returned error: %+v", err)
+	} else if stat == nil {
+		t.Fatal("Children returned nil stat")
+	} else if len(children) < 1 {
+		t.Fatal("Children should return at least 1 child")
+	}
+
+	if path, err := zk.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
+		t.Fatalf("Create returned error: %+v", err)
+	} else if path != "/gozk-test" {
+		t.Fatalf("Create returned different path '%s' != '/gozk-test'", path)
+	}
+
+	select {
+	case ev := <-childCh:
+		if ev.Err != nil {
+			t.Fatalf("Child watcher error %+v", ev.Err)
+		}
+		if ev.Path != "/" {
+			t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/")
+		}
+	case <-time.After(time.Second * 2):
+		t.Fatal("Child watcher timed out")
+	}
+
+	// Deleting the watched node should also trigger the watch
+
+	children, stat, childCh, err = zk.ChildrenW("/gozk-test")
+	if err != nil {
+		t.Fatalf("Children returned error: %+v", err)
+	} else if stat == nil {
+		t.Fatal("Children returned nil stat")
+	} else if len(children) != 0 {
+		t.Fatal("Children should return 0 children")
+	}
+
+	if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
+		t.Fatalf("Delete returned error: %+v", err)
+	}
+
+	select {
+	case ev := <-childCh:
+		if ev.Err != nil {
+			t.Fatalf("Child watcher error %+v", ev.Err)
+		}
+		if ev.Path != "/gozk-test" {
+			t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/")
+		}
+	case <-time.After(time.Second * 2):
+		t.Fatal("Child watcher timed out")
+	}
+}
+
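+// TestSetWatchers verifies that outstanding data and child watches are
+// re-registered after the client loses its connection and reconnects,
+// and still fire for changes made through a second client.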
+func TestSetWatchers(t *testing.T) {
+	ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+	zk, _, err := ts.ConnectAll()
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk.Close()
+
+	zk.reconnectDelay = time.Second
+
+	zk2, _, err := ts.ConnectAll()
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk2.Close()
+
+	if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
+		t.Fatalf("Delete returned error: %+v", err)
+	}
+
+	testPath, err := zk.Create("/gozk-test-2", []byte{}, 0, WorldACL(PermAll))
+	if err != nil {
+		t.Fatalf("Create returned: %+v", err)
+	}
+
+	_, _, testEvCh, err := zk.GetW(testPath)
+	if err != nil {
+		t.Fatalf("GetW returned: %+v", err)
+	}
+
+	children, stat, childCh, err := zk.ChildrenW("/")
+	if err != nil {
+		t.Fatalf("Children returned error: %+v", err)
+	} else if stat == nil {
+		t.Fatal("Children returned nil stat")
+	} else if len(children) < 1 {
+		t.Fatal("Children should return at least 1 child")
+	}
+
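+	// Kill the underlying connection; the client should reconnect and
+	// re-register the outstanding watches with the server.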
+	zk.conn.Close()
+	if err := zk2.Delete(testPath, -1); err != nil && err != ErrNoNode {
+		t.Fatalf("Delete returned error: %+v", err)
+	}
+	time.Sleep(time.Millisecond * 100)
+
+	if path, err := zk2.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
+		t.Fatalf("Create returned error: %+v", err)
+	} else if path != "/gozk-test" {
+		t.Fatalf("Create returned different path '%s' != '/gozk-test'", path)
+	}
+
+	select {
+	case ev := <-testEvCh:
+		if ev.Err != nil {
+			t.Fatalf("GetW watcher error %+v", ev.Err)
+		}
+		if ev.Path != testPath {
+			t.Fatalf("GetW watcher wrong path %s instead of %s", ev.Path, testPath)
+		}
+	case <-time.After(2 * time.Second):
+		t.Fatal("GetW watcher timed out")
+	}
+
+	select {
+	case ev := <-childCh:
+		if ev.Err != nil {
+			t.Fatalf("Child watcher error %+v", ev.Err)
+		}
+		if ev.Path != "/" {
+			t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/")
+		}
+	case <-time.After(2 * time.Second):
+		t.Fatal("Child watcher timed out")
+	}
+}
+
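+// TestExpiringWatch verifies that outstanding watches receive an
+// ErrSessionExpired event when the session cannot be re-established.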
+func TestExpiringWatch(t *testing.T) {
+	ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+	zk, _, err := ts.ConnectAll()
+	if err != nil {
+		t.Fatalf("Connect returned error: %+v", err)
+	}
+	defer zk.Close()
+
+	if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
+		t.Fatalf("Delete returned error: %+v", err)
+	}
+
+	children, stat, childCh, err := zk.ChildrenW("/")
+	if err != nil {
+		t.Fatalf("Children returned error: %+v", err)
+	} else if stat == nil {
+		t.Fatal("Children returned nil stat")
+	} else if len(children) < 1 {
+		t.Fatal("Children should return at least 1 child")
+	}
+
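+	// Clobber the session ID before dropping the connection so that the
+	// reconnect attempt is rejected and the session expires.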
+	zk.sessionID = 99999
+	zk.conn.Close()
+
+	select {
+	case ev := <-childCh:
+		if ev.Err != ErrSessionExpired {
+			t.Fatalf("Child watcher error %+v instead of expected ErrSessionExpired", ev.Err)
+		}
+		if ev.Path != "/" {
+			t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/")
+		}
+	case <-time.After(2 * time.Second):
+		t.Fatal("Child watcher timed out")
+	}
+}
+
+func TestRequestFail(t *testing.T) {
+	// If connecting to every server in the list fails, pending requests
+	// should be failed rather than left to hang forever.
+
+	zk, _, err := Connect([]string{"127.0.0.1:32444"}, time.Second*15)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer zk.Close()
+
+	ch := make(chan error)
+	go func() {
+		_, _, err := zk.Get("/blah")
+		ch <- err
+	}()
+	select {
+	case err := <-ch:
+		if err == nil {
+			t.Fatal("Expected non-nil error on failed request due to connection failure")
+		}
+	case <-time.After(time.Second * 2):
+		t.Fatal("Get hung when connection could not be made")
+	}
+}
+
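+// TestSlowServer verifies that a request that times out against a
+// throttled connection forces a reconnect without losing the session,
+// so later requests and previously set watches keep working.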
+func TestSlowServer(t *testing.T) {
+	ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ts.Stop()
+
+	realAddr := fmt.Sprintf("127.0.0.1:%d", ts.Servers[0].Port)
+	proxyAddr, stopCh, err := startSlowProxy(t,
+		throttle.Rate{}, throttle.Rate{},
+		realAddr, func(ln *throttle.Listener) {
+			if ln.Up.Latency == 0 {
+				ln.Up.Latency = time.Millisecond * 2000
+				ln.Down.Latency = time.Millisecond * 2000
+			} else {
+				ln.Up.Latency = 0
+				ln.Down.Latency = 0
+			}
+		})
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer close(stopCh)
+
+	zk, _, err := Connect([]string{proxyAddr}, time.Millisecond*500)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer zk.Close()
+
+	_, _, wch, err := zk.ChildrenW("/")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Force a reconnect to get a throttled connection
+	zk.conn.Close()
+
+	time.Sleep(time.Millisecond * 100)
+
+	if err := zk.Delete("/gozk-test", -1); err == nil {
+		t.Fatal("Delete should have failed")
+	}
+
+	// The previous request should have timed out, causing the connection to be dropped and re-established
+
+	if _, err := zk.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
+		t.Fatal(err)
+	}
+
+	// Make sure event is still returned because the session should not have been affected
+	select {
+	case ev := <-wch:
+		t.Logf("Received event: %+v", ev)
+	case <-time.After(time.Second):
+		t.Fatal("Expected to receive a watch event")
+	}
+}
+
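+// startSlowProxy starts a TCP proxy in front of upstream whose
+// per-direction throttling can be adjusted via adj on every accepted
+// connection. It returns the proxy address and a channel that shuts the
+// proxy down when closed.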
+func startSlowProxy(t *testing.T, up, down throttle.Rate, upstream string, adj func(ln *throttle.Listener)) (string, chan bool, error) {
+	ln, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		return "", nil, err
+	}
+	tln := &throttle.Listener{
+		Listener: ln,
+		Up:       up,
+		Down:     down,
+	}
+	stopCh := make(chan bool)
+	go func() {
+		<-stopCh
+		tln.Close()
+	}()
+	go func() {
+		for {
+			cn, err := tln.Accept()
+			if err != nil {
+				// t.Fatalf must not be called from a goroutine other than the
+				// test's own, so report the failure with t.Errorf instead.
+				if !strings.Contains(err.Error(), "use of closed network connection") {
+					t.Errorf("Accept failed: %s", err.Error())
+				}
+				return
+			}
+			if adj != nil {
+				adj(tln)
+			}
+			go func(cn net.Conn) {
+				defer cn.Close()
+				upcn, err := net.Dial("tcp", upstream)
+				if err != nil {
+					t.Log(err)
+					return
+				}
+				// This will leave hanging goroutines until stopCh is closed,
+				// but that doesn't matter in the context of running tests.
+				go func() {
+					<-stopCh
+					upcn.Close()
+				}()
+				// Copy in both directions; "use of closed network connection"
+				// is expected at shutdown and deliberately not reported.
+				go func() {
+					if _, err := io.Copy(upcn, cn); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
+						t.Logf("Upstream write failed: %s", err.Error())
+					}
+				}()
+				if _, err := io.Copy(cn, upcn); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
+					t.Logf("Upstream read failed: %s", err.Error())
+				}
+			}(cn)
+		}
+	}()
+	return ln.Addr().String(), stopCh, nil
+}