
Replace swarm store with libkv

Signed-off-by: Madhu Venugopal <madhu@docker.com>
Madhu Venugopal, 10 years ago
parent commit 3b2d2aa3ee
25 changed files with 2661 additions and 5 deletions
  1. libnetwork/Godeps/Godeps.json (+16 -0)
  2. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/.travis.yml (+34 -0)
  3. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/LICENSE (+191 -0)
  4. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/README.md (+108 -0)
  5. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/libkv.go (+33 -0)
  6. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/libkv_test.go (+91 -0)
  7. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/script/.validate (+33 -0)
  8. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/script/coverage (+21 -0)
  9. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/script/travis_consul.sh (+18 -0)
  10. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/script/travis_etcd.sh (+11 -0)
  11. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/script/travis_zk.sh (+12 -0)
  12. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/script/validate-gofmt (+30 -0)
  13. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/consul/consul.go (+419 -0)
  14. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/consul/consul_test.go (+61 -0)
  15. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/etcd/etcd.go (+478 -0)
  16. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/etcd/etcd_test.go (+33 -0)
  17. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/helpers.go (+47 -0)
  18. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/mock/mock.go (+113 -0)
  19. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/store-test.go (+394 -0)
  20. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/store.go (+120 -0)
  21. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/zookeeper/zookeeper.go (+357 -0)
  22. libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/zookeeper/zookeeper_test.go (+33 -0)
  23. libnetwork/datastore/datastore.go (+6 -2)
  24. libnetwork/datastore/mock_store.go (+1 -1)
  25. libnetwork/network.go (+1 -2)

+ 16 - 0
libnetwork/Godeps/Godeps.json

@@ -85,6 +85,10 @@
 			"Comment": "v0.3.0-rc2",
 			"Rev": "a5b2e57496762cb6971eb65809b2e428cb179719"
 		},
+		{
+			"ImportPath": "github.com/docker/libkv",
+			"Rev": "ab16c3d4a8785a9877c62d0b11ea4441cf09120c"
+		},
 		{
 			"ImportPath": "github.com/godbus/dbus",
 			"Comment": "v2-3-g4160802",
@@ -107,6 +111,18 @@
 			"ImportPath": "github.com/samuel/go-zookeeper/zk",
 			"Rev": "d0e0d8e11f318e000a8cc434616d69e329edc374"
 		},
+		{
+			"ImportPath": "github.com/stretchr/objx",
+			"Rev": "cbeaeb16a013161a98496fad62933b1d21786672"
+		},
+		{
+			"ImportPath": "github.com/stretchr/testify/assert",
+			"Rev": "dab07ac62d4905d3e48d17dc549c684ac3b7c15a"
+		},
+		{
+			"ImportPath": "github.com/stretchr/testify/mock",
+			"Rev": "dab07ac62d4905d3e48d17dc549c684ac3b7c15a"
+		},
 		{
 			"ImportPath": "github.com/vishvananda/netlink",
 			"Rev": "8eb64238879fed52fd51c5b30ad20b928fb4c36c"

+ 34 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/.travis.yml

@@ -0,0 +1,34 @@
+language: go
+
+go:
+  - 1.3
+#  - 1.4
+# see https://github.com/moovweb/gvm/pull/116 for why Go 1.4 is currently disabled
+
+# let us have speedy Docker-based Travis workers
+sudo: false
+
+before_install:
+  # Symlink below is needed for Travis CI to work correctly on personal forks of libkv
+  - ln -s $HOME/gopath/src/github.com/${TRAVIS_REPO_SLUG///libkv/} $HOME/gopath/src/github.com/docker
+  - go get code.google.com/p/go.tools/cmd/vet
+  - go get code.google.com/p/go.tools/cmd/cover
+  - go get github.com/mattn/goveralls
+  - go get github.com/golang/lint/golint
+  - go get github.com/GeertJohan/fgt
+
+before_script:
+  - script/travis_consul.sh 0.5.2 
+  - script/travis_etcd.sh 2.0.11
+  - script/travis_zk.sh 3.4.6
+
+script:
+  - ./consul agent -server -bootstrap-expect 1 -data-dir /tmp/consul -config-file=./config.json 1>/dev/null &
+  - ./etcd/etcd --listen-client-urls 'http://0.0.0.0:4001' --advertise-client-urls 'http://127.0.0.1:4001' >/dev/null 2>&1 &
+  - ./zk/bin/zkServer.sh start ./zk/conf/zoo.cfg 1> /dev/null
+  - script/validate-gofmt
+  - go vet ./...
+  - fgt golint ./...
+  - go test -v -race ./...
+  - script/coverage
+  - goveralls -service=travis-ci -coverprofile=goverage.report

+ 191 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/LICENSE

@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2014-2015 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 108 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/README.md

@@ -0,0 +1,108 @@
+# libkv
+
+[![GoDoc](https://godoc.org/github.com/docker/libkv?status.png)](https://godoc.org/github.com/docker/libkv)
+[![Build Status](https://travis-ci.org/docker/libkv.svg?branch=master)](https://travis-ci.org/docker/libkv)
+[![Coverage Status](https://coveralls.io/repos/docker/libkv/badge.svg)](https://coveralls.io/r/docker/libkv)
+
+`libkv` provides a `Go` native library to store metadata.
+
+The goal of `libkv` is to abstract common store operations for multiple Key/Value backends and offer the same experience no matter which backend you use.
+
+For example, you can use it to store your metadata or for service discovery to register machines and endpoints inside your cluster.
+
+You can also easily implement a generic *Leader Election* on top of it (see the [swarm/leadership](https://github.com/docker/swarm/tree/master/leadership) package).
+
+As of now, `libkv` offers support for `Consul`, `Etcd` and `Zookeeper`.
+
+## Example of usage
+
+### Create a new store and use Put/Get
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+	
+	"github.com/docker/libkv"
+	"github.com/docker/libkv/store"
+	log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+	client := "localhost:8500"
+
+	// Initialize a new store with consul
+	kv, err := libkv.NewStore(
+		store.CONSUL, // or "consul"
+		[]string{client},
+		&store.Config{
+			ConnectionTimeout: 10*time.Second,
+		},
+	)
+	if err != nil {
+		log.Fatal("Cannot create store consul")
+	}
+
+	key := "foo"
+	err = kv.Put(key, []byte("bar"), nil)
+	if err != nil {
+		log.Error("Error trying to put value at key `", key, "`")
+	}
+
+	pair, err := kv.Get(key)
+	if err != nil {
+		log.Error("Error trying accessing value at key `", key, "`")
+	}
+
+	log.Info("value: ", string(pair.Value))
+}
+```
+
+You can find other usage examples for `libkv` under the `docker/swarm` or `docker/libnetwork` repositories.
+
+## Details
+
+You should expect the same experience for basic operations like `Get`/`Put`, etc.
+
+However, calls like `WatchTree` may return different events (or numbers of events) depending on the backend; for now, `Etcd` and `Consul` will likely return more events than `Zookeeper`, and you should triage them accordingly.
+
+## Create a new storage backend
+
+A new **storage backend** should implement these calls:
+
+```go
+type Store interface {
+	Put(key string, value []byte, options *WriteOptions) error
+	Get(key string) (*KVPair, error)
+	Delete(key string) error
+	Exists(key string) (bool, error)
+	Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error)
+	WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*KVPair, error)
+	NewLock(key string, options *LockOptions) (Locker, error)
+	List(directory string) ([]*KVPair, error)
+	DeleteTree(directory string) error
+	AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error)
+	AtomicDelete(key string, previous *KVPair) (bool, error)
+	Close()
+}
+```
+
+You can get inspiration from existing backends to create a new one. This interface could be subject to changes to improve the experience of using the library and contributing to a new backend.
+
+## Roadmap
+
+- Make the API nicer to use (using `options`)
+- Provide more options (`consistency` for example)
+- Improve performance (remove extra `Get`/`List` operations)
+- Add more exhaustive tests
+- New backends?
+
+## Contributing
+
+Want to hack on libkv? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply.
+
+## Copyright and license
+
+Code and documentation copyright 2015 Docker, Inc. Code released under the Apache 2.0 license. Docs released under Creative Commons.
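
As a hedged aside on the API shown in the README above: the `store` package exposes sentinel errors such as `store.ErrKeyNotFound` and `store.ErrNotSupported` (used by the backends and by `NewStore` in `libkv.go` below), and callers are expected to check for them explicitly. A minimal, illustrative sketch assuming a local Consul agent on `localhost:8500`:

```go
package main

import (
	"log"
	"time"

	"github.com/docker/libkv"
	"github.com/docker/libkv/store"
)

func main() {
	// Assumes a local Consul agent, as in the README example above.
	kv, err := libkv.NewStore(store.CONSUL, []string{"localhost:8500"},
		&store.Config{ConnectionTimeout: 10 * time.Second})
	if err != nil {
		log.Fatalf("cannot create store: %v", err)
	}

	pair, err := kv.Get("foo")
	switch {
	case err == store.ErrKeyNotFound:
		log.Println("key does not exist yet")
	case err != nil:
		log.Fatalf("unexpected store error: %v", err)
	default:
		log.Printf("value: %s", pair.Value)
	}
}
```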

+ 33 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/libkv.go

@@ -0,0 +1,33 @@
+package libkv
+
+import (
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/libkv/store"
+	"github.com/docker/libkv/store/consul"
+	"github.com/docker/libkv/store/etcd"
+	"github.com/docker/libkv/store/mock"
+	"github.com/docker/libkv/store/zookeeper"
+)
+
+// Initialize creates a new Store object, initializing the client
+type Initialize func(addrs []string, options *store.Config) (store.Store, error)
+
+var (
+	// Backend initializers
+	initializers = map[store.Backend]Initialize{
+		store.MOCK:   mock.InitializeMock,
+		store.CONSUL: consul.InitializeConsul,
+		store.ETCD:   etcd.InitializeEtcd,
+		store.ZK:     zookeeper.InitializeZookeeper,
+	}
+)
+
+// NewStore creates an instance of a store
+func NewStore(backend store.Backend, addrs []string, options *store.Config) (store.Store, error) {
+	if init, exists := initializers[backend]; exists {
+		log.WithFields(log.Fields{"backend": backend}).Debug("Initializing store service")
+		return init(addrs, options)
+	}
+
+	return nil, store.ErrNotSupported
+}

+ 91 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/libkv_test.go

@@ -0,0 +1,91 @@
+package libkv
+
+import (
+	"testing"
+	"time"
+
+	"github.com/docker/libkv/store"
+	"github.com/docker/libkv/store/consul"
+	"github.com/docker/libkv/store/etcd"
+	"github.com/docker/libkv/store/mock"
+	"github.com/docker/libkv/store/zookeeper"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNewStoreConsul(t *testing.T) {
+	client := "localhost:8500"
+
+	kv, err := NewStore(
+		store.CONSUL,
+		[]string{client},
+		&store.Config{
+			ConnectionTimeout: 10 * time.Second,
+		},
+	)
+	assert.NoError(t, err)
+	assert.NotNil(t, kv)
+
+	if _, ok := kv.(*consul.Consul); !ok {
+		t.Fatal("Error while initializing store consul")
+	}
+}
+
+func TestNewStoreEtcd(t *testing.T) {
+	client := "localhost:4001"
+
+	kv, err := NewStore(
+		store.ETCD,
+		[]string{client},
+		&store.Config{
+			ConnectionTimeout: 10 * time.Second,
+		},
+	)
+	assert.NoError(t, err)
+	assert.NotNil(t, kv)
+
+	if _, ok := kv.(*etcd.Etcd); !ok {
+		t.Fatal("Error while initializing store etcd")
+	}
+}
+
+func TestNewStoreZookeeper(t *testing.T) {
+	client := "localhost:2181"
+
+	kv, err := NewStore(
+		store.ZK,
+		[]string{client},
+		&store.Config{
+			ConnectionTimeout: 10 * time.Second,
+		},
+	)
+	assert.NoError(t, err)
+	assert.NotNil(t, kv)
+
+	if _, ok := kv.(*zookeeper.Zookeeper); !ok {
+		t.Fatal("Error while initializing store zookeeper")
+	}
+}
+
+func TestNewStoreMock(t *testing.T) {
+	kv, err := NewStore(store.MOCK, []string{}, &store.Config{})
+	assert.NoError(t, err)
+	assert.NotNil(t, kv)
+
+	if _, ok := kv.(*mock.Mock); !ok {
+		t.Fatal("Error while initializing mock store")
+	}
+}
+
+func TestNewStoreUnsupported(t *testing.T) {
+	client := "localhost:9999"
+
+	kv, err := NewStore(
+		"unsupported",
+		[]string{client},
+		&store.Config{
+			ConnectionTimeout: 10 * time.Second,
+		},
+	)
+	assert.Error(t, err)
+	assert.Nil(t, kv)
+}

+ 33 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/script/.validate

@@ -0,0 +1,33 @@
+#!/bin/bash
+
+if [ -z "$VALIDATE_UPSTREAM" ]; then
+	# this is kind of an expensive check, so let's not do this twice if we
+	# are running more than one validate bundlescript
+	
+	VALIDATE_REPO='https://github.com/docker/libkv.git'
+	VALIDATE_BRANCH='master'
+	
+	if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
+		VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
+		VALIDATE_BRANCH="${TRAVIS_BRANCH}"
+	fi
+	
+	VALIDATE_HEAD="$(git rev-parse --verify HEAD)"
+	
+	git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
+	VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"
+	
+	VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD"
+	VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"
+	
+	validate_diff() {
+		if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
+			git diff "$VALIDATE_COMMIT_DIFF" "$@"
+		fi
+	}
+	validate_log() {
+		if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
+			git log "$VALIDATE_COMMIT_LOG" "$@"
+		fi
+	}
+fi

+ 21 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/script/coverage

@@ -0,0 +1,21 @@
+#!/bin/bash
+
+MODE="mode: count"
+ROOT=${TRAVIS_BUILD_DIR:-.}/../../..
+
+# Grab the list of packages.
+# Exclude the API and CLI from coverage as it will be covered by integration tests.
+PACKAGES=`go list ./...`
+
+# Create the empty coverage file.
+echo $MODE > goverage.report
+
+# Run coverage on every package.
+for package in $PACKAGES; do
+	output="$ROOT/$package/coverage.out"
+
+	go test -test.short -covermode=count -coverprofile=$output $package
+	if [ -f "$output" ] ; then
+		cat "$output" | grep -v "$MODE" >> goverage.report
+	fi
+done

+ 18 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/script/travis_consul.sh

@@ -0,0 +1,18 @@
+#!/bin/bash
+
+if [  $# -gt 0 ] ; then
+    CONSUL_VERSION="$1"
+else
+    CONSUL_VERSION="0.5.2"
+fi
+
+# install consul
+wget "https://dl.bintray.com/mitchellh/consul/${CONSUL_VERSION}_linux_amd64.zip"
+unzip "${CONSUL_VERSION}_linux_amd64.zip"
+
+# make config for minimum ttl
+touch config.json
+echo "{\"session_ttl_min\": \"2s\"}" >> config.json
+
+# check
+./consul --version

+ 11 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/script/travis_etcd.sh

@@ -0,0 +1,11 @@
+#!/bin/bash
+
+if [  $# -gt 0 ] ; then
+    ETCD_VERSION="$1"
+else
+    ETCD_VERSION="2.0.11"
+fi
+
+curl -L https://github.com/coreos/etcd/releases/download/v$ETCD_VERSION/etcd-v$ETCD_VERSION-linux-amd64.tar.gz -o etcd-v$ETCD_VERSION-linux-amd64.tar.gz
+tar xzvf etcd-v$ETCD_VERSION-linux-amd64.tar.gz
+mv etcd-v$ETCD_VERSION-linux-amd64 etcd

+ 12 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/script/travis_zk.sh

@@ -0,0 +1,12 @@
+#!/bin/bash
+
+if [  $# -gt 0 ] ; then
+    ZK_VERSION="$1"
+else
+    ZK_VERSION="3.4.6"
+fi
+
+wget "http://mirrors.ukfast.co.uk/sites/ftp.apache.org/zookeeper/stable/zookeeper-${ZK_VERSION}.tar.gz"
+tar -xvf "zookeeper-${ZK_VERSION}.tar.gz"
+mv zookeeper-$ZK_VERSION zk
+mv ./zk/conf/zoo_sample.cfg ./zk/conf/zoo.cfg

+ 30 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/script/validate-gofmt

@@ -0,0 +1,30 @@
+#!/bin/bash
+
+source "$(dirname "$BASH_SOURCE")/.validate"
+
+IFS=$'\n'
+files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^Godeps/' || true) )
+unset IFS
+
+badFiles=()
+for f in "${files[@]}"; do
+	# we use "git show" here to validate that what's committed is formatted
+	if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then
+		badFiles+=( "$f" )
+	fi
+done
+
+if [ ${#badFiles[@]} -eq 0 ]; then
+	echo 'Congratulations!  All Go source files are properly formatted.'
+else
+	{
+		echo "These files are not properly gofmt'd:"
+		for f in "${badFiles[@]}"; do
+			echo " - $f"
+		done
+		echo
+		echo 'Please reformat the above files using "gofmt -s -w" and commit the result.'
+		echo
+	} >&2
+	false
+fi

+ 419 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/consul/consul.go

@@ -0,0 +1,419 @@
+package consul
+
+import (
+	"crypto/tls"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/libkv/store"
+	api "github.com/hashicorp/consul/api"
+)
+
+const (
+	// DefaultWatchWaitTime is how long we block for at a
+	// time to check if the watched key has changed. This
+	// affects the minimum time it takes to cancel a watch.
+	DefaultWatchWaitTime = 15 * time.Second
+)
+
+// Consul is the receiver type for the
+// Store interface
+type Consul struct {
+	sync.Mutex
+	config       *api.Config
+	client       *api.Client
+	ephemeralTTL time.Duration
+}
+
+type consulLock struct {
+	lock *api.Lock
+}
+
+// InitializeConsul creates a new Consul client given
+// a list of endpoints and optional tls config
+func InitializeConsul(endpoints []string, options *store.Config) (store.Store, error) {
+	s := &Consul{}
+
+	// Create Consul client
+	config := api.DefaultConfig()
+	s.config = config
+	config.HttpClient = http.DefaultClient
+	config.Address = endpoints[0]
+	config.Scheme = "http"
+
+	// Set options
+	if options != nil {
+		if options.TLS != nil {
+			s.setTLS(options.TLS)
+		}
+		if options.ConnectionTimeout != 0 {
+			s.setTimeout(options.ConnectionTimeout)
+		}
+		if options.EphemeralTTL != 0 {
+			s.setEphemeralTTL(options.EphemeralTTL)
+		}
+	}
+
+	// Creates a new client
+	client, err := api.NewClient(config)
+	if err != nil {
+		log.Errorf("Couldn't initialize consul client..")
+		return nil, err
+	}
+	s.client = client
+
+	return s, nil
+}
+
+// SetTLS sets Consul TLS options
+func (s *Consul) setTLS(tls *tls.Config) {
+	s.config.HttpClient.Transport = &http.Transport{
+		TLSClientConfig: tls,
+	}
+	s.config.Scheme = "https"
+}
+
+// SetTimeout sets the timeout for connecting to Consul
+func (s *Consul) setTimeout(time time.Duration) {
+	s.config.WaitTime = time
+}
+
+// SetEphemeralTTL sets the ttl for ephemeral nodes
+func (s *Consul) setEphemeralTTL(ttl time.Duration) {
+	s.ephemeralTTL = ttl
+}
+
+// Normalize the key for usage in Consul
+func (s *Consul) normalize(key string) string {
+	key = store.Normalize(key)
+	return strings.TrimPrefix(key, "/")
+}
+
+func (s *Consul) refreshSession(pair *api.KVPair) error {
+	// Check if there is any previous session with an active TTL
+	session, err := s.getActiveSession(pair.Key)
+	if err != nil {
+		return err
+	}
+
+	if session == "" {
+		entry := &api.SessionEntry{
+			Behavior: api.SessionBehaviorDelete,
+			TTL:      s.ephemeralTTL.String(),
+		}
+
+		// Create the key session
+		session, _, err = s.client.Session().Create(entry, nil)
+		if err != nil {
+			return err
+		}
+	}
+
+	lockOpts := &api.LockOptions{
+		Key:     pair.Key,
+		Session: session,
+	}
+
+	// Lock and ignore if lock is held
+	// It's just a placeholder for the
+	// ephemeral behavior
+	lock, _ := s.client.LockOpts(lockOpts)
+	if lock != nil {
+		lock.Lock(nil)
+	}
+
+	_, _, err = s.client.Session().Renew(session, nil)
+	if err != nil {
+		return s.refreshSession(pair)
+	}
+	return nil
+}
+
+// getActiveSession checks if the key already has
+// a session attached
+func (s *Consul) getActiveSession(key string) (string, error) {
+	pair, _, err := s.client.KV().Get(key, nil)
+	if err != nil {
+		return "", err
+	}
+	if pair != nil && pair.Session != "" {
+		return pair.Session, nil
+	}
+	return "", nil
+}
+
+// Get the value at "key", returns the last modified index
+// to use in conjunction with CAS calls
+func (s *Consul) Get(key string) (*store.KVPair, error) {
+	options := &api.QueryOptions{
+		AllowStale:        false,
+		RequireConsistent: true,
+	}
+
+	pair, meta, err := s.client.KV().Get(s.normalize(key), options)
+	if err != nil {
+		return nil, err
+	}
+
+	// If pair is nil then the key does not exist
+	if pair == nil {
+		return nil, store.ErrKeyNotFound
+	}
+
+	return &store.KVPair{Key: pair.Key, Value: pair.Value, LastIndex: meta.LastIndex}, nil
+}
+
+// Put a value at "key"
+func (s *Consul) Put(key string, value []byte, opts *store.WriteOptions) error {
+	key = s.normalize(key)
+
+	p := &api.KVPair{
+		Key:   key,
+		Value: value,
+	}
+
+	if opts != nil && opts.Ephemeral {
+		// Create or refresh the session
+		err := s.refreshSession(p)
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err := s.client.KV().Put(p, nil)
+	return err
+}
+
+// Delete a value at "key"
+func (s *Consul) Delete(key string) error {
+	_, err := s.client.KV().Delete(s.normalize(key), nil)
+	return err
+}
+
+// Exists checks that the key exists inside the store
+func (s *Consul) Exists(key string) (bool, error) {
+	_, err := s.Get(key)
+	if err != nil && err == store.ErrKeyNotFound {
+		return false, err
+	}
+	return true, nil
+}
+
+// List child nodes of a given directory
+func (s *Consul) List(directory string) ([]*store.KVPair, error) {
+	pairs, _, err := s.client.KV().List(s.normalize(directory), nil)
+	if err != nil {
+		return nil, err
+	}
+	if len(pairs) == 0 {
+		return nil, store.ErrKeyNotFound
+	}
+
+	kv := []*store.KVPair{}
+
+	for _, pair := range pairs {
+		if pair.Key == directory {
+			continue
+		}
+		kv = append(kv, &store.KVPair{
+			Key:       pair.Key,
+			Value:     pair.Value,
+			LastIndex: pair.ModifyIndex,
+		})
+	}
+
+	return kv, nil
+}
+
+// DeleteTree deletes a range of keys under a given directory
+func (s *Consul) DeleteTree(directory string) error {
+	_, err := s.client.KV().DeleteTree(s.normalize(directory), nil)
+	return err
+}
+
+// Watch for changes on a "key"
+// It returns a channel that will receive changes or pass
+// on errors. Upon creation, the current value will first
+// be sent to the channel. Providing a non-nil stopCh can
+// be used to stop watching.
+func (s *Consul) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
+	kv := s.client.KV()
+	watchCh := make(chan *store.KVPair)
+
+	go func() {
+		defer close(watchCh)
+
+		// Use a wait time in order to check if we should quit
+		// from time to time.
+		opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
+
+		for {
+			// Check if we should quit
+			select {
+			case <-stopCh:
+				return
+			default:
+			}
+
+			// Get the key
+			pair, meta, err := kv.Get(key, opts)
+			if err != nil {
+				return
+			}
+
+			// If LastIndex didn't change then it means `Get` returned
+			// because of the WaitTime and the key didn't change.
+			if opts.WaitIndex == meta.LastIndex {
+				continue
+			}
+			opts.WaitIndex = meta.LastIndex
+
+			// Return the value to the channel
+			// FIXME: What happens when a key is deleted?
+			if pair != nil {
+				watchCh <- &store.KVPair{
+					Key:       pair.Key,
+					Value:     pair.Value,
+					LastIndex: pair.ModifyIndex,
+				}
+			}
+		}
+	}()
+
+	return watchCh, nil
+}
+
+// WatchTree watches for changes on a "directory"
+// It returns a channel that will receive changes or pass
+// on errors. Upon creating a watch, the current child values
+// will be sent to the channel. Providing a non-nil stopCh can
+// be used to stop watching.
+func (s *Consul) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
+	kv := s.client.KV()
+	watchCh := make(chan []*store.KVPair)
+
+	go func() {
+		defer close(watchCh)
+
+		// Use a wait time in order to check if we should quit
+		// from time to time.
+		opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
+		for {
+			// Check if we should quit
+			select {
+			case <-stopCh:
+				return
+			default:
+			}
+
+			// Get all the children
+			pairs, meta, err := kv.List(directory, opts)
+			if err != nil {
+				log.Errorf("consul: %v", err)
+				return
+			}
+
+			// If LastIndex didn't change then it means `List` returned
+			// because of the WaitTime and the child keys didn't change.
+			if opts.WaitIndex == meta.LastIndex {
+				continue
+			}
+			opts.WaitIndex = meta.LastIndex
+
+			// Return children KV pairs to the channel
+			kv := []*store.KVPair{}
+			for _, pair := range pairs {
+				if pair.Key == directory {
+					continue
+				}
+				kv = append(kv, &store.KVPair{
+					Key:       pair.Key,
+					Value:     pair.Value,
+					LastIndex: pair.ModifyIndex,
+				})
+			}
+			watchCh <- kv
+		}
+	}()
+
+	return watchCh, nil
+}
+
+// NewLock returns a handle to a lock struct which can
+// be used to provide mutual exclusion on a key
+func (s *Consul) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
+	consulOpts := &api.LockOptions{
+		Key: s.normalize(key),
+	}
+
+	if options != nil {
+		consulOpts.Value = options.Value
+	}
+
+	l, err := s.client.LockOpts(consulOpts)
+	if err != nil {
+		return nil, err
+	}
+
+	return &consulLock{lock: l}, nil
+}
+
+// Lock attempts to acquire the lock and blocks while
+// doing so. It returns a channel that is closed if our
+// lock is lost or if an error occurs
+func (l *consulLock) Lock() (<-chan struct{}, error) {
+	return l.lock.Lock(nil)
+}
+
+// Unlock the "key". Calling unlock while
+// not holding the lock will throw an error
+func (l *consulLock) Unlock() error {
+	return l.lock.Unlock()
+}
+
+// AtomicPut puts a value at "key" if the key has not been
+// modified in the meantime, throws an error if this is the case
+func (s *Consul) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
+	if previous == nil {
+		return false, nil, store.ErrPreviousNotSpecified
+	}
+
+	p := &api.KVPair{Key: s.normalize(key), Value: value, ModifyIndex: previous.LastIndex}
+	if work, _, err := s.client.KV().CAS(p, nil); err != nil {
+		return false, nil, err
+	} else if !work {
+		return false, nil, store.ErrKeyModified
+	}
+
+	pair, err := s.Get(key)
+	if err != nil {
+		return false, nil, err
+	}
+
+	return true, pair, nil
+}
+
+// AtomicDelete deletes a value at "key" if the key has not
+// been modified in the meantime, throws an error if this is the case
+func (s *Consul) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
+	if previous == nil {
+		return false, store.ErrPreviousNotSpecified
+	}
+
+	p := &api.KVPair{Key: s.normalize(key), ModifyIndex: previous.LastIndex}
+	if work, _, err := s.client.KV().DeleteCAS(p, nil); err != nil {
+		return false, err
+	} else if !work {
+		return false, store.ErrKeyModified
+	}
+
+	return true, nil
+}
+
+// Close closes the client connection
+func (s *Consul) Close() {
+	return
+}
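
To make the ephemeral-key path above concrete, here is a minimal, hedged usage sketch: a key written with `WriteOptions{Ephemeral: true}` is tied to a Consul session whose TTL comes from `Config.EphemeralTTL` (the same flow exercised by `TestGetActiveSession` in the test file below). It assumes a local Consul agent on `localhost:8500`:

```go
package main

import (
	"log"
	"time"

	"github.com/docker/libkv/store"
	"github.com/docker/libkv/store/consul"
)

func main() {
	// EphemeralTTL sets the TTL of the Consul session backing ephemeral keys.
	kv, err := consul.InitializeConsul(
		[]string{"localhost:8500"},
		&store.Config{
			ConnectionTimeout: 3 * time.Second,
			EphemeralTTL:      2 * time.Second,
		},
	)
	if err != nil {
		log.Fatalf("cannot create store: %v", err)
	}

	// The key is attached to a session with SessionBehaviorDelete,
	// so it disappears once the session is no longer renewed.
	if err := kv.Put("foo", []byte("bar"), &store.WriteOptions{Ephemeral: true}); err != nil {
		log.Fatalf("put failed: %v", err)
	}
}
```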

+ 61 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/consul/consul_test.go

@@ -0,0 +1,61 @@
+package consul
+
+import (
+	"testing"
+	"time"
+
+	"github.com/docker/libkv/store"
+	"github.com/stretchr/testify/assert"
+)
+
+func makeConsulClient(t *testing.T) store.Store {
+	client := "localhost:8500"
+
+	kv, err := InitializeConsul(
+		[]string{client},
+		&store.Config{
+			ConnectionTimeout: 3 * time.Second,
+			EphemeralTTL:      2 * time.Second,
+		},
+	)
+
+	if err != nil {
+		t.Fatalf("cannot create store: %v", err)
+	}
+
+	return kv
+}
+
+func TestConsulStore(t *testing.T) {
+	kv := makeConsulClient(t)
+	backup := makeConsulClient(t)
+
+	store.TestStore(t, kv, backup)
+}
+
+func TestGetActiveSession(t *testing.T) {
+	kv := makeConsulClient(t)
+
+	consul := kv.(*Consul)
+
+	key := "foo"
+	value := []byte("bar")
+
+	// Put the first key with the Ephemeral flag
+	err := kv.Put(key, value, &store.WriteOptions{Ephemeral: true})
+	assert.NoError(t, err)
+
+	// Session should not be empty
+	session, err := consul.getActiveSession(key)
+	assert.NoError(t, err)
+	assert.NotEqual(t, session, "")
+
+	// Delete the key
+	err = kv.Delete(key)
+	assert.NoError(t, err)
+
+	// Check the session again, it should return nothing
+	session, err = consul.getActiveSession(key)
+	assert.NoError(t, err)
+	assert.Equal(t, session, "")
+}

+ 478 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/etcd/etcd.go

@@ -0,0 +1,478 @@
+package etcd
+
+import (
+	"crypto/tls"
+	"net"
+	"net/http"
+	"strings"
+	"time"
+
+	etcd "github.com/coreos/go-etcd/etcd"
+	"github.com/docker/libkv/store"
+)
+
+// Etcd is the receiver type for the
+// Store interface
+type Etcd struct {
+	client       *etcd.Client
+	ephemeralTTL time.Duration
+}
+
+type etcdLock struct {
+	client   *etcd.Client
+	stopLock chan struct{}
+	key      string
+	value    string
+	last     *etcd.Response
+	ttl      uint64
+}
+
+const (
+	periodicSync      = 10 * time.Minute
+	defaultLockTTL    = 20 * time.Second
+	defaultUpdateTime = 5 * time.Second
+)
+
+// InitializeEtcd creates a new Etcd client given
+// a list of endpoints and an optional tls config
+func InitializeEtcd(addrs []string, options *store.Config) (store.Store, error) {
+	s := &Etcd{}
+
+	entries := store.CreateEndpoints(addrs, "http")
+	s.client = etcd.NewClient(entries)
+
+	// Set options
+	if options != nil {
+		if options.TLS != nil {
+			s.setTLS(options.TLS)
+		}
+		if options.ConnectionTimeout != 0 {
+			s.setTimeout(options.ConnectionTimeout)
+		}
+		if options.EphemeralTTL != 0 {
+			s.setEphemeralTTL(options.EphemeralTTL)
+		}
+	}
+
+	// Periodic SyncCluster
+	go func() {
+		for {
+			s.client.SyncCluster()
+			time.Sleep(periodicSync)
+		}
+	}()
+
+	return s, nil
+}
+
+// SetTLS sets the tls configuration for the etcd client
+// and switches the endpoints to the https scheme
+func (s *Etcd) setTLS(tls *tls.Config) {
+	// Change to https scheme
+	var addrs []string
+	entries := s.client.GetCluster()
+	for _, entry := range entries {
+		addrs = append(addrs, strings.Replace(entry, "http", "https", -1))
+	}
+	s.client.SetCluster(addrs)
+
+	// Set transport
+	t := http.Transport{
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     tls,
+	}
+	s.client.SetTransport(&t)
+}
+
+// setTimeout sets the timeout used for connecting to the store
+func (s *Etcd) setTimeout(time time.Duration) {
+	s.client.SetDialTimeout(time)
+}
+
+// setEphemeralTTL sets the ephemeral TTL used as a heartbeat
+// to notify that a node is alive
+func (s *Etcd) setEphemeralTTL(time time.Duration) {
+	s.ephemeralTTL = time
+}
+
+// createDirectory creates the entire path for a directory
+// that does not exist
+func (s *Etcd) createDirectory(path string) error {
+	if _, err := s.client.CreateDir(store.Normalize(path), 10); err != nil {
+		if etcdError, ok := err.(*etcd.EtcdError); ok {
+			// Skip key already exists
+			if etcdError.ErrorCode != 105 {
+				return err
+			}
+		} else {
+			return err
+		}
+	}
+	return nil
+}
+
+// Get the value at "key", returns the last modified index
+// to use in conjunction with Atomic calls
+func (s *Etcd) Get(key string) (pair *store.KVPair, err error) {
+	result, err := s.client.Get(store.Normalize(key), false, false)
+	if err != nil {
+		if etcdError, ok := err.(*etcd.EtcdError); ok {
+			// Not a Directory or Not a file
+			if etcdError.ErrorCode == 102 || etcdError.ErrorCode == 104 {
+				return nil, store.ErrKeyNotFound
+			}
+		}
+		return nil, err
+	}
+
+	pair = &store.KVPair{
+		Key:       key,
+		Value:     []byte(result.Node.Value),
+		LastIndex: result.Node.ModifiedIndex,
+	}
+
+	return pair, nil
+}
+
+// Put a value at "key"
+func (s *Etcd) Put(key string, value []byte, opts *store.WriteOptions) error {
+
+	// Default TTL = 0 means no expiration
+	var ttl uint64
+	if opts != nil && opts.Ephemeral {
+		ttl = uint64(s.ephemeralTTL.Seconds())
+	}
+
+	if _, err := s.client.Set(key, string(value), ttl); err != nil {
+		if etcdError, ok := err.(*etcd.EtcdError); ok {
+
+			// Not a directory
+			if etcdError.ErrorCode == 104 {
+				// Remove the last element (the actual key)
+				// and create the full directory path
+				err = s.createDirectory(store.GetDirectory(key))
+				if err != nil {
+					return err
+				}
+
+				// Now that the directory is created, set the key
+				if _, err := s.client.Set(key, string(value), ttl); err != nil {
+					return err
+				}
+			}
+		}
+		return err
+	}
+	return nil
+}
+
+// Delete a value at "key"
+func (s *Etcd) Delete(key string) error {
+	_, err := s.client.Delete(store.Normalize(key), false)
+	return err
+}
+
+// Exists checks if the key exists inside the store
+func (s *Etcd) Exists(key string) (bool, error) {
+	entry, err := s.Get(key)
+	if err != nil && entry != nil {
+		if err == store.ErrKeyNotFound || entry.Value == nil {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+// Watch for changes on a "key"
+// It returns a channel that will receive changes or pass
+// on errors. Upon creation, the current value will first
+// be sent to the channel. Providing a non-nil stopCh can
+// be used to stop watching.
+func (s *Etcd) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
+	// Get the current value
+	current, err := s.Get(key)
+	if err != nil {
+		return nil, err
+	}
+
+	// Start an etcd watch.
+	// Note: etcd will send the current value through the channel.
+	etcdWatchCh := make(chan *etcd.Response)
+	etcdStopCh := make(chan bool)
+	go s.client.Watch(store.Normalize(key), 0, false, etcdWatchCh, etcdStopCh)
+
+	// Adapter goroutine: The goal here is to convert whatever
+	// format etcd is using into our interface.
+	watchCh := make(chan *store.KVPair)
+	go func() {
+		defer close(watchCh)
+
+		// Push the current value through the channel.
+		watchCh <- current
+
+		for {
+			select {
+			case result := <-etcdWatchCh:
+				watchCh <- &store.KVPair{
+					Key:       key,
+					Value:     []byte(result.Node.Value),
+					LastIndex: result.Node.ModifiedIndex,
+				}
+			case <-stopCh:
+				etcdStopCh <- true
+				return
+			}
+		}
+	}()
+	return watchCh, nil
+}
+
+// WatchTree watches for changes on a "directory"
+// It returns a channel that will receive changes or pass
+// on errors. Upon creating a watch, the current child values
+// will be sent to the channel. Providing a non-nil stopCh can
+// be used to stop watching.
+func (s *Etcd) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
+	// Get child values
+	current, err := s.List(directory)
+	if err != nil {
+		return nil, err
+	}
+
+	// Start the watch
+	etcdWatchCh := make(chan *etcd.Response)
+	etcdStopCh := make(chan bool)
+	go s.client.Watch(store.Normalize(directory), 0, true, etcdWatchCh, etcdStopCh)
+
+	// Adapter goroutine: The goal here is to convert whatever
+	// format etcd is using into our interface.
+	watchCh := make(chan []*store.KVPair)
+	go func() {
+		defer close(watchCh)
+
+		// Push the current value through the channel.
+		watchCh <- current
+
+		for {
+			select {
+			case <-etcdWatchCh:
+				// FIXME: We should probably use the value pushed by the channel.
+				// However, Node.Nodes seems to be empty.
+				if list, err := s.List(directory); err == nil {
+					watchCh <- list
+				}
+			case <-stopCh:
+				etcdStopCh <- true
+				return
+			}
+		}
+	}()
+	return watchCh, nil
+}
+
+// AtomicPut puts a value at "key" if the key has not been
+// modified in the meantime, throws an error if this is the case
+func (s *Etcd) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
+	if previous == nil {
+		return false, nil, store.ErrPreviousNotSpecified
+	}
+
+	meta, err := s.client.CompareAndSwap(store.Normalize(key), string(value), 0, "", previous.LastIndex)
+	if err != nil {
+		if etcdError, ok := err.(*etcd.EtcdError); ok {
+			// Compare Failed
+			if etcdError.ErrorCode == 101 {
+				return false, nil, store.ErrKeyModified
+			}
+		}
+		return false, nil, err
+	}
+
+	updated := &store.KVPair{
+		Key:       key,
+		Value:     value,
+		LastIndex: meta.Node.ModifiedIndex,
+	}
+
+	return true, updated, nil
+}
+
+// AtomicDelete deletes a value at "key" if the key
+// has not been modified in the meantime, throws an
+// error if this is the case
+func (s *Etcd) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
+	if previous == nil {
+		return false, store.ErrPreviousNotSpecified
+	}
+
+	_, err := s.client.CompareAndDelete(store.Normalize(key), "", previous.LastIndex)
+	if err != nil {
+		if etcdError, ok := err.(*etcd.EtcdError); ok {
+			// Compare failed
+			if etcdError.ErrorCode == 101 {
+				return false, store.ErrKeyModified
+			}
+		}
+		return false, err
+	}
+
+	return true, nil
+}
+
+// List child nodes of a given directory
+func (s *Etcd) List(directory string) ([]*store.KVPair, error) {
+	resp, err := s.client.Get(store.Normalize(directory), true, true)
+	if err != nil {
+		return nil, err
+	}
+	kv := []*store.KVPair{}
+	for _, n := range resp.Node.Nodes {
+		key := strings.TrimLeft(n.Key, "/")
+		kv = append(kv, &store.KVPair{
+			Key:       key,
+			Value:     []byte(n.Value),
+			LastIndex: n.ModifiedIndex,
+		})
+	}
+	return kv, nil
+}
+
+// DeleteTree deletes a range of keys under a given directory
+func (s *Etcd) DeleteTree(directory string) error {
+	_, err := s.client.Delete(store.Normalize(directory), true)
+	return err
+}
+
+// NewLock returns a handle to a lock struct which can
+// be used to provide mutual exclusion on a key
+func (s *Etcd) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
+	var value string
+	ttl := uint64(time.Duration(defaultLockTTL).Seconds())
+
+	// Apply options on Lock
+	if options != nil {
+		if options.Value != nil {
+			value = string(options.Value)
+		}
+		if options.TTL != 0 {
+			ttl = uint64(options.TTL.Seconds())
+		}
+	}
+
+	// Create lock object
+	lock = &etcdLock{
+		client: s.client,
+		key:    key,
+		value:  value,
+		ttl:    ttl,
+	}
+
+	return lock, nil
+}
+
+// Lock attempts to acquire the lock and blocks while
+// doing so. It returns a channel that is closed if our
+// lock is lost or if an error occurs
+func (l *etcdLock) Lock() (<-chan struct{}, error) {
+
+	key := store.Normalize(l.key)
+
+	// Lock holder channels
+	lockHeld := make(chan struct{})
+	stopLocking := make(chan struct{})
+
+	var lastIndex uint64
+
+	for {
+		resp, err := l.client.Create(key, l.value, l.ttl)
+		if err != nil {
+			if etcdError, ok := err.(*etcd.EtcdError); ok {
+				// Key already exists
+				if etcdError.ErrorCode != 105 {
+					lastIndex = ^uint64(0)
+				}
+			}
+		} else {
+			lastIndex = resp.Node.ModifiedIndex
+		}
+
+		_, err = l.client.CompareAndSwap(key, l.value, l.ttl, "", lastIndex)
+
+		if err == nil {
+			// Leader section
+			l.stopLock = stopLocking
+			go l.holdLock(key, lockHeld, stopLocking)
+			break
+		} else {
+			// Seeker section
+			chW := make(chan *etcd.Response)
+			chWStop := make(chan bool)
+			l.waitLock(key, chW, chWStop)
+
+			// Delete or Expire event occurred
+			// Retry
+		}
+	}
+
+	return lockHeld, nil
+}
+
+// Hold the lock as long as we can
+// Updates the key ttl periodically until we receive
+// an explicit stop signal from the Unlock method
+func (l *etcdLock) holdLock(key string, lockHeld chan struct{}, stopLocking chan struct{}) {
+	defer close(lockHeld)
+
+	update := time.NewTicker(defaultUpdateTime)
+	defer update.Stop()
+
+	var err error
+
+	for {
+		select {
+		case <-update.C:
+			l.last, err = l.client.Update(key, l.value, l.ttl)
+			if err != nil {
+				return
+			}
+
+		case <-stopLocking:
+			return
+		}
+	}
+}
+
+// waitLock simply waits for the key to be available for creation
+func (l *etcdLock) waitLock(key string, eventCh chan *etcd.Response, stopWatchCh chan bool) {
+	go l.client.Watch(key, 0, false, eventCh, stopWatchCh)
+	for event := range eventCh {
+		if event.Action == "delete" || event.Action == "expire" {
+			return
+		}
+	}
+}
+
+// Unlock the "key". Calling unlock while
+// not holding the lock will throw an error
+func (l *etcdLock) Unlock() error {
+	if l.stopLock != nil {
+		l.stopLock <- struct{}{}
+	}
+	if l.last != nil {
+		_, err := l.client.CompareAndDelete(store.Normalize(l.key), l.value, l.last.Node.ModifiedIndex)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Close closes the client connection
+func (s *Etcd) Close() {
+	return
+}
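
The lock implementation above (create-or-CAS, then a background `holdLock` that refreshes the TTL until `Unlock`) is consumed through the generic `NewLock`/`Lock`/`Unlock` calls of the `Store` interface. A minimal, illustrative sketch, assuming an etcd instance on `localhost:4001` as in the test below:

```go
package main

import (
	"log"
	"time"

	"github.com/docker/libkv/store"
	"github.com/docker/libkv/store/etcd"
)

func main() {
	kv, err := etcd.InitializeEtcd(
		[]string{"localhost:4001"},
		&store.Config{ConnectionTimeout: 3 * time.Second},
	)
	if err != nil {
		log.Fatalf("cannot create store: %v", err)
	}

	// The lock TTL is refreshed in the background until Unlock is called.
	lock, err := kv.NewLock("locks/leader", &store.LockOptions{
		Value: []byte("node1"),
		TTL:   20 * time.Second,
	})
	if err != nil {
		log.Fatalf("cannot create lock: %v", err)
	}

	lost, err := lock.Lock()
	if err != nil {
		log.Fatalf("cannot acquire lock: %v", err)
	}
	defer lock.Unlock()

	select {
	case <-lost:
		log.Println("lock was lost")
	case <-time.After(5 * time.Second):
		log.Println("done with the critical section")
	}
}
```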

+ 33 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/etcd/etcd_test.go

@@ -0,0 +1,33 @@
+package etcd
+
+import (
+	"testing"
+	"time"
+
+	"github.com/docker/libkv/store"
+)
+
+func makeEtcdClient(t *testing.T) store.Store {
+	client := "localhost:4001"
+
+	kv, err := InitializeEtcd(
+		[]string{client},
+		&store.Config{
+			ConnectionTimeout: 3 * time.Second,
+			EphemeralTTL:      2 * time.Second,
+		},
+	)
+
+	if err != nil {
+		t.Fatalf("cannot create store: %v", err)
+	}
+
+	return kv
+}
+
+func TestEtcdStore(t *testing.T) {
+	kv := makeEtcdClient(t)
+	backup := makeEtcdClient(t)
+
+	store.TestStore(t, kv, backup)
+}

+ 47 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/helpers.go

@@ -0,0 +1,47 @@
+package store
+
+import (
+	"strings"
+)
+
+// CreateEndpoints creates a list of endpoints given the right scheme
+func CreateEndpoints(addrs []string, scheme string) (entries []string) {
+	for _, addr := range addrs {
+		entries = append(entries, scheme+"://"+addr)
+	}
+	return entries
+}
+
+// Normalize the key for each store to the form:
+//
+//     /path/to/key
+//
+func Normalize(key string) string {
+	return "/" + join(SplitKey(key))
+}
+
+// GetDirectory gets the full directory part of
+// the key to the form:
+//
+//     /path/to/
+//
+func GetDirectory(key string) string {
+	parts := SplitKey(key)
+	parts = parts[:len(parts)-1]
+	return "/" + join(parts)
+}
+
+// SplitKey splits the key to extract path information
+func SplitKey(key string) (path []string) {
+	if strings.Contains(key, "/") {
+		path = strings.Split(key, "/")
+	} else {
+		path = []string{key}
+	}
+	return path
+}
+
+// join the path parts with '/'
+func join(parts []string) string {
+	return strings.Join(parts, "/")
+}
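
To illustrate the helpers above, a small sketch of the normalization they perform (the expected output follows directly from the code as written):

```go
package main

import (
	"fmt"

	"github.com/docker/libkv/store"
)

func main() {
	// Keys without a leading "/" get one prepended.
	fmt.Println(store.Normalize("path/to/key"))    // /path/to/key
	fmt.Println(store.GetDirectory("path/to/key")) // /path/to
	fmt.Println(store.SplitKey("path/to/key"))     // [path to key]
}
```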

+ 113 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/mock/mock.go

@@ -0,0 +1,113 @@
+package mock
+
+import (
+	"github.com/docker/libkv/store"
+	"github.com/stretchr/testify/mock"
+)
+
+// Mock store. Mocks all Store functions using testify.Mock
+type Mock struct {
+	mock.Mock
+
+	// Endpoints passed to InitializeMock
+	Endpoints []string
+
+	// Options passed to InitializeMock
+	Options *store.Config
+}
+
+// InitializeMock creates a Mock store.
+func InitializeMock(endpoints []string, options *store.Config) (store.Store, error) {
+	s := &Mock{}
+	s.Endpoints = endpoints
+	s.Options = options
+	return s, nil
+}
+
+// Put mock
+func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error {
+	args := s.Mock.Called(key, value, opts)
+	return args.Error(0)
+}
+
+// Get mock
+func (s *Mock) Get(key string) (*store.KVPair, error) {
+	args := s.Mock.Called(key)
+	return args.Get(0).(*store.KVPair), args.Error(1)
+}
+
+// Delete mock
+func (s *Mock) Delete(key string) error {
+	args := s.Mock.Called(key)
+	return args.Error(0)
+}
+
+// Exists mock
+func (s *Mock) Exists(key string) (bool, error) {
+	args := s.Mock.Called(key)
+	return args.Bool(0), args.Error(1)
+}
+
+// Watch mock
+func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
+	args := s.Mock.Called(key, stopCh)
+	return args.Get(0).(<-chan *store.KVPair), args.Error(1)
+}
+
+// WatchTree mock
+func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
+	args := s.Mock.Called(prefix, stopCh)
+	return args.Get(0).(chan []*store.KVPair), args.Error(1)
+}
+
+// NewLock mock
+func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
+	args := s.Mock.Called(key, options)
+	return args.Get(0).(store.Locker), args.Error(1)
+}
+
+// List mock
+func (s *Mock) List(prefix string) ([]*store.KVPair, error) {
+	args := s.Mock.Called(prefix)
+	return args.Get(0).([]*store.KVPair), args.Error(1)
+}
+
+// DeleteTree mock
+func (s *Mock) DeleteTree(prefix string) error {
+	args := s.Mock.Called(prefix)
+	return args.Error(0)
+}
+
+// AtomicPut mock
+func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) {
+	args := s.Mock.Called(key, value, previous, opts)
+	return args.Bool(0), args.Get(1).(*store.KVPair), args.Error(2)
+}
+
+// AtomicDelete mock
+func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
+	args := s.Mock.Called(key, previous)
+	return args.Bool(0), args.Error(1)
+}
+
+// Lock mock implementation of Locker
+type Lock struct {
+	mock.Mock
+}
+
+// Lock mock
+func (l *Lock) Lock() (<-chan struct{}, error) {
+	args := l.Mock.Called()
+	return args.Get(0).(<-chan struct{}), args.Error(1)
+}
+
+// Unlock mock
+func (l *Lock) Unlock() error {
+	args := l.Mock.Called()
+	return args.Error(0)
+}
+
+// Close mock
+func (s *Mock) Close() {
+	return
+}
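
Because `Mock` embeds testify's `mock.Mock`, consumers stub out a `store.Store` by registering expectations with `On(...).Return(...)` before exercising the code under test. A minimal, illustrative sketch of stubbing `Get` (the test below is hypothetical and not part of this commit):

```go
package example

import (
	"testing"

	"github.com/docker/libkv/store"
	"github.com/docker/libkv/store/mock"
	"github.com/stretchr/testify/assert"
)

func TestWithMockStore(t *testing.T) {
	s, err := mock.InitializeMock([]string{}, &store.Config{})
	assert.NoError(t, err)

	m := s.(*mock.Mock)
	// Expect a single Get("foo") and return a canned pair.
	m.On("Get", "foo").Return(&store.KVPair{Key: "foo", Value: []byte("bar")}, nil)

	pair, err := s.Get("foo")
	assert.NoError(t, err)
	assert.Equal(t, []byte("bar"), pair.Value)

	m.AssertExpectations(t)
}
```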

+ 394 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/store-test.go

@@ -0,0 +1,394 @@
+package store
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// TestStore is a helper testing method that is
+// called by each K/V backend sub-package testing
+func TestStore(t *testing.T, kv Store, backup Store) {
+	testPutGetDelete(t, kv)
+	testWatch(t, kv)
+	testWatchTree(t, kv)
+	testAtomicPut(t, kv)
+	testAtomicDelete(t, kv)
+	testLockUnlock(t, kv)
+	testPutEphemeral(t, kv, backup)
+	testList(t, kv)
+	testDeleteTree(t, kv)
+}
+
+func testPutGetDelete(t *testing.T, kv Store) {
+	key := "foo"
+	value := []byte("bar")
+
+	// Put the key
+	err := kv.Put(key, value, nil)
+	assert.NoError(t, err)
+
+	// Get should return the value and an incremented index
+	pair, err := kv.Get(key)
+	assert.NoError(t, err)
+	if assert.NotNil(t, pair) {
+		assert.NotNil(t, pair.Value)
+	}
+	assert.Equal(t, pair.Value, value)
+	assert.NotEqual(t, pair.LastIndex, 0)
+
+	// Delete the key
+	err = kv.Delete(key)
+	assert.NoError(t, err)
+
+	// Get should fail
+	pair, err = kv.Get(key)
+	assert.Error(t, err)
+	assert.Nil(t, pair)
+}
+
+func testWatch(t *testing.T, kv Store) {
+	key := "hello"
+	value := []byte("world")
+	newValue := []byte("world!")
+
+	// Put the key
+	err := kv.Put(key, value, nil)
+	assert.NoError(t, err)
+
+	stopCh := make(<-chan struct{})
+	events, err := kv.Watch(key, stopCh)
+	assert.NoError(t, err)
+	assert.NotNil(t, events)
+
+	// Update loop
+	go func() {
+		timeout := time.After(1 * time.Second)
+		tick := time.Tick(250 * time.Millisecond)
+		for {
+			select {
+			case <-timeout:
+				return
+			case <-tick:
+				err := kv.Put(key, newValue, nil)
+				if assert.NoError(t, err) {
+					continue
+				}
+				return
+			}
+		}
+	}()
+
+	// Check for updates
+	eventCount := 1
+	for {
+		select {
+		case event := <-events:
+			assert.NotNil(t, event)
+			if eventCount == 1 {
+				assert.Equal(t, event.Key, key)
+				assert.Equal(t, event.Value, value)
+			} else {
+				assert.Equal(t, event.Key, key)
+				assert.Equal(t, event.Value, newValue)
+			}
+			eventCount++
+			// We received all the events we wanted to check
+			if eventCount >= 4 {
+				return
+			}
+		case <-time.After(4 * time.Second):
+			t.Fatal("Timeout reached")
+			return
+		}
+	}
+}
+
+func testWatchTree(t *testing.T, kv Store) {
+	dir := "tree"
+
+	node1 := "tree/node1"
+	value1 := []byte("node1")
+
+	node2 := "tree/node2"
+	value2 := []byte("node2")
+
+	node3 := "tree/node3"
+	value3 := []byte("node3")
+
+	err := kv.Put(node1, value1, nil)
+	assert.NoError(t, err)
+	err = kv.Put(node2, value2, nil)
+	assert.NoError(t, err)
+	err = kv.Put(node3, value3, nil)
+	assert.NoError(t, err)
+
+	stopCh := make(<-chan struct{})
+	events, err := kv.WatchTree(dir, stopCh)
+	assert.NoError(t, err)
+	assert.NotNil(t, events)
+
+	// Update loop
+	go func() {
+		timeout := time.After(250 * time.Millisecond)
+		for {
+			select {
+			case <-timeout:
+				err := kv.Delete(node3)
+				assert.NoError(t, err)
+				return
+			}
+		}
+	}()
+
+	// Check for updates
+	for {
+		select {
+		case event := <-events:
+			assert.NotNil(t, event)
+			// We received the Delete event on a child node
+			// Exit test successfully
+			if len(event) == 2 {
+				return
+			}
+		case <-time.After(4 * time.Second):
+			t.Fatal("Timeout reached")
+			return
+		}
+	}
+}
+
+func testAtomicPut(t *testing.T, kv Store) {
+	key := "hello"
+	value := []byte("world")
+
+	// Put the key
+	err := kv.Put(key, value, nil)
+	assert.NoError(t, err)
+
+	// Get should return the value and an incremented index
+	pair, err := kv.Get(key)
+	assert.NoError(t, err)
+	if assert.NotNil(t, pair) {
+		assert.NotNil(t, pair.Value)
+	}
+	assert.Equal(t, pair.Value, value)
+	assert.NotEqual(t, pair.LastIndex, 0)
+
+	// This CAS should fail: no previous
+	success, _, err := kv.AtomicPut("hello", []byte("WORLD"), nil, nil)
+	assert.Error(t, err)
+	assert.False(t, success)
+
+	// This CAS should succeed
+	success, _, err = kv.AtomicPut("hello", []byte("WORLD"), pair, nil)
+	assert.NoError(t, err)
+	assert.True(t, success)
+
+	// This CAS should fail
+	pair.LastIndex = 0
+	success, _, err = kv.AtomicPut("hello", []byte("WORLDWORLD"), pair, nil)
+	assert.Error(t, err)
+	assert.False(t, success)
+}
+
+func testAtomicDelete(t *testing.T, kv Store) {
+	key := "atomic"
+	value := []byte("world")
+
+	// Put the key
+	err := kv.Put(key, value, nil)
+	assert.NoError(t, err)
+
+	// Get should return the value and an incremented index
+	pair, err := kv.Get(key)
+	assert.NoError(t, err)
+	if assert.NotNil(t, pair) {
+		assert.NotNil(t, pair.Value)
+	}
+	assert.Equal(t, pair.Value, value)
+	assert.NotEqual(t, pair.LastIndex, 0)
+
+	tempIndex := pair.LastIndex
+
+	// AtomicDelete should fail
+	pair.LastIndex = 0
+	success, err := kv.AtomicDelete(key, pair)
+	assert.Error(t, err)
+	assert.False(t, success)
+
+	// AtomicDelete should succeed
+	pair.LastIndex = tempIndex
+	success, err = kv.AtomicDelete(key, pair)
+	assert.NoError(t, err)
+	assert.True(t, success)
+}
+
+func testLockUnlock(t *testing.T, kv Store) {
+	key := "foo"
+	value := []byte("bar")
+
+	// We should be able to create a new lock on key
+	lock, err := kv.NewLock(key, &LockOptions{Value: value})
+	assert.NoError(t, err)
+	assert.NotNil(t, lock)
+
+	// Lock should succeed or block until acquired
+	lockChan, err := lock.Lock()
+	assert.NoError(t, err)
+	assert.NotNil(t, lockChan)
+
+	// Get should work
+	pair, err := kv.Get(key)
+	assert.NoError(t, err)
+	if assert.NotNil(t, pair) {
+		assert.NotNil(t, pair.Value)
+	}
+	assert.Equal(t, pair.Value, value)
+	assert.NotEqual(t, pair.LastIndex, 0)
+
+	// Unlock should succeed
+	err = lock.Unlock()
+	assert.NoError(t, err)
+
+	// Get should work
+	pair, err = kv.Get(key)
+	assert.NoError(t, err)
+	if assert.NotNil(t, pair) {
+		assert.NotNil(t, pair.Value)
+	}
+	assert.Equal(t, pair.Value, value)
+	assert.NotEqual(t, pair.LastIndex, 0)
+}
+
+func testPutEphemeral(t *testing.T, kv Store, otherConn Store) {
+	firstKey := "first"
+	firstValue := []byte("foo")
+
+	secondKey := "second"
+	secondValue := []byte("bar")
+
+	// Put the first key with the Ephemeral flag
+	err := otherConn.Put(firstKey, firstValue, &WriteOptions{Ephemeral: true})
+	assert.NoError(t, err)
+
+	// Put a second key with the Ephemeral flag
+	err = otherConn.Put(secondKey, secondValue, &WriteOptions{Ephemeral: true})
+	assert.NoError(t, err)
+
+	// Get on firstKey should work
+	pair, err := kv.Get(firstKey)
+	assert.NoError(t, err)
+	assert.NotNil(t, pair)
+
+	// Get on secondKey should work
+	pair, err = kv.Get(secondKey)
+	assert.NoError(t, err)
+	assert.NotNil(t, pair)
+
+	// Close the connection
+	otherConn.Close()
+
+	// Let the session expire
+	time.Sleep(5 * time.Second)
+
+	// Get on firstKey shouldn't work
+	pair, err = kv.Get(firstKey)
+	assert.Error(t, err)
+	assert.Nil(t, pair)
+
+	// Get on secondKey shouldn't work
+	pair, err = kv.Get(secondKey)
+	assert.Error(t, err)
+	assert.Nil(t, pair)
+}
+
+func testList(t *testing.T, kv Store) {
+	prefix := "nodes"
+
+	firstKey := "nodes/first"
+	firstValue := []byte("first")
+
+	secondKey := "nodes/second"
+	secondValue := []byte("second")
+
+	// Put the first key
+	err := kv.Put(firstKey, firstValue, nil)
+	assert.NoError(t, err)
+
+	// Put the second key
+	err = kv.Put(secondKey, secondValue, nil)
+	assert.NoError(t, err)
+
+	// List should work and return the two correct values
+	pairs, err := kv.List(prefix)
+	assert.NoError(t, err)
+	if assert.NotNil(t, pairs) {
+		assert.Equal(t, len(pairs), 2)
+	}
+
+	// Check pairs; they are not necessarily in Put order
+	for _, pair := range pairs {
+		if pair.Key == firstKey {
+			assert.Equal(t, pair.Value, firstValue)
+		}
+		if pair.Key == secondKey {
+			assert.Equal(t, pair.Value, secondValue)
+		}
+	}
+
+	// List should fail: the key does not exist
+	pairs, err = kv.List("idontexist")
+	assert.Error(t, err)
+	assert.Nil(t, pairs)
+}
+
+func testDeleteTree(t *testing.T, kv Store) {
+	prefix := "nodes"
+
+	firstKey := "nodes/first"
+	firstValue := []byte("first")
+
+	secondKey := "nodes/second"
+	secondValue := []byte("second")
+
+	// Put the first key
+	err := kv.Put(firstKey, firstValue, nil)
+	assert.NoError(t, err)
+
+	// Put the second key
+	err = kv.Put(secondKey, secondValue, nil)
+	assert.NoError(t, err)
+
+	// Get should work on the first Key
+	pair, err := kv.Get(firstKey)
+	assert.NoError(t, err)
+	if assert.NotNil(t, pair) {
+		assert.NotNil(t, pair.Value)
+	}
+	assert.Equal(t, pair.Value, firstValue)
+	assert.NotEqual(t, pair.LastIndex, 0)
+
+	// Get should work on the second Key
+	pair, err = kv.Get(secondKey)
+	assert.NoError(t, err)
+	if assert.NotNil(t, pair) {
+		assert.NotNil(t, pair.Value)
+	}
+	assert.Equal(t, pair.Value, secondValue)
+	assert.NotEqual(t, pair.LastIndex, 0)
+
+	// Delete values under directory `nodes`
+	err = kv.DeleteTree(prefix)
+	assert.NoError(t, err)
+
+	// Get should fail on both keys
+	pair, err = kv.Get(firstKey)
+	assert.Error(t, err)
+	assert.Nil(t, pair)
+
+	pair, err = kv.Get(secondKey)
+	assert.Error(t, err)
+	assert.Nil(t, pair)
+}

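The watch behaviour exercised by testWatch above is consumed the same way outside of tests: read from the returned channel until it closes or the stop channel fires. A minimal, hypothetical consumer sketch (the function and package names are illustrative, not part of this change):

package watchexample

import (
	"fmt"

	"github.com/docker/libkv/store"
)

// printUpdates logs every change seen on key until stopCh is closed
// or the backend closes the event channel. kv can be any libkv
// store.Store implementation (consul, etcd, zk or the mock).
func printUpdates(kv store.Store, key string, stopCh <-chan struct{}) error {
	events, err := kv.Watch(key, stopCh)
	if err != nil {
		return err
	}
	for {
		select {
		case pair, ok := <-events:
			if !ok {
				// The watch ended (channel closed by the backend).
				return nil
			}
			fmt.Printf("update: %s = %s (index %d)\n", pair.Key, pair.Value, pair.LastIndex)
		case <-stopCh:
			return nil
		}
	}
}
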
+ 120 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/store.go

@@ -0,0 +1,120 @@
+package store
+
+import (
+	"crypto/tls"
+	"errors"
+	"time"
+)
+
+// Backend represents a KV Store Backend
+type Backend string
+
+const (
+	// MOCK backend
+	MOCK Backend = "mock"
+	// CONSUL backend
+	CONSUL = "consul"
+	// ETCD backend
+	ETCD = "etcd"
+	// ZK backend
+	ZK = "zk"
+)
+
+var (
+	// ErrNotSupported is thrown when the backend k/v store is not supported by libkv
+	ErrNotSupported = errors.New("Backend storage not supported yet, please choose another one")
+	// ErrNotImplemented is thrown when a method is not implemented by the current backend
+	ErrNotImplemented = errors.New("Call not implemented in current backend")
+	// ErrNotReachable is thrown when the API cannot be reached for issuing common store operations
+	ErrNotReachable = errors.New("Api not reachable")
+	// ErrCannotLock is thrown when there is an error acquiring a lock on a key
+	ErrCannotLock = errors.New("Error acquiring the lock")
+	// ErrKeyModified is thrown during an atomic operation if the index does not match the one in the store
+	ErrKeyModified = errors.New("Unable to complete atomic operation, key modified")
+	// ErrKeyNotFound is thrown when the key is not found in the store during a Get operation
+	ErrKeyNotFound = errors.New("Key not found in store")
+	// ErrPreviousNotSpecified is thrown when the previous value is not specified for an atomic operation
+	ErrPreviousNotSpecified = errors.New("Previous K/V pair should be provided for the Atomic operation")
+)
+
+// Config contains the options for a storage client
+type Config struct {
+	TLS               *tls.Config
+	ConnectionTimeout time.Duration
+	EphemeralTTL      time.Duration
+}
+
+// Store represents the backend K/V storage.
+// Each store should support every call listed
+// here; otherwise it cannot be used as a K/V
+// backend for libkv
+type Store interface {
+	// Put a value at the specified key
+	Put(key string, value []byte, options *WriteOptions) error
+
+	// Get a value given its key
+	Get(key string) (*KVPair, error)
+
+	// Delete the value at the specified key
+	Delete(key string) error
+
+	// Verify if a Key exists in the store
+	Exists(key string) (bool, error)
+
+	// Watch for changes on a key
+	Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error)
+
+	// WatchTree watches for changes on child nodes under
+	// a given a directory
+	WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*KVPair, error)
+
+	// NewLock creates a lock for a given key.
+	// The returned Locker is not held and must be acquired
+	// with `.Lock`. The Value is optional.
+	NewLock(key string, options *LockOptions) (Locker, error)
+
+	// List the content of a given prefix
+	List(directory string) ([]*KVPair, error)
+
+	// DeleteTree deletes a range of keys under a given directory
+	DeleteTree(directory string) error
+
+	// Atomic operation on a single value
+	AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error)
+
+	// Atomic delete of a single value
+	AtomicDelete(key string, previous *KVPair) (bool, error)
+
+	// Close the store connection
+	Close()
+}
+
+// KVPair represents the {Key, Value, LastIndex} tuple
+type KVPair struct {
+	Key       string
+	Value     []byte
+	LastIndex uint64
+}
+
+// WriteOptions contains optional request parameters
+type WriteOptions struct {
+	Heartbeat time.Duration
+	Ephemeral bool
+}
+
+// LockOptions contains optional request parameters
+type LockOptions struct {
+	Value []byte        // Optional, value to associate with the lock
+	TTL   time.Duration // Optional, expiration ttl associated with the lock
+}
+
+// WatchCallback is used for watch methods on keys
+// and is triggered on key change
+type WatchCallback func(entries ...*KVPair)
+
+// Locker provides a locking mechanism on top of the store.
+// Similar to `sync.Locker` except it may return errors.
+type Locker interface {
+	Lock() (<-chan struct{}, error)
+	Unlock() error
+}

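As datastore.go below switches from the swarm store to this API, the entry point is libkv.NewStore with one of the Backend constants. A hypothetical end-to-end sketch against a local Consul agent; the agent address, key and value are illustrative and not part of this change:

package main

import (
	"fmt"
	"time"

	"github.com/docker/libkv"
	"github.com/docker/libkv/store"
)

func main() {
	// Build a Consul-backed Store; the address and timeout are made up.
	kv, err := libkv.NewStore(
		store.CONSUL,
		[]string{"127.0.0.1:8500"},
		&store.Config{ConnectionTimeout: 10 * time.Second},
	)
	if err != nil {
		panic(err)
	}
	defer kv.Close()

	// Put a key, then read it back along with its last modified index.
	if err := kv.Put("libnetwork/example", []byte("hello"), nil); err != nil {
		panic(err)
	}
	pair, err := kv.Get("libnetwork/example")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s = %s (index %d)\n", pair.Key, pair.Value, pair.LastIndex)
}
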
+ 357 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/zookeeper/zookeeper.go

@@ -0,0 +1,357 @@
+package zookeeper
+
+import (
+	"strings"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/libkv/store"
+	zk "github.com/samuel/go-zookeeper/zk"
+)
+
+const defaultTimeout = 10 * time.Second
+
+// Zookeeper is the receiver type for
+// the Store interface
+type Zookeeper struct {
+	timeout time.Duration
+	client  *zk.Conn
+}
+
+type zookeeperLock struct {
+	client *zk.Conn
+	lock   *zk.Lock
+	key    string
+	value  []byte
+}
+
+// InitializeZookeeper creates a new Zookeeper client
+// given a list of endpoints and optional store configuration
+func InitializeZookeeper(endpoints []string, options *store.Config) (store.Store, error) {
+	s := &Zookeeper{}
+	s.timeout = defaultTimeout
+
+	// Set options
+	if options != nil {
+		if options.ConnectionTimeout != 0 {
+			s.setTimeout(options.ConnectionTimeout)
+		}
+	}
+
+	// Connect to Zookeeper
+	conn, _, err := zk.Connect(endpoints, s.timeout)
+	if err != nil {
+		log.Error(err)
+		return nil, err
+	}
+	s.client = conn
+
+	return s, nil
+}
+
+// setTimeout sets the timeout for connecting to Zookeeper
+func (s *Zookeeper) setTimeout(time time.Duration) {
+	s.timeout = time
+}
+
+// Get the value at "key"; returns the last modified index
+// to use in conjunction with Atomic calls
+func (s *Zookeeper) Get(key string) (pair *store.KVPair, err error) {
+	resp, meta, err := s.client.Get(store.Normalize(key))
+	if err != nil {
+		return nil, err
+	}
+
+	// If resp is nil, the key does not exist
+	if resp == nil {
+		return nil, store.ErrKeyNotFound
+	}
+
+	pair = &store.KVPair{
+		Key:       key,
+		Value:     resp,
+		LastIndex: uint64(meta.Version),
+	}
+
+	return pair, nil
+}
+
+// createFullPath creates the entire path for a directory
+// that does not exist
+func (s *Zookeeper) createFullPath(path []string, ephemeral bool) error {
+	for i := 1; i <= len(path); i++ {
+		newpath := "/" + strings.Join(path[:i], "/")
+		if i == len(path) && ephemeral {
+			_, err := s.client.Create(newpath, []byte{1}, zk.FlagEphemeral, zk.WorldACL(zk.PermAll))
+			return err
+		}
+		_, err := s.client.Create(newpath, []byte{1}, 0, zk.WorldACL(zk.PermAll))
+		if err != nil {
+			// Skip if node already exists
+			if err != zk.ErrNodeExists {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Put a value at "key"
+func (s *Zookeeper) Put(key string, value []byte, opts *store.WriteOptions) error {
+	fkey := store.Normalize(key)
+
+	exists, err := s.Exists(key)
+	if err != nil {
+		return err
+	}
+
+	if !exists {
+		if opts != nil && opts.Ephemeral {
+			s.createFullPath(store.SplitKey(key), opts.Ephemeral)
+		} else {
+			s.createFullPath(store.SplitKey(key), false)
+		}
+	}
+
+	_, err = s.client.Set(fkey, value, -1)
+	return err
+}
+
+// Delete a value at "key"
+func (s *Zookeeper) Delete(key string) error {
+	err := s.client.Delete(store.Normalize(key), -1)
+	return err
+}
+
+// Exists checks if the key exists inside the store
+func (s *Zookeeper) Exists(key string) (bool, error) {
+	exists, _, err := s.client.Exists(store.Normalize(key))
+	if err != nil {
+		return false, err
+	}
+	return exists, nil
+}
+
+// Watch for changes on a "key"
+// It returns a channel that will receive changes or pass
+// on errors. Upon creation, the current value will first
+// be sent to the channel. Providing a non-nil stopCh can
+// be used to stop watching.
+func (s *Zookeeper) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
+	// Get the key first
+	pair, err := s.Get(key)
+	if err != nil {
+		return nil, err
+	}
+
+	// Catch zk notifications and fire changes into the channel.
+	watchCh := make(chan *store.KVPair)
+	go func() {
+		defer close(watchCh)
+
+		// Get returns the current value to the channel prior
+		// to listening to any event that may occur on that key
+		watchCh <- pair
+		for {
+			_, _, eventCh, err := s.client.GetW(store.Normalize(key))
+			if err != nil {
+				return
+			}
+			select {
+			case e := <-eventCh:
+				if e.Type == zk.EventNodeDataChanged {
+					if entry, err := s.Get(key); err == nil {
+						watchCh <- entry
+					}
+				}
+			case <-stopCh:
+				// There is no way to stop GetW so just quit
+				return
+			}
+		}
+	}()
+
+	return watchCh, nil
+}
+
+// WatchTree watches for changes on a "directory"
+// It returns a channel that will receive changes or pass
+// on errors. Upon creating a watch, the current child values
+// will be sent to the channel. Providing a non-nil stopCh can
+// be used to stop watching.
+func (s *Zookeeper) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
+	// List the children first
+	entries, err := s.List(directory)
+	if err != nil {
+		return nil, err
+	}
+
+	// Catch zk notifications and fire changes into the channel.
+	watchCh := make(chan []*store.KVPair)
+	go func() {
+		defer close(watchCh)
+
+		// List returns the child values to the channel
+		// prior to listening to any events that may occur
+		// on those keys
+		watchCh <- entries
+
+		for {
+			_, _, eventCh, err := s.client.ChildrenW(store.Normalize(directory))
+			if err != nil {
+				return
+			}
+			select {
+			case e := <-eventCh:
+				if e.Type == zk.EventNodeChildrenChanged {
+					if kv, err := s.List(directory); err == nil {
+						watchCh <- kv
+					}
+				}
+			case <-stopCh:
+				// There is no way to stop ChildrenW so just quit
+				return
+			}
+		}
+	}()
+
+	return watchCh, nil
+}
+
+// List child nodes of a given directory
+func (s *Zookeeper) List(directory string) ([]*store.KVPair, error) {
+	keys, stat, err := s.client.Children(store.Normalize(directory))
+	if err != nil {
+		return nil, err
+	}
+
+	kv := []*store.KVPair{}
+
+	// FIXME Costly Get request for each child key..
+	for _, key := range keys {
+		pair, err := s.Get(directory + store.Normalize(key))
+		if err != nil {
+			return nil, err
+		}
+
+		kv = append(kv, &store.KVPair{
+			Key:       key,
+			Value:     []byte(pair.Value),
+			LastIndex: uint64(stat.Version),
+		})
+	}
+
+	return kv, nil
+}
+
+// DeleteTree deletes a range of keys under a given directory
+func (s *Zookeeper) DeleteTree(directory string) error {
+	pairs, err := s.List(directory)
+	if err != nil {
+		return err
+	}
+
+	var reqs []interface{}
+
+	for _, pair := range pairs {
+		reqs = append(reqs, &zk.DeleteRequest{
+			Path:    store.Normalize(directory + "/" + pair.Key),
+			Version: -1,
+		})
+	}
+
+	_, err = s.client.Multi(reqs...)
+	return err
+}
+
+// AtomicPut puts a value at "key" if the key has not been
+// modified in the meantime, throws an error if it has
+func (s *Zookeeper) AtomicPut(key string, value []byte, previous *store.KVPair, _ *store.WriteOptions) (bool, *store.KVPair, error) {
+	if previous == nil {
+		return false, nil, store.ErrPreviousNotSpecified
+	}
+
+	meta, err := s.client.Set(store.Normalize(key), value, int32(previous.LastIndex))
+	if err != nil {
+		// Compare Failed
+		if err == zk.ErrBadVersion {
+			return false, nil, store.ErrKeyModified
+		}
+		return false, nil, err
+	}
+
+	pair := &store.KVPair{
+		Key:       key,
+		Value:     value,
+		LastIndex: uint64(meta.Version),
+	}
+
+	return true, pair, nil
+}
+
+// AtomicDelete deletes a value at "key" if the key
+// has not been modified in the meantime, throws an
+// error if it has
+func (s *Zookeeper) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
+	if previous == nil {
+		return false, store.ErrPreviousNotSpecified
+	}
+
+	err := s.client.Delete(store.Normalize(key), int32(previous.LastIndex))
+	if err != nil {
+		if err == zk.ErrBadVersion {
+			return false, store.ErrKeyModified
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+// NewLock returns a handle to a lock struct which can
+// be used to provide mutual exclusion on a key
+func (s *Zookeeper) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
+	value := []byte("")
+
+	// Apply options
+	if options != nil {
+		if options.Value != nil {
+			value = options.Value
+		}
+	}
+
+	lock = &zookeeperLock{
+		client: s.client,
+		key:    store.Normalize(key),
+		value:  value,
+		lock:   zk.NewLock(s.client, store.Normalize(key), zk.WorldACL(zk.PermAll)),
+	}
+
+	return lock, err
+}
+
+// Lock attempts to acquire the lock and blocks while
+// doing so. It returns a channel that is closed if our
+// lock is lost or if an error occurs
+func (l *zookeeperLock) Lock() (<-chan struct{}, error) {
+	err := l.lock.Lock()
+
+	if err == nil {
+		// We hold the lock, we can set our value
+		// FIXME: The value is left behind
+		// (problematic for leader election)
+		_, err = l.client.Set(l.key, l.value, -1)
+	}
+
+	return make(chan struct{}), err
+}
+
+// Unlock the "key". Calling unlock while
+// not holding the lock will throw an error
+func (l *zookeeperLock) Unlock() error {
+	return l.lock.Unlock()
+}
+
+// Close closes the client connection
+func (s *Zookeeper) Close() {
+	s.client.Close()
+}

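The NewLock/Lock/Unlock flow above wraps zk.NewLock directly. A hypothetical sketch of taking a coarse lock through this store; the endpoint, key and value are illustrative and error handling is kept minimal:

package main

import (
	"fmt"
	"time"

	"github.com/docker/libkv/store"
	"github.com/docker/libkv/store/zookeeper"
)

func main() {
	// Connect to a local ZooKeeper ensemble; the endpoint is made up.
	kv, err := zookeeper.InitializeZookeeper(
		[]string{"127.0.0.1:2181"},
		&store.Config{ConnectionTimeout: 3 * time.Second},
	)
	if err != nil {
		panic(err)
	}
	defer kv.Close()

	// Create the lock handle, acquire it, do work, release it.
	lock, err := kv.NewLock("locks/example", &store.LockOptions{Value: []byte("owner-1")})
	if err != nil {
		panic(err)
	}
	if _, err := lock.Lock(); err != nil {
		panic(err)
	}
	fmt.Println("lock acquired, doing work")
	if err := lock.Unlock(); err != nil {
		panic(err)
	}
}
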
+ 33 - 0
libnetwork/Godeps/_workspace/src/github.com/docker/libkv/store/zookeeper/zookeeper_test.go

@@ -0,0 +1,33 @@
+package zookeeper
+
+import (
+	"testing"
+	"time"
+
+	"github.com/docker/libkv/store"
+)
+
+func makeZkClient(t *testing.T) store.Store {
+	client := "localhost:2181"
+
+	kv, err := InitializeZookeeper(
+		[]string{client},
+		&store.Config{
+			ConnectionTimeout: 3 * time.Second,
+			EphemeralTTL:      2 * time.Second,
+		},
+	)
+
+	if err != nil {
+		t.Fatalf("cannot create store: %v", err)
+	}
+
+	return kv
+}
+
+func TestZkStore(t *testing.T) {
+	kv := makeZkClient(t)
+	backup := makeZkClient(t)
+
+	store.TestStore(t, kv, backup)
+}

+ 6 - 2
libnetwork/datastore/datastore.go

@@ -5,9 +5,10 @@ import (
 	"reflect"
 	"strings"
 
+	"github.com/docker/libkv"
+	"github.com/docker/libkv/store"
 	"github.com/docker/libnetwork/config"
 	"github.com/docker/libnetwork/types"
-	"github.com/docker/swarm/pkg/store"
 )
 
 //DataStore exported
@@ -28,6 +29,9 @@ type DataStore interface {
 	KVStore() store.Store
 }
 
+// ErrKeyModified is raised when an atomic update operates on stale state
+var ErrKeyModified = store.ErrKeyModified
+
 type datastore struct {
 	store store.Store
 }
@@ -75,7 +79,7 @@ func ParseKey(key string) ([]string, error) {
 
 // newClient used to connect to KV Store
 func newClient(kv string, addrs string) (DataStore, error) {
-	store, err := store.NewStore(store.Backend(kv), []string{addrs}, &store.Config{})
+	store, err := libkv.NewStore(store.Backend(kv), []string{addrs}, &store.Config{})
 	if err != nil {
 		return nil, err
 	}

+ 1 - 1
libnetwork/datastore/mock_store.go

@@ -3,8 +3,8 @@ package datastore
 import (
 	"errors"
 
+	"github.com/docker/libkv/store"
 	"github.com/docker/libnetwork/types"
-	"github.com/docker/swarm/pkg/store"
 )
 
 var (

+ 1 - 2
libnetwork/network.go

@@ -11,7 +11,6 @@ import (
 	"github.com/docker/libnetwork/netlabel"
 	"github.com/docker/libnetwork/options"
 	"github.com/docker/libnetwork/types"
-	"github.com/docker/swarm/pkg/store"
 )
 
 // A Network represents a logical connectivity zone that containers may
@@ -216,7 +215,7 @@ func (n *network) Delete() error {
 	// deleteNetworkFromStore performs an atomic delete operation and the network.endpointCnt field will help
 	// prevent any possible race between endpoint join and network delete
 	if err = ctrlr.deleteNetworkFromStore(n); err != nil {
-		if err == store.ErrKeyModified {
+		if err == datastore.ErrKeyModified {
 			return types.InternalErrorf("operation in progress. delete failed for network %s. Please try again.")
 		}
 		return err