Sfoglia il codice sorgente

Add dependency to docker/swarmkit

Add a dependency to `docker/swarmkit` in preparation for adding
first-class Swarm-wide service management inside the Docker Engine as
described in our ROADMAP.md.

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Signed-off-by: Jana Radhakrishnan <mrjana@docker.com>
Signed-off-by: Victor Vieux <vieux@docker.com>
Signed-off-by: Madhu Venugopal <madhu@docker.com>
Tonis Tiigi 9 anni fa
parent
commit
44793049ce
100 ha cambiato i file con 16454 aggiunte e 8 eliminazioni
  1. 26 5
      hack/vendor.sh
  2. 13 0
      vendor/src/bitbucket.org/ww/goautoneg/Makefile
  3. 67 0
      vendor/src/bitbucket.org/ww/goautoneg/README.txt
  4. 162 0
      vendor/src/bitbucket.org/ww/goautoneg/autoneg.go
  5. 2388 0
      vendor/src/github.com/beorn7/perks/quantile/exampledata.txt
  6. 292 0
      vendor/src/github.com/beorn7/perks/quantile/stream.go
  7. 24 0
      vendor/src/github.com/cloudflare/cfssl/LICENSE
  8. 94 0
      vendor/src/github.com/cloudflare/cfssl/auth/auth.go
  9. 58 0
      vendor/src/github.com/cloudflare/cfssl/certdb/README.md
  10. 40 0
      vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go
  11. 563 0
      vendor/src/github.com/cloudflare/cfssl/config/config.go
  12. 188 0
      vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go
  13. 414 0
      vendor/src/github.com/cloudflare/cfssl/csr/csr.go
  14. 46 0
      vendor/src/github.com/cloudflare/cfssl/errors/doc.go
  15. 420 0
      vendor/src/github.com/cloudflare/cfssl/errors/error.go
  16. 47 0
      vendor/src/github.com/cloudflare/cfssl/errors/http.go
  17. 42 0
      vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go
  18. 478 0
      vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go
  19. 15 0
      vendor/src/github.com/cloudflare/cfssl/info/info.go
  20. 278 0
      vendor/src/github.com/cloudflare/cfssl/initca/initca.go
  21. 174 0
      vendor/src/github.com/cloudflare/cfssl/log/log.go
  22. 13 0
      vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go
  23. 447 0
      vendor/src/github.com/cloudflare/cfssl/signer/local/local.go
  24. 385 0
      vendor/src/github.com/cloudflare/cfssl/signer/signer.go
  25. 13 0
      vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE
  26. 43 0
      vendor/src/github.com/coreos/etcd/pkg/crc/crc.go
  27. 75 0
      vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go
  28. 29 0
      vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go
  29. 79 0
      vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go
  30. 87 0
      vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go
  31. 65 0
      vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go
  32. 60 0
      vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go
  33. 28 0
      vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go
  34. 42 0
      vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go
  35. 80 0
      vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go
  36. 26 0
      vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go
  37. 29 0
      vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
  38. 78 0
      vendor/src/github.com/coreos/etcd/pkg/idutil/id.go
  39. 60 0
      vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go
  40. 57 0
      vendor/src/github.com/coreos/etcd/raft/design.md
  41. 293 0
      vendor/src/github.com/coreos/etcd/raft/doc.go
  42. 361 0
      vendor/src/github.com/coreos/etcd/raft/log.go
  43. 139 0
      vendor/src/github.com/coreos/etcd/raft/log_unstable.go
  44. 126 0
      vendor/src/github.com/coreos/etcd/raft/logger.go
  45. 488 0
      vendor/src/github.com/coreos/etcd/raft/node.go
  46. 245 0
      vendor/src/github.com/coreos/etcd/raft/progress.go
  47. 898 0
      vendor/src/github.com/coreos/etcd/raft/raft.go
  48. 1768 0
      vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go
  49. 86 0
      vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto
  50. 228 0
      vendor/src/github.com/coreos/etcd/raft/rawnode.go
  51. 76 0
      vendor/src/github.com/coreos/etcd/raft/status.go
  52. 252 0
      vendor/src/github.com/coreos/etcd/raft/storage.go
  53. 116 0
      vendor/src/github.com/coreos/etcd/raft/util.go
  54. 74 0
      vendor/src/github.com/coreos/etcd/snap/db.go
  55. 59 0
      vendor/src/github.com/coreos/etcd/snap/message.go
  56. 41 0
      vendor/src/github.com/coreos/etcd/snap/metrics.go
  57. 332 0
      vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go
  58. 14 0
      vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto
  59. 189 0
      vendor/src/github.com/coreos/etcd/snap/snapshotter.go
  60. 103 0
      vendor/src/github.com/coreos/etcd/wal/decoder.go
  61. 68 0
      vendor/src/github.com/coreos/etcd/wal/doc.go
  62. 89 0
      vendor/src/github.com/coreos/etcd/wal/encoder.go
  63. 38 0
      vendor/src/github.com/coreos/etcd/wal/metrics.go
  64. 45 0
      vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go
  65. 106 0
      vendor/src/github.com/coreos/etcd/wal/repair.go
  66. 93 0
      vendor/src/github.com/coreos/etcd/wal/util.go
  67. 562 0
      vendor/src/github.com/coreos/etcd/wal/wal.go
  68. 29 0
      vendor/src/github.com/coreos/etcd/wal/walpb/record.go
  69. 495 0
      vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go
  70. 20 0
      vendor/src/github.com/coreos/etcd/wal/walpb/record.proto
  71. 202 0
      vendor/src/github.com/coreos/pkg/LICENSE
  72. 39 0
      vendor/src/github.com/coreos/pkg/capnslog/README.md
  73. 106 0
      vendor/src/github.com/coreos/pkg/capnslog/formatters.go
  74. 96 0
      vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go
  75. 49 0
      vendor/src/github.com/coreos/pkg/capnslog/init.go
  76. 25 0
      vendor/src/github.com/coreos/pkg/capnslog/init_windows.go
  77. 68 0
      vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go
  78. 39 0
      vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go
  79. 240 0
      vendor/src/github.com/coreos/pkg/capnslog/logmap.go
  80. 158 0
      vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go
  81. 65 0
      vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go
  82. 14 2
      vendor/src/github.com/docker/engine-api/client/client.go
  83. 51 0
      vendor/src/github.com/docker/engine-api/client/errors.go
  84. 17 0
      vendor/src/github.com/docker/engine-api/client/interface.go
  85. 25 0
      vendor/src/github.com/docker/engine-api/client/node_inspect.go
  86. 36 0
      vendor/src/github.com/docker/engine-api/client/node_list.go
  87. 10 0
      vendor/src/github.com/docker/engine-api/client/node_remove.go
  88. 18 0
      vendor/src/github.com/docker/engine-api/client/node_update.go
  89. 3 1
      vendor/src/github.com/docker/engine-api/client/request.go
  90. 22 0
      vendor/src/github.com/docker/engine-api/client/service_create.go
  91. 25 0
      vendor/src/github.com/docker/engine-api/client/service_inspect.go
  92. 35 0
      vendor/src/github.com/docker/engine-api/client/service_list.go
  93. 10 0
      vendor/src/github.com/docker/engine-api/client/service_remove.go
  94. 18 0
      vendor/src/github.com/docker/engine-api/client/service_update.go
  95. 21 0
      vendor/src/github.com/docker/engine-api/client/swarm_init.go
  96. 21 0
      vendor/src/github.com/docker/engine-api/client/swarm_inspect.go
  97. 13 0
      vendor/src/github.com/docker/engine-api/client/swarm_join.go
  98. 18 0
      vendor/src/github.com/docker/engine-api/client/swarm_leave.go
  99. 18 0
      vendor/src/github.com/docker/engine-api/client/swarm_update.go
  100. 34 0
      vendor/src/github.com/docker/engine-api/client/task_inspect.go

+ 26 - 5
hack/vendor.sh

@@ -59,14 +59,14 @@ clone git github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
 clone git golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git
 clone git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git
 clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
-clone git github.com/docker/go-connections v0.2.0
-clone git github.com/docker/engine-api 8c2141e14bb9e7540938d155976b3ef0661e4814
+clone git github.com/docker/go-connections fa2850ff103453a9ad190da0df0af134f0314b3d
+clone git github.com/docker/engine-api 6b2f24f16a7f1598635b6a99dbe38ec8a5eccaf8
 clone git github.com/RackSec/srslog 259aed10dfa74ea2961eddd1d9847619f6e98837
 clone git github.com/imdario/mergo 0.2.1
 
 #get libnetwork packages
-clone git github.com/docker/libnetwork b66c0385f30c6aa27b2957ed1072682c19a0b0b4
-clone git github.com/docker/go-events 2e7d352816128aa84f4d29b2a21d400133701a0d
+clone git github.com/docker/libnetwork e8da32ce5693f0ed6823d59c8415baf76c0809ea
+clone git github.com/docker/go-events 39718a26497694185f8fb58a7d6f31947f3dc42d
 clone git github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
 clone git github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
@@ -75,7 +75,7 @@ clone git github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa07
 clone git github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
 clone git github.com/docker/libkv 7283ef27ed32fe267388510a91709b307bb9942c
 clone git github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
-clone git github.com/vishvananda/netlink 631962935bff4f3d20ff32a72e8944f6d2836a26
+clone git github.com/vishvananda/netlink 7995ff5647a22cbf0dc41bf5c0e977bdb0d5c6b7
 clone git github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
 clone git github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
 clone git github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
@@ -138,6 +138,27 @@ clone git github.com/docker/docker-credential-helpers v0.3.0
 # containerd
 clone git github.com/docker/containerd cf554d59dd96e459544748290eb9167f4bcde509
 
+# cluster
+clone git github.com/docker/swarmkit 45094b473cbdb2d45e4d8f703fb615989399ae29
+clone git github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
+clone git github.com/gogo/protobuf 43a2e0b1c32252bfbbdf81f7faa7a88fb3fa4028
+clone git github.com/cloudflare/cfssl 92f037e39eb103fb30f9151be40d9ed267fc4ae2
+clone git github.com/google/certificate-transparency 025a5cab06f6a819c455d9fdc9e2a1b6d0982284
+clone git golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 https://github.com/golang/crypto.git
+clone git github.com/mreiferson/go-httpclient 63fe23f7434723dc904c901043af07931f293c47
+clone git github.com/hashicorp/go-memdb 98f52f52d7a476958fa9da671354d270c50661a7
+clone git github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
+clone git github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
+clone git github.com/coreos/pkg 2c77715c4df99b5420ffcae14ead08f52104065d
+clone git github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0
+clone git github.com/prometheus/client_golang e51041b3fa41cece0dca035740ba6411905be473
+clone git github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d
+clone git github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+clone git github.com/prometheus/common ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650
+clone git github.com/prometheus/procfs 454a56f35412459b5e684fd5ec0f9211b94f002a
+clone hg bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675
+clone git github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
+
 # cli
 clone git github.com/spf13/cobra 75205f23b3ea70dc7ae5e900d074e010c23c37e9 https://github.com/dnephin/cobra.git
 clone git github.com/spf13/pflag cb88ea77998c3f024757528e3305022ab50b43be

+ 13 - 0
vendor/src/bitbucket.org/ww/goautoneg/Makefile

@@ -0,0 +1,13 @@
+include $(GOROOT)/src/Make.inc
+
+TARG=bitbucket.org/ww/goautoneg
+GOFILES=autoneg.go
+
+include $(GOROOT)/src/Make.pkg
+
+format:
+	gofmt -w *.go
+
+docs:
+	gomake clean
+	godoc ${TARG} > README.txt

+ 67 - 0
vendor/src/bitbucket.org/ww/goautoneg/README.txt

@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+    Neither the name of the Open Knowledge Foundation Ltd. nor the
+    names of its contributors may be used to endorse or promote
+    products derived from this software without specific prior written
+    permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+    Type, SubType string
+    Q             float32
+    Params        map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+	.hg

+ 162 - 0
vendor/src/bitbucket.org/ww/goautoneg/autoneg.go

@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+    Neither the name of the Open Knowledge Foundation Ltd. nor the
+    names of its contributors may be used to endorse or promote
+    products derived from this software without specific prior written
+    permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+	Type, SubType string
+	Q             float64
+	Params        map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+	slice := []Accept(accept)
+	return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+	slice := []Accept(accept)
+	ai, aj := slice[i], slice[j]
+	if ai.Q > aj.Q {
+		return true
+	}
+	if ai.Type != "*" && aj.Type == "*" {
+		return true
+	}
+	if ai.SubType != "*" && aj.SubType == "*" {
+		return true
+	}
+	return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+	slice := []Accept(accept)
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+	parts := strings.Split(header, ",")
+	accept = make([]Accept, 0, len(parts))
+	for _, part := range parts {
+		part := strings.Trim(part, " ")
+
+		a := Accept{}
+		a.Params = make(map[string]string)
+		a.Q = 1.0
+
+		mrp := strings.Split(part, ";")
+
+		media_range := mrp[0]
+		sp := strings.Split(media_range, "/")
+		a.Type = strings.Trim(sp[0], " ")
+
+		switch {
+		case len(sp) == 1 && a.Type == "*":
+			a.SubType = "*"
+		case len(sp) == 2:
+			a.SubType = strings.Trim(sp[1], " ")
+		default:
+			continue
+		}
+
+		if len(mrp) == 1 {
+			accept = append(accept, a)
+			continue
+		}
+
+		for _, param := range mrp[1:] {
+			sp := strings.SplitN(param, "=", 2)
+			if len(sp) != 2 {
+				continue
+			}
+			token := strings.Trim(sp[0], " ")
+			if token == "q" {
+				a.Q, _ = strconv.ParseFloat(sp[1], 32)
+			} else {
+				a.Params[token] = strings.Trim(sp[1], " ")
+			}
+		}
+
+		accept = append(accept, a)
+	}
+
+	slice := accept_slice(accept)
+	sort.Sort(slice)
+
+	return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+	asp := make([][]string, 0, len(alternatives))
+	for _, ctype := range alternatives {
+		asp = append(asp, strings.SplitN(ctype, "/", 2))
+	}
+	for _, clause := range ParseAccept(header) {
+		for i, ctsp := range asp {
+			if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+				content_type = alternatives[i]
+				return
+			}
+			if clause.Type == ctsp[0] && clause.SubType == "*" {
+				content_type = alternatives[i]
+				return
+			}
+			if clause.Type == "*" && clause.SubType == "*" {
+				content_type = alternatives[i]
+				return
+			}
+		}
+	}
+	return
+}

+ 2388 - 0
vendor/src/github.com/beorn7/perks/quantile/exampledata.txt

@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4

+ 292 - 0
vendor/src/github.com/beorn7/perks/quantile/stream.go

@@ -0,0 +1,292 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+	"math"
+	"sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+	Value float64 `json:",string"`
+	Width float64 `json:",string"`
+	Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int           { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * r
+	}
+	return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * (s.n - r)
+	}
+	return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targets map[float64]float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		var m = math.MaxFloat64
+		var f float64
+		for quantile, epsilon := range targets {
+			if quantile*s.n <= r {
+				f = (2 * epsilon * r) / quantile
+			} else {
+				f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+			}
+			if f < m {
+				m = f
+			}
+		}
+		return m
+	}
+	return newStream(ƒ)
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+	*stream
+	b      Samples
+	sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+	x := &stream{ƒ: ƒ}
+	return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+	s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+	s.b = append(s.b, sample)
+	s.sorted = false
+	if len(s.b) == cap(s.b) {
+		s.flush()
+	}
+}
+
+// Query returns the computed qth percentiles value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+	if !s.flushed() {
+		// Fast path when there hasn't been enough data for a flush;
+		// this also yields better accuracy for small sets of data.
+		l := len(s.b)
+		if l == 0 {
+			return 0
+		}
+		i := int(float64(l) * q)
+		if i > 0 {
+			i -= 1
+		}
+		s.maybeSort()
+		return s.b[i].Value
+	}
+	s.flush()
+	return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying streams samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+	sort.Sort(samples)
+	s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list reusing the samples buffer memory.
+func (s *Stream) Reset() {
+	s.stream.reset()
+	s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+	if !s.flushed() {
+		return s.b
+	}
+	s.flush()
+	return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+	return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+	s.maybeSort()
+	s.stream.merge(s.b)
+	s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+	if !s.sorted {
+		s.sorted = true
+		sort.Sort(s.b)
+	}
+}
+
+func (s *Stream) flushed() bool {
+	return len(s.stream.l) > 0
+}
+
+type stream struct {
+	n float64
+	l []Sample
+	ƒ invariant
+}
+
+func (s *stream) reset() {
+	s.l = s.l[:0]
+	s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+	s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+	// TODO(beorn7): This tries to merge not only individual samples, but
+	// whole summaries. The paper doesn't mention merging summaries at
+	// all. Unittests show that the merging is inaccurate. Find out how to
+	// do merges properly.
+	var r float64
+	i := 0
+	for _, sample := range samples {
+		for ; i < len(s.l); i++ {
+			c := s.l[i]
+			if c.Value > sample.Value {
+				// Insert at position i.
+				s.l = append(s.l, Sample{})
+				copy(s.l[i+1:], s.l[i:])
+				s.l[i] = Sample{
+					sample.Value,
+					sample.Width,
+					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+					// TODO(beorn7): How to calculate delta correctly?
+				}
+				i++
+				goto inserted
+			}
+			r += c.Width
+		}
+		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+		i++
+	inserted:
+		s.n += sample.Width
+		r += sample.Width
+	}
+	s.compress()
+}
+
+func (s *stream) count() int {
+	return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+	t := math.Ceil(q * s.n)
+	t += math.Ceil(s.ƒ(s, t) / 2)
+	p := s.l[0]
+	var r float64
+	for _, c := range s.l[1:] {
+		r += p.Width
+		if r+c.Width+c.Delta > t {
+			return p.Value
+		}
+		p = c
+	}
+	return p.Value
+}
+
+func (s *stream) compress() {
+	if len(s.l) < 2 {
+		return
+	}
+	x := s.l[len(s.l)-1]
+	xi := len(s.l) - 1
+	r := s.n - 1 - x.Width
+
+	for i := len(s.l) - 2; i >= 0; i-- {
+		c := s.l[i]
+		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+			x.Width += c.Width
+			s.l[xi] = x
+			// Remove element at i.
+			copy(s.l[i:], s.l[i+1:])
+			s.l = s.l[:len(s.l)-1]
+			xi -= 1
+		} else {
+			x = c
+			xi = i
+		}
+		r -= c.Width
+	}
+}
+
+func (s *stream) samples() Samples {
+	samples := make(Samples, len(s.l))
+	copy(samples, s.l)
+	return samples
+}

+ 24 - 0
vendor/src/github.com/cloudflare/cfssl/LICENSE

@@ -0,0 +1,24 @@
+Copyright (c) 2014 CloudFlare Inc.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 94 - 0
vendor/src/github.com/cloudflare/cfssl/auth/auth.go

@@ -0,0 +1,94 @@
+// Package auth implements an interface for providing CFSSL
+// authentication. This is meant to authenticate a client CFSSL to a
+// remote CFSSL in order to prevent unauthorised use of the signature
+// capabilities. This package provides both the interface and a
+// standard HMAC-based implementation.
+package auth
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+)
+
+// An AuthenticatedRequest contains a request and authentication
+// token. The Provider may determine whether to validate the timestamp
+// and remote address.
+type AuthenticatedRequest struct {
+	// An Authenticator decides whether to use this field.
+	Timestamp     int64  `json:"timestamp,omitempty"`
+	RemoteAddress []byte `json:"remote_address,omitempty"`
+	Token         []byte `json:"token"`
+	Request       []byte `json:"request"`
+}
+
+// A Provider can generate tokens from a request and verify a
+// request. The handling of additional authentication data (such as
+// the IP address) is handled by the concrete type, as is any
+// serialisation and state-keeping.
+type Provider interface {
+	Token(req []byte) (token []byte, err error)
+	Verify(aReq *AuthenticatedRequest) bool
+}
+
+// Standard implements an HMAC-SHA-256 authentication provider. It may
+// be supplied additional data at creation time that will be used as
+// request || additional-data with the HMAC.
+type Standard struct {
+	key []byte
+	ad  []byte
+}
+
+// New generates a new standard authentication provider from the key
+// and additional data. The additional data will be used when
+// generating a new token.
+//
+// The key may be supplied directly as a hex-encoded string, or
+// indirectly with the prefix "env:" (read from the named environment
+// variable) or "file:" (read from the named file); any other prefix
+// before a colon is an error. The resolved key must decode as hex.
+func New(key string, ad []byte) (*Standard, error) {
+	if splitKey := strings.SplitN(key, ":", 2); len(splitKey) == 2 {
+		switch splitKey[0] {
+		case "env":
+			key = os.Getenv(splitKey[1])
+		case "file":
+			data, err := ioutil.ReadFile(splitKey[1])
+			if err != nil {
+				return nil, err
+			}
+			key = string(data)
+		default:
+			return nil, fmt.Errorf("unknown key prefix: %s", splitKey[0])
+		}
+	}
+
+	keyBytes, err := hex.DecodeString(key)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Standard{keyBytes, ad}, nil
+}
+
+// Token generates a new authentication token from the request. The
+// token is the HMAC-SHA-256 of request || additional-data under the
+// provider's key; the returned error is always nil.
+func (p Standard) Token(req []byte) (token []byte, err error) {
+	h := hmac.New(sha256.New, p.key)
+	h.Write(req)
+	h.Write(p.ad)
+	return h.Sum(nil), nil
+}
+
+// Verify determines whether an authenticated request is valid. It
+// recomputes the token over ad.Request and compares it to ad.Token with
+// hmac.Equal, a constant-time comparison. A nil request is rejected.
+// The Timestamp and RemoteAddress fields are not consulted by this
+// provider.
+func (p Standard) Verify(ad *AuthenticatedRequest) bool {
+	if ad == nil {
+		return false
+	}
+
+	// Standard token generation returns no error.
+	token, _ := p.Token(ad.Request)
+	if len(ad.Token) != len(token) {
+		return false
+	}
+
+	return hmac.Equal(token, ad.Token)
+}

+ 58 - 0
vendor/src/github.com/cloudflare/cfssl/certdb/README.md

@@ -0,0 +1,58 @@
+# certdb usage
+
+Using a database enables additional functionality for existing commands when a
+db config is provided:
+
+ - `sign` and `gencert` add a certificate to the certdb after signing it
+ - `serve` enables database functionality for the sign and revoke endpoints
+
+A database is required for the following:
+
+ - `revoke` marks certificates revoked in the database with an optional reason
+ - `ocsprefresh` refreshes the table of cached OCSP responses
+ - `ocspdump` outputs cached OCSP responses in a concatenated base64-encoded format
+
+## Setup/Migration
+
+This directory stores [goose](https://bitbucket.org/liamstask/goose/) db migration scripts for various DB backends.
+Currently supported:
+ - SQLite in sqlite
+ - PostgreSQL in pg
+
+### Get goose
+
+    go get bitbucket.org/liamstask/goose/
+
+### Use goose to start and terminate a SQLite DB
+To start a SQLite DB using goose:
+
+    goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite up
+
+To tear down a SQLite DB using goose
+
+    goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite down
+
+### Use goose to start and terminate a PostgreSQL DB
+To start a PostgreSQL using goose:
+
+    goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg up
+
+To tear down a PostgreSQL DB using goose
+
+    goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg down
+
+Note: the administration of the PostgreSQL DB is not included. We assume
+the databases being connected to are already created and access
+controls are properly handled.
+
+## CFSSL Configuration
+
+Several cfssl commands take a -db-config flag. Create a file with a
+JSON dictionary:
+
+    {"driver":"sqlite3","data_source":"certs.db"}
+
+or
+
+    {"driver":"postgres","data_source":"postgres://user:password@host/db"}
+

+ 40 - 0
vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go

@@ -0,0 +1,40 @@
+package certdb
+
+import (
+	"time"
+)
+
+// CertificateRecord encodes a certificate and its metadata
+// that will be recorded in a database.
+type CertificateRecord struct {
+	Serial    string    `db:"serial_number"`
+	AKI       string    `db:"authority_key_identifier"`
+	CALabel   string    `db:"ca_label"`
+	Status    string    `db:"status"`
+	Reason    int       `db:"reason"`
+	Expiry    time.Time `db:"expiry"`
+	RevokedAt time.Time `db:"revoked_at"`
+	PEM       string    `db:"pem"`
+}
+
+// OCSPRecord encodes a OCSP response body and its metadata
+// that will be recorded in a database.
+type OCSPRecord struct {
+	Serial string    `db:"serial_number"`
+	AKI    string    `db:"authority_key_identifier"`
+	Body   string    `db:"body"`
+	Expiry time.Time `db:"expiry"`
+}
+
+// Accessor abstracts the CRUD of certdb objects from a DB.
+type Accessor interface {
+	// Certificate records: insert, look up by serial/AKI, list all
+	// unexpired certificates, and mark one revoked with a reason code.
+	InsertCertificate(cr CertificateRecord) error
+	GetCertificate(serial, aki string) ([]CertificateRecord, error)
+	GetUnexpiredCertificates() ([]CertificateRecord, error)
+	RevokeCertificate(serial, aki string, reasonCode int) error
+	// Cached OCSP responses: insert, look up by serial/AKI, list all
+	// unexpired responses, and update (or insert-or-update) a response
+	// body together with its expiry.
+	InsertOCSP(rr OCSPRecord) error
+	GetOCSP(serial, aki string) ([]OCSPRecord, error)
+	GetUnexpiredOCSPs() ([]OCSPRecord, error)
+	UpdateOCSP(serial, aki, body string, expiry time.Time) error
+	UpsertOCSP(serial, aki, body string, expiry time.Time) error
+}

+ 563 - 0
vendor/src/github.com/cloudflare/cfssl/config/config.go

@@ -0,0 +1,563 @@
+// Package config contains the configuration logic for CFSSL.
+package config
+
+import (
+	"crypto/x509"
+	"encoding/asn1"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/cloudflare/cfssl/auth"
+	cferr "github.com/cloudflare/cfssl/errors"
+	"github.com/cloudflare/cfssl/helpers"
+	"github.com/cloudflare/cfssl/log"
+	ocspConfig "github.com/cloudflare/cfssl/ocsp/config"
+)
+
+// A CSRWhitelist stores booleans for fields in the CSR. If a CSRWhitelist is
+// not present in a SigningProfile, all of these fields may be copied from the
+// CSR into the signed certificate. If a CSRWhitelist *is* present in a
+// SigningProfile, only those fields with a `true` value in the CSRWhitelist may
+// be copied from the CSR to the signed certificate. Note that some of these
+// fields, like Subject, can be provided or partially provided through the API.
+// Since API clients are expected to be trusted, but CSRs are not, fields
+// provided through the API are not subject to whitelisting through this
+// mechanism.
+type CSRWhitelist struct {
+	Subject, PublicKeyAlgorithm, PublicKey, SignatureAlgorithm bool
+	DNSNames, IPAddresses, EmailAddresses                      bool
+}
+
+// OID is our own version of asn1's ObjectIdentifier, so we can define a custom
+// JSON marshal / unmarshal.
+type OID asn1.ObjectIdentifier
+
+// CertificatePolicy represents the ASN.1 PolicyInformation structure from
+// https://tools.ietf.org/html/rfc3280.html#page-106.
+// Valid values of Type are "id-qt-unotice" and "id-qt-cps"
+type CertificatePolicy struct {
+	ID         OID
+	Qualifiers []CertificatePolicyQualifier
+}
+
+// CertificatePolicyQualifier represents a single qualifier from an ASN.1
+// PolicyInformation structure.
+type CertificatePolicyQualifier struct {
+	Type  string
+	Value string
+}
+
+// AuthRemote is an authenticated remote signer.
+type AuthRemote struct {
+	RemoteName  string `json:"remote"`
+	AuthKeyName string `json:"auth_key"`
+}
+
+// A SigningProfile stores information that the CA needs to store
+// signature policy.
+type SigningProfile struct {
+	Usage               []string   `json:"usages"`
+	IssuerURL           []string   `json:"issuer_urls"`
+	OCSP                string     `json:"ocsp_url"`
+	CRL                 string     `json:"crl_url"`
+	CA                  bool       `json:"is_ca"`
+	OCSPNoCheck         bool       `json:"ocsp_no_check"`
+	ExpiryString        string     `json:"expiry"`
+	BackdateString      string     `json:"backdate"`
+	AuthKeyName         string     `json:"auth_key"`
+	RemoteName          string     `json:"remote"`
+	NotBefore           time.Time  `json:"not_before"`
+	NotAfter            time.Time  `json:"not_after"`
+	NameWhitelistString string     `json:"name_whitelist"`
+	AuthRemote          AuthRemote `json:"auth_remote"`
+	CTLogServers        []string   `json:"ct_log_servers"`
+	AllowedExtensions   []OID      `json:"allowed_extensions"`
+	CertStore           string     `json:"cert_store"`
+
+	Policies                    []CertificatePolicy
+	Expiry                      time.Duration
+	Backdate                    time.Duration
+	Provider                    auth.Provider
+	RemoteProvider              auth.Provider
+	RemoteServer                string
+	CSRWhitelist                *CSRWhitelist
+	NameWhitelist               *regexp.Regexp
+	ExtensionWhitelist          map[string]bool
+	ClientProvidesSerialNumbers bool
+}
+
+// UnmarshalJSON unmarshals a JSON string into an OID.
+func (oid *OID) UnmarshalJSON(data []byte) (err error) {
+	if data[0] != '"' || data[len(data)-1] != '"' {
+		return errors.New("OID JSON string not wrapped in quotes." + string(data))
+	}
+	data = data[1 : len(data)-1]
+	parsedOid, err := parseObjectIdentifier(string(data))
+	if err != nil {
+		return err
+	}
+	*oid = OID(parsedOid)
+	return
+}
+
+// MarshalJSON marshals an oid into a JSON string.
+func (oid OID) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(`"%v"`, asn1.ObjectIdentifier(oid))), nil
+}
+
+// parseObjectIdentifier converts a dotted OID string such as
+// "1.2.840.113549" into an asn1.ObjectIdentifier.
+func parseObjectIdentifier(oidString string) (oid asn1.ObjectIdentifier, err error) {
+	// NOTE(review): this pattern is unanchored, so MatchString accepts any
+	// string containing an OID-like substring; the strconv.Atoi loop below
+	// is what actually rejects non-numeric segments.
+	validOID, err := regexp.MatchString("\\d(\\.\\d+)*", oidString)
+	if err != nil {
+		return
+	}
+	if !validOID {
+		err = errors.New("Invalid OID")
+		return
+	}
+
+	segments := strings.Split(oidString, ".")
+	oid = make(asn1.ObjectIdentifier, len(segments))
+	for i, intString := range segments {
+		oid[i], err = strconv.Atoi(intString)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+const timeFormat = "2006-01-02T15:04:05"
+
+// populate is used to fill in the fields that are not in JSON
+//
+// First, the ExpiryString parameter is needed to parse
+// expiration timestamps from JSON. The JSON decoder is not able to
+// decode a string time duration to a time.Duration, so this is called
+// when loading the configuration to properly parse and fill out the
+// Expiry parameter.
+// This function is also used to create references to the auth key
+// and default remote for the profile.
+// It returns true if ExpiryString is a valid representation of a
+// time.Duration, and the AuthKeyString and RemoteName point to
+// valid objects. It returns false otherwise.
+func (p *SigningProfile) populate(cfg *Config) error {
+	if p == nil {
+		return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("can't parse nil profile"))
+	}
+
+	var err error
+	if p.RemoteName == "" && p.AuthRemote.RemoteName == "" {
+		log.Debugf("parse expiry in profile")
+		if p.ExpiryString == "" {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("empty expiry string"))
+		}
+
+		dur, err := time.ParseDuration(p.ExpiryString)
+		if err != nil {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+		}
+
+		log.Debugf("expiry is valid")
+		p.Expiry = dur
+
+		if p.BackdateString != "" {
+			dur, err = time.ParseDuration(p.BackdateString)
+			if err != nil {
+				return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+			}
+
+			p.Backdate = dur
+		}
+
+		if !p.NotBefore.IsZero() && !p.NotAfter.IsZero() && p.NotAfter.Before(p.NotBefore) {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+		}
+
+		if len(p.Policies) > 0 {
+			for _, policy := range p.Policies {
+				for _, qualifier := range policy.Qualifiers {
+					if qualifier.Type != "" && qualifier.Type != "id-qt-unotice" && qualifier.Type != "id-qt-cps" {
+						return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+							errors.New("invalid policy qualifier type"))
+					}
+				}
+			}
+		}
+	} else if p.RemoteName != "" {
+		log.Debug("match remote in profile to remotes section")
+		if p.AuthRemote.RemoteName != "" {
+			log.Error("profile has both a remote and an auth remote specified")
+			return cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+		}
+		if remote := cfg.Remotes[p.RemoteName]; remote != "" {
+			if err := p.updateRemote(remote); err != nil {
+				return err
+			}
+		} else {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+				errors.New("failed to find remote in remotes section"))
+		}
+	} else {
+		log.Debug("match auth remote in profile to remotes section")
+		if remote := cfg.Remotes[p.AuthRemote.RemoteName]; remote != "" {
+			if err := p.updateRemote(remote); err != nil {
+				return err
+			}
+		} else {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+				errors.New("failed to find remote in remotes section"))
+		}
+	}
+
+	if p.AuthKeyName != "" {
+		log.Debug("match auth key in profile to auth_keys section")
+		if key, ok := cfg.AuthKeys[p.AuthKeyName]; ok == true {
+			if key.Type == "standard" {
+				p.Provider, err = auth.New(key.Key, nil)
+				if err != nil {
+					log.Debugf("failed to create new standard auth provider: %v", err)
+					return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+						errors.New("failed to create new standard auth provider"))
+				}
+			} else {
+				log.Debugf("unknown authentication type %v", key.Type)
+				return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+					errors.New("unknown authentication type"))
+			}
+		} else {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+				errors.New("failed to find auth_key in auth_keys section"))
+		}
+	}
+
+	if p.AuthRemote.AuthKeyName != "" {
+		log.Debug("match auth remote key in profile to auth_keys section")
+		if key, ok := cfg.AuthKeys[p.AuthRemote.AuthKeyName]; ok == true {
+			if key.Type == "standard" {
+				p.RemoteProvider, err = auth.New(key.Key, nil)
+				if err != nil {
+					log.Debugf("failed to create new standard auth provider: %v", err)
+					return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+						errors.New("failed to create new standard auth provider"))
+				}
+			} else {
+				log.Debugf("unknown authentication type %v", key.Type)
+				return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+					errors.New("unknown authentication type"))
+			}
+		} else {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+				errors.New("failed to find auth_remote's auth_key in auth_keys section"))
+		}
+	}
+
+	if p.NameWhitelistString != "" {
+		log.Debug("compiling whitelist regular expression")
+		rule, err := regexp.Compile(p.NameWhitelistString)
+		if err != nil {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+				errors.New("failed to compile name whitelist section"))
+		}
+		p.NameWhitelist = rule
+	}
+
+	p.ExtensionWhitelist = map[string]bool{}
+	for _, oid := range p.AllowedExtensions {
+		p.ExtensionWhitelist[asn1.ObjectIdentifier(oid).String()] = true
+	}
+
+	return nil
+}
+
+// updateRemote takes a signing profile and initializes the remote server object
+// to the hostname:port combination sent by remote.
+func (p *SigningProfile) updateRemote(remote string) error {
+	if remote != "" {
+		p.RemoteServer = remote
+	}
+	return nil
+}
+
+// OverrideRemotes takes a signing configuration and updates the remote server object
+// to the hostname:port combination sent by remote
+func (p *Signing) OverrideRemotes(remote string) error {
+	if remote != "" {
+		var err error
+		for _, profile := range p.Profiles {
+			err = profile.updateRemote(remote)
+			if err != nil {
+				return err
+			}
+		}
+		err = p.Default.updateRemote(remote)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// NeedsRemoteSigner returns true if one of the profiles has a remote set
+func (p *Signing) NeedsRemoteSigner() bool {
+	for _, profile := range p.Profiles {
+		if profile.RemoteServer != "" {
+			return true
+		}
+	}
+
+	if p.Default.RemoteServer != "" {
+		return true
+	}
+
+	return false
+}
+
+// NeedsLocalSigner returns true if at least one profile (or the default
+// profile) does not have a remote signer set.
+func (p *Signing) NeedsLocalSigner() bool {
+	for _, profile := range p.Profiles {
+		if profile.RemoteServer == "" {
+			return true
+		}
+	}
+
+	if p.Default.RemoteServer == "" {
+		return true
+	}
+
+	return false
+}
+
+// Usages parses the list of key uses in the profile, translating them
+// to a list of X.509 key usages and extended key usages.  The unknown
+// uses are collected into a slice that is also returned.
+func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) {
+	for _, keyUse := range p.Usage {
+		if kuse, ok := KeyUsage[keyUse]; ok {
+			ku |= kuse
+		} else if ekuse, ok := ExtKeyUsage[keyUse]; ok {
+			eku = append(eku, ekuse)
+		} else {
+			unk = append(unk, keyUse)
+		}
+	}
+	return
+}
+
+// A valid profile must be a valid local profile or a valid remote profile.
+// A valid local profile has defined at least key usages to be used, and a
+// valid local default profile has defined at least a default expiration.
+// A valid remote profile (default or not) has a remote signer initialized.
+// In addition, a remote profile must have a valid auth provider if an
+// auth key is defined.
+func (p *SigningProfile) validProfile(isDefault bool) bool {
+	if p == nil {
+		return false
+	}
+
+	if p.RemoteName != "" {
+		log.Debugf("validate remote profile")
+
+		if p.RemoteServer == "" {
+			log.Debugf("invalid remote profile: no remote signer specified")
+			return false
+		}
+
+		if p.AuthKeyName != "" && p.Provider == nil {
+			log.Debugf("invalid remote profile: auth key name is defined but no auth provider is set")
+			return false
+		}
+
+		if p.AuthRemote.RemoteName != "" {
+			// NOTE(review): this branch only logs and does not return
+			// false; populate() already rejects a profile that sets both
+			// remote and auth_remote, so this message is diagnostic only.
+			log.Debugf("invalid remote profile: auth remote is also specified")
+		}
+	} else if p.AuthRemote.RemoteName != "" {
+		log.Debugf("validate auth remote profile")
+		if p.RemoteServer == "" {
+			log.Debugf("invalid auth remote profile: no remote signer specified")
+			return false
+		}
+
+		if p.AuthRemote.AuthKeyName == "" || p.RemoteProvider == nil {
+			log.Debugf("invalid auth remote profile: no auth key is defined")
+			return false
+		}
+	} else {
+		log.Debugf("validate local profile")
+		if !isDefault {
+			if len(p.Usage) == 0 {
+				log.Debugf("invalid local profile: no usages specified")
+				return false
+			} else if _, _, unk := p.Usages(); len(unk) == len(p.Usage) {
+				log.Debugf("invalid local profile: no valid usages")
+				return false
+			}
+		} else {
+			if p.Expiry == 0 {
+				log.Debugf("invalid local profile: no expiry set")
+				return false
+			}
+		}
+	}
+
+	log.Debugf("profile is valid")
+	return true
+}
+
+// Signing codifies the signature configuration policy for a CA.
+type Signing struct {
+	Profiles map[string]*SigningProfile `json:"profiles"`
+	Default  *SigningProfile            `json:"default"`
+}
+
+// Config stores configuration information for the CA.
+type Config struct {
+	Signing  *Signing           `json:"signing"`
+	OCSP     *ocspConfig.Config `json:"ocsp"`
+	AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"`
+	Remotes  map[string]string  `json:"remotes,omitempty"`
+}
+
+// Valid ensures that Config is a valid configuration. It should be
+// called immediately after parsing a configuration file.
+func (c *Config) Valid() bool {
+	return c.Signing.Valid()
+}
+
+// Valid checks the signature policies, ensuring they are valid
+// policies. A policy is valid if it has defined at least key usages
+// to be used, and a valid default profile has defined at least a
+// default expiration.
+func (p *Signing) Valid() bool {
+	if p == nil {
+		return false
+	}
+
+	log.Debugf("validating configuration")
+	if !p.Default.validProfile(true) {
+		log.Debugf("default profile is invalid")
+		return false
+	}
+
+	for _, sp := range p.Profiles {
+		if !sp.validProfile(false) {
+			log.Debugf("invalid profile")
+			return false
+		}
+	}
+	return true
+}
+
+// KeyUsage contains a mapping of string names to key usages.
+// NOTE(review): the misspelled key "content committment" is the name
+// existing configurations already use; renaming it would silently break
+// them, so it is preserved as-is in this vendored copy.
+var KeyUsage = map[string]x509.KeyUsage{
+	"signing":             x509.KeyUsageDigitalSignature,
+	"digital signature":   x509.KeyUsageDigitalSignature,
+	"content committment": x509.KeyUsageContentCommitment,
+	"key encipherment":    x509.KeyUsageKeyEncipherment,
+	"key agreement":       x509.KeyUsageKeyAgreement,
+	"data encipherment":   x509.KeyUsageDataEncipherment,
+	"cert sign":           x509.KeyUsageCertSign,
+	"crl sign":            x509.KeyUsageCRLSign,
+	"encipher only":       x509.KeyUsageEncipherOnly,
+	"decipher only":       x509.KeyUsageDecipherOnly,
+}
+
+// ExtKeyUsage contains a mapping of string names to extended key
+// usages.
+var ExtKeyUsage = map[string]x509.ExtKeyUsage{
+	"any":              x509.ExtKeyUsageAny,
+	"server auth":      x509.ExtKeyUsageServerAuth,
+	"client auth":      x509.ExtKeyUsageClientAuth,
+	"code signing":     x509.ExtKeyUsageCodeSigning,
+	"email protection": x509.ExtKeyUsageEmailProtection,
+	"s/mime":           x509.ExtKeyUsageEmailProtection,
+	"ipsec end system": x509.ExtKeyUsageIPSECEndSystem,
+	"ipsec tunnel":     x509.ExtKeyUsageIPSECTunnel,
+	"ipsec user":       x509.ExtKeyUsageIPSECUser,
+	"timestamping":     x509.ExtKeyUsageTimeStamping,
+	"ocsp signing":     x509.ExtKeyUsageOCSPSigning,
+	"microsoft sgc":    x509.ExtKeyUsageMicrosoftServerGatedCrypto,
+	"netscape sgc":     x509.ExtKeyUsageNetscapeServerGatedCrypto,
+}
+
+// An AuthKey contains an entry for a key used for authentication.
+type AuthKey struct {
+	// Type contains information needed to select the appropriate
+	// constructor. For example, "standard" for HMAC-SHA-256,
+	// "standard-ip" for HMAC-SHA-256 incorporating the client's
+	// IP.
+	Type string `json:"type"`
+	// Key contains the key information, such as a hex-encoded
+	// HMAC key.
+	Key string `json:"key"`
+}
+
+// DefaultConfig returns a default configuration specifying basic key
+// usage and a 1 year expiration time. The key usages chosen are
+// signing, key encipherment, client auth and server auth.
+func DefaultConfig() *SigningProfile {
+	d := helpers.OneYear
+	return &SigningProfile{
+		Usage:        []string{"signing", "key encipherment", "server auth", "client auth"},
+		Expiry:       d,
+		ExpiryString: "8760h",
+	}
+}
+
+// LoadFile attempts to load the configuration file stored at the path
+// and returns the configuration. On error, it returns nil.
+func LoadFile(path string) (*Config, error) {
+	log.Debugf("loading configuration file from %s", path)
+	if path == "" {
+		return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid path"))
+	}
+
+	body, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("could not read configuration file"))
+	}
+
+	return LoadConfig(body)
+}
+
+// LoadConfig attempts to load the configuration from a byte slice.
+// On error, it returns nil.
+func LoadConfig(config []byte) (*Config, error) {
+	var cfg = &Config{}
+	err := json.Unmarshal(config, &cfg)
+	if err != nil {
+		return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+			errors.New("failed to unmarshal configuration: "+err.Error()))
+	}
+
+	if cfg.Signing == nil {
+		return nil, errors.New("No \"signing\" field present")
+	}
+
+	if cfg.Signing.Default == nil {
+		log.Debugf("no default given: using default config")
+		cfg.Signing.Default = DefaultConfig()
+	} else {
+		if err := cfg.Signing.Default.populate(cfg); err != nil {
+			return nil, err
+		}
+	}
+
+	for k := range cfg.Signing.Profiles {
+		if err := cfg.Signing.Profiles[k].populate(cfg); err != nil {
+			return nil, err
+		}
+	}
+
+	if !cfg.Valid() {
+		return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid configuration"))
+	}
+
+	log.Debugf("configuration ok")
+	return cfg, nil
+}

+ 188 - 0
vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go

@@ -0,0 +1,188 @@
+// Package pkcs7 implements the subset of the CMS PKCS #7 datatype that is typically
+// used to package certificates and CRLs.  Using openssl, every certificate converted
+// to PKCS #7 format from another encoding such as PEM conforms to this implementation.
+// reference: https://www.openssl.org/docs/apps/crl2pkcs7.html
+//
+//			PKCS #7 Data type, reference: https://tools.ietf.org/html/rfc2315
+//
+// The full pkcs#7 cryptographic message syntax allows for cryptographic enhancements,
+// for example data can be encrypted and signed and then packaged through pkcs#7 to be
+// sent over a network and then verified and decrypted.  It is asn1, and the type of
+// PKCS #7 ContentInfo, which comprises the PKCS #7 structure, is:
+//
+//			ContentInfo ::= SEQUENCE {
+//				contentType ContentType,
+//				content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL
+//			}
+//
+// There are 6 possible ContentTypes, data, signedData, envelopedData,
+// signedAndEnvelopedData, digestedData, and encryptedData.  Here signedData, Data, and encrypted
+// Data are implemented, as the degenerate case of signedData without a signature is the typical
+// format for transferring certificates and CRLS, and Data and encryptedData are used in PKCS #12
+// formats.
+// The ContentType signedData has the form:
+//
+//
+//			signedData ::= SEQUENCE {
+//				version Version,
+//				digestAlgorithms DigestAlgorithmIdentifiers,
+//				contentInfo ContentInfo,
+//				certificates [0] IMPLICIT ExtendedCertificatesAndCertificates OPTIONAL
+//				crls [1] IMPLICIT CertificateRevocationLists OPTIONAL,
+//				signerInfos SignerInfos
+//			}
+//
+// As of yet signerInfos and digestAlgorithms are not parsed, as they are not relevant to
+// this system's use of PKCS #7 data.  Version is an integer type, note that PKCS #7 is
+// recursive; this second layer of ContentInfo is similarly ignored for our degenerate
+// usage.  The ExtendedCertificatesAndCertificates type consists of a sequence of choices
+// between PKCS #6 extended certificates and x509 certificates.  Any sequence consisting
+// of any number of extended certificates is not yet supported in this implementation.
+//
+// The ContentType Data is simply a raw octet string and is parsed directly into a Go []byte slice.
+//
+// The ContentType encryptedData is the most complicated and its form can be gathered by
+// the go type below.  It essentially contains a raw octet string of encrypted data and an
+// algorithm identifier for use in decrypting this data.
+package pkcs7
+
+import (
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+
+	cferr "github.com/cloudflare/cfssl/errors"
+)
+
+// Types used for asn1 Unmarshaling.
+
+// signedData mirrors the ASN.1 SignedData SEQUENCE; only Version,
+// Certificates and Crls are consumed by ParsePKCS7.
+type signedData struct {
+	Version          int
+	DigestAlgorithms asn1.RawValue
+	ContentInfo      asn1.RawValue
+	// NOTE(review): a struct tag may carry each key only once —
+	// reflect.StructTag returns the first "asn1" value, so the second
+	// `asn1:"tag:0"` below is ignored; the conventional form would be
+	// `asn1:"optional,tag:0"`. Flagging rather than diverging from the
+	// vendored upstream source.
+	Certificates     asn1.RawValue `asn1:"optional" asn1:"tag:0"`
+	Crls             asn1.RawValue `asn1:"optional"`
+	SignerInfos      asn1.RawValue
+}
+
+type initPKCS7 struct {
+	Raw         asn1.RawContent
+	ContentType asn1.ObjectIdentifier
+	Content     asn1.RawValue `asn1:"tag:0,explicit,optional"`
+}
+
+// Object identifier strings of the three implemented PKCS7 types.
+const (
+	ObjIDData          = "1.2.840.113549.1.7.1"
+	ObjIDSignedData    = "1.2.840.113549.1.7.2"
+	ObjIDEncryptedData = "1.2.840.113549.1.7.6"
+)
+
+// PKCS7 represents the ASN1 PKCS #7 Content type.  It contains one of three
+// possible types of Content objects, as denoted by the object identifier in
+// the ContentInfo field, the other two being nil.  SignedData
+// is the degenerate SignedData Content info without signature used
+// to hold certificates and crls.  Data is raw bytes, and EncryptedData
+// is as defined in PKCS #7 standard.
+type PKCS7 struct {
+	Raw         asn1.RawContent
+	ContentInfo string
+	Content     Content
+}
+
+// Content implements three of the six possible PKCS7 data types.  Only one is non-nil.
+type Content struct {
+	Data          []byte
+	SignedData    SignedData
+	EncryptedData EncryptedData
+}
+
+// SignedData defines the typical carrier of certificates and crls.
+type SignedData struct {
+	Raw          asn1.RawContent
+	Version      int
+	Certificates []*x509.Certificate
+	Crl          *pkix.CertificateList
+}
+
+// Data contains raw bytes.  Used as a subtype in PKCS12.
+type Data struct {
+	Bytes []byte
+}
+
+// EncryptedData contains encrypted data.  Used as a subtype in PKCS12.
+type EncryptedData struct {
+	Raw                  asn1.RawContent
+	Version              int
+	EncryptedContentInfo EncryptedContentInfo
+}
+
+// EncryptedContentInfo is a subtype of PKCS7EncryptedData.
+type EncryptedContentInfo struct {
+	Raw                        asn1.RawContent
+	ContentType                asn1.ObjectIdentifier
+	ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
+	EncryptedContent           []byte `asn1:"tag:0,optional"`
+}
+
+// ParsePKCS7 attempts to parse the DER encoded bytes of a
+// PKCS7 structure. Supported content types are Data (raw octets),
+// SignedData (the degenerate certificate/CRL carrier — signerInfos and
+// digestAlgorithms are not parsed), and EncryptedData (version 0 only).
+// Any other content type yields a ParseFailed error.
+func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
+
+	var pkcs7 initPKCS7
+	_, err = asn1.Unmarshal(raw, &pkcs7)
+	if err != nil {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+	}
+
+	msg = new(PKCS7)
+	msg.Raw = pkcs7.Raw
+	msg.ContentInfo = pkcs7.ContentType.String()
+	switch {
+	case msg.ContentInfo == ObjIDData:
+		msg.ContentInfo = "Data"
+		_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data)
+		if err != nil {
+			return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+		}
+	case msg.ContentInfo == ObjIDSignedData:
+		msg.ContentInfo = "SignedData"
+		var signedData signedData
+		_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData)
+		if err != nil {
+			return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+		}
+		if len(signedData.Certificates.Bytes) != 0 {
+			msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes)
+			if err != nil {
+				return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+			}
+		}
+		if len(signedData.Crls.Bytes) != 0 {
+			msg.Content.SignedData.Crl, err = x509.ParseDERCRL(signedData.Crls.Bytes)
+			if err != nil {
+				return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+			}
+		}
+		msg.Content.SignedData.Version = signedData.Version
+		msg.Content.SignedData.Raw = pkcs7.Content.Bytes
+	case msg.ContentInfo == ObjIDEncryptedData:
+		msg.ContentInfo = "EncryptedData"
+		var encryptedData EncryptedData
+		_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData)
+		if err != nil {
+			return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+		}
+		if encryptedData.Version != 0 {
+			return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Only support for PKCS #7 encryptedData version 0"))
+		}
+		msg.Content.EncryptedData = encryptedData
+
+	default:
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Attempt to parse PKCS# 7 Content not of type data, signed data or encrypted data"))
+	}
+
+	return msg, nil
+
+}

+ 414 - 0
vendor/src/github.com/cloudflare/cfssl/csr/csr.go

@@ -0,0 +1,414 @@
+// Package csr implements certificate requests for CFSSL.
+package csr
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"errors"
+	"net"
+	"net/mail"
+	"strings"
+
+	cferr "github.com/cloudflare/cfssl/errors"
+	"github.com/cloudflare/cfssl/helpers"
+	"github.com/cloudflare/cfssl/log"
+)
+
+// Supported ECDSA curve sizes, in bits.
+const (
+	curveP256 = 256
+	curveP384 = 384
+	curveP521 = 521
+)
+
+// A Name contains the SubjectInfo fields.
+type Name struct {
+	C            string // Country
+	ST           string // State
+	L            string // Locality
+	O            string // OrganisationName
+	OU           string // OrganisationalUnitName
+	SerialNumber string // Subject serial number
+}
+
+// A KeyRequest is a generic request for a new key.
+type KeyRequest interface {
+	// Algo reports the key algorithm, e.g. "rsa" or "ecdsa".
+	Algo() string
+	// Size reports the key size in bits (RSA) or the curve size (ECDSA).
+	Size() int
+	// Generate produces a new private key of the requested kind.
+	Generate() (crypto.PrivateKey, error)
+	// SigAlgo reports an X.509 signature algorithm suited to the key.
+	SigAlgo() x509.SignatureAlgorithm
+}
+
+// A BasicKeyRequest contains the algorithm and key size for a new private key.
+type BasicKeyRequest struct {
+	A string `json:"algo"`
+	S int    `json:"size"`
+}
+
+// NewBasicKeyRequest returns a default BasicKeyRequest (ECDSA with
+// curve P-256).
+func NewBasicKeyRequest() *BasicKeyRequest {
+	return &BasicKeyRequest{"ecdsa", curveP256}
+}
+
+// Algo returns the requested key algorithm represented as a string.
+func (kr *BasicKeyRequest) Algo() string {
+	return kr.A
+}
+
+// Size returns the requested key size.
+func (kr *BasicKeyRequest) Size() int {
+	return kr.S
+}
+
+// Generate generates a key as specified in the request. Currently,
+// only ECDSA and RSA are supported.
+func (kr *BasicKeyRequest) Generate() (crypto.PrivateKey, error) {
+	log.Debugf("generate key from request: algo=%s, size=%d", kr.Algo(), kr.Size())
+	switch kr.Algo() {
+	case "rsa":
+		// Reject RSA keys that are too weak (<2048 bits) or
+		// impractically large (>8192 bits).
+		if kr.Size() < 2048 {
+			return nil, errors.New("RSA key is too weak")
+		}
+		if kr.Size() > 8192 {
+			return nil, errors.New("RSA key size too large")
+		}
+		return rsa.GenerateKey(rand.Reader, kr.Size())
+	case "ecdsa":
+		// For ECDSA the "size" selects one of the three NIST curves.
+		var curve elliptic.Curve
+		switch kr.Size() {
+		case curveP256:
+			curve = elliptic.P256()
+		case curveP384:
+			curve = elliptic.P384()
+		case curveP521:
+			curve = elliptic.P521()
+		default:
+			return nil, errors.New("invalid curve")
+		}
+		return ecdsa.GenerateKey(curve, rand.Reader)
+	default:
+		return nil, errors.New("invalid algorithm")
+	}
+}
+
+// SigAlgo returns an appropriate X.509 signature algorithm given the
+// key request's type and size. Stronger keys are paired with stronger
+// digests; unknown algorithms map to UnknownSignatureAlgorithm.
+func (kr *BasicKeyRequest) SigAlgo() x509.SignatureAlgorithm {
+	switch kr.Algo() {
+	case "rsa":
+		switch {
+		case kr.Size() >= 4096:
+			return x509.SHA512WithRSA
+		case kr.Size() >= 3072:
+			return x509.SHA384WithRSA
+		case kr.Size() >= 2048:
+			return x509.SHA256WithRSA
+		default:
+			return x509.SHA1WithRSA
+		}
+	case "ecdsa":
+		switch kr.Size() {
+		case curveP521:
+			return x509.ECDSAWithSHA512
+		case curveP384:
+			return x509.ECDSAWithSHA384
+		case curveP256:
+			return x509.ECDSAWithSHA256
+		default:
+			return x509.ECDSAWithSHA1
+		}
+	default:
+		return x509.UnknownSignatureAlgorithm
+	}
+}
+
+// CAConfig is a section used in the requests initialising a new CA.
+type CAConfig struct {
+	PathLength int    `json:"pathlen"`
+	Expiry     string `json:"expiry"`
+}
+
+// A CertificateRequest encapsulates the API interface to the
+// certificate request functionality.
+type CertificateRequest struct {
+	// NOTE(review): CN carries no json tag, so it marshals as "CN",
+	// unlike the lowercase-tagged fields below — confirm intended.
+	CN           string
+	Names        []Name     `json:"names"`
+	Hosts        []string   `json:"hosts"`
+	KeyRequest   KeyRequest `json:"key,omitempty"`
+	CA           *CAConfig  `json:"ca,omitempty"`
+	SerialNumber string     `json:"serialnumber,omitempty"`
+}
+
+// New returns a new, empty CertificateRequest with a
+// BasicKeyRequest.
+func New() *CertificateRequest {
+	return &CertificateRequest{
+		KeyRequest: NewBasicKeyRequest(),
+	}
+}
+
+// appendIf appends to a if s is not an empty string.
+func appendIf(s string, a *[]string) {
+	if s != "" {
+		*a = append(*a, s)
+	}
+}
+
+// Name returns the PKIX name for the request. Empty subject fields
+// are skipped rather than appended as empty strings.
+func (cr *CertificateRequest) Name() pkix.Name {
+	var name pkix.Name
+	name.CommonName = cr.CN
+
+	for _, n := range cr.Names {
+		appendIf(n.C, &name.Country)
+		appendIf(n.ST, &name.Province)
+		appendIf(n.L, &name.Locality)
+		appendIf(n.O, &name.Organization)
+		appendIf(n.OU, &name.OrganizationalUnit)
+	}
+	name.SerialNumber = cr.SerialNumber
+	return name
+}
+
+// ParseRequest takes a certificate request and generates a key and
+// CSR from it. It does no validation -- caveat emptor. It will,
+// however, fail if the key request is not valid (i.e., an unsupported
+// curve or RSA key size). The lack of validation was specifically
+// chosen to allow the end user to define a policy and validate the
+// request appropriately before calling this function.
+func ParseRequest(req *CertificateRequest) (csr, key []byte, err error) {
+	log.Info("received CSR")
+	if req.KeyRequest == nil {
+		req.KeyRequest = NewBasicKeyRequest()
+	}
+
+	log.Infof("generating key: %s-%d", req.KeyRequest.Algo(), req.KeyRequest.Size())
+	priv, err := req.KeyRequest.Generate()
+	if err != nil {
+		err = cferr.Wrap(cferr.PrivateKeyError, cferr.GenerationFailed, err)
+		return
+	}
+
+	// Serialize the private key to a PEM block appropriate to its type.
+	switch priv := priv.(type) {
+	case *rsa.PrivateKey:
+		key = x509.MarshalPKCS1PrivateKey(priv)
+		block := pem.Block{
+			Type:  "RSA PRIVATE KEY",
+			Bytes: key,
+		}
+		key = pem.EncodeToMemory(&block)
+	case *ecdsa.PrivateKey:
+		key, err = x509.MarshalECPrivateKey(priv)
+		if err != nil {
+			err = cferr.Wrap(cferr.PrivateKeyError, cferr.Unknown, err)
+			return
+		}
+		block := pem.Block{
+			Type:  "EC PRIVATE KEY",
+			Bytes: key,
+		}
+		key = pem.EncodeToMemory(&block)
+	default:
+		// Generate only returns RSA or ECDSA keys; anything else is a bug.
+		panic("Generate should have failed to produce a valid key.")
+	}
+
+	var tpl = x509.CertificateRequest{
+		Subject:            req.Name(),
+		SignatureAlgorithm: req.KeyRequest.SigAlgo(),
+	}
+
+	// Classify each host as an IP SAN, email SAN, or DNS SAN.
+	for i := range req.Hosts {
+		if ip := net.ParseIP(req.Hosts[i]); ip != nil {
+			tpl.IPAddresses = append(tpl.IPAddresses, ip)
+		} else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {
+			// NOTE(review): appends the raw host string, whereas Generate
+			// in this file appends email.Address — upstream inconsistency,
+			// preserved as-is in this vendored copy.
+			tpl.EmailAddresses = append(tpl.EmailAddresses, req.Hosts[i])
+		} else {
+			tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
+		}
+	}
+
+	csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
+	if err != nil {
+		log.Errorf("failed to generate a CSR: %v", err)
+		err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
+		return
+	}
+	block := pem.Block{
+		Type:  "CERTIFICATE REQUEST",
+		Bytes: csr,
+	}
+
+	log.Info("encoded CSR")
+	csr = pem.EncodeToMemory(&block)
+	return
+}
+
+// ExtractCertificateRequest extracts a CertificateRequest from
+// x509.Certificate. It is intended to be used for generating a new
+// certificate from an existing certificate. For a root certificate, the CA
+// expiry length is calculated as the duration between cert.NotAfter and
+// cert.NotBefore.
+func ExtractCertificateRequest(cert *x509.Certificate) *CertificateRequest {
+	req := New()
+	req.CN = cert.Subject.CommonName
+	req.Names = getNames(cert.Subject)
+	req.Hosts = getHosts(cert)
+	req.SerialNumber = cert.Subject.SerialNumber
+
+	if cert.IsCA {
+		req.CA = new(CAConfig)
+		// CA expiry length is calculated based on the input cert
+		// issue date and expiry date.
+		req.CA.Expiry = cert.NotAfter.Sub(cert.NotBefore).String()
+		req.CA.PathLength = cert.MaxPathLen
+	}
+
+	return req
+}
+
+// getHosts collects the certificate's IP addresses, DNS names, and
+// email addresses into a single host list.
+func getHosts(cert *x509.Certificate) []string {
+	var hosts []string
+	for _, ip := range cert.IPAddresses {
+		hosts = append(hosts, ip.String())
+	}
+	for _, dns := range cert.DNSNames {
+		hosts = append(hosts, dns)
+	}
+	for _, email := range cert.EmailAddresses {
+		hosts = append(hosts, email)
+	}
+
+	return hosts
+}
+
+// getNames returns an array of Names from the certificate.
+// It only cares about Country, Organization, OrganizationalUnit, Locality, Province.
+func getNames(sub pkix.Name) []Name {
+	// anonymous func for finding the max of a list of integers
+	max := func(v1 int, vn ...int) (max int) {
+		max = v1
+		for i := 0; i < len(vn); i++ {
+			if vn[i] > max {
+				max = vn[i]
+			}
+		}
+		return max
+	}
+
+	nc := len(sub.Country)
+	norg := len(sub.Organization)
+	nou := len(sub.OrganizationalUnit)
+	nl := len(sub.Locality)
+	np := len(sub.Province)
+
+	n := max(nc, norg, nou, nl, np)
+
+	// Zip the attribute lists index-wise; shorter lists leave the
+	// corresponding Name fields at their zero value.
+	names := make([]Name, n)
+	for i := range names {
+		if i < nc {
+			names[i].C = sub.Country[i]
+		}
+		if i < norg {
+			names[i].O = sub.Organization[i]
+		}
+		if i < nou {
+			names[i].OU = sub.OrganizationalUnit[i]
+		}
+		if i < nl {
+			names[i].L = sub.Locality[i]
+		}
+		if i < np {
+			names[i].ST = sub.Province[i]
+		}
+	}
+	return names
+}
+
+// A Generator is responsible for validating certificate requests.
+type Generator struct {
+	// Validator is applied to each incoming request before it is parsed.
+	Validator func(*CertificateRequest) error
+}
+
+// ProcessRequest validates and processes the incoming request. It is
+// a wrapper around a validator and the ParseRequest function.
+func (g *Generator) ProcessRequest(req *CertificateRequest) (csr, key []byte, err error) {
+
+	log.Info("generate received request")
+	err = g.Validator(req)
+	if err != nil {
+		log.Warningf("invalid request: %v", err)
+		return
+	}
+
+	csr, key, err = ParseRequest(req)
+	if err != nil {
+		return nil, nil, err
+	}
+	return
+}
+
+// IsNameEmpty returns true if the name has no identifying information in it.
+// SerialNumber alone does not count as identifying information.
+func IsNameEmpty(n Name) bool {
+	empty := func(s string) bool { return strings.TrimSpace(s) == "" }
+
+	if empty(n.C) && empty(n.ST) && empty(n.L) && empty(n.O) && empty(n.OU) {
+		return true
+	}
+	return false
+}
+
+// Regenerate uses the provided CSR as a template for signing a new
+// CSR using priv. Trailing data after the PEM-encoded request is
+// rejected.
+func Regenerate(priv crypto.Signer, csr []byte) ([]byte, error) {
+	req, extra, err := helpers.ParseCSR(csr)
+	if err != nil {
+		return nil, err
+	} else if len(extra) > 0 {
+		return nil, errors.New("csr: trailing data in certificate request")
+	}
+
+	return x509.CreateCertificateRequest(rand.Reader, req, priv)
+}
+
+// Generate creates a new CSR from a CertificateRequest structure and
+// an existing key. The KeyRequest field is ignored. The signature
+// algorithm is derived from priv with SHA-256 as the default digest.
+func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err error) {
+	sigAlgo := helpers.SignerAlgo(priv, crypto.SHA256)
+	if sigAlgo == x509.UnknownSignatureAlgorithm {
+		return nil, cferr.New(cferr.PrivateKeyError, cferr.Unavailable)
+	}
+
+	var tpl = x509.CertificateRequest{
+		Subject:            req.Name(),
+		SignatureAlgorithm: sigAlgo,
+	}
+
+	// Classify each host as an IP SAN, email SAN, or DNS SAN.
+	for i := range req.Hosts {
+		if ip := net.ParseIP(req.Hosts[i]); ip != nil {
+			tpl.IPAddresses = append(tpl.IPAddresses, ip)
+		} else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {
+			tpl.EmailAddresses = append(tpl.EmailAddresses, email.Address)
+		} else {
+			tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
+		}
+	}
+
+	csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
+	if err != nil {
+		log.Errorf("failed to generate a CSR: %v", err)
+		err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
+		return
+	}
+	block := pem.Block{
+		Type:  "CERTIFICATE REQUEST",
+		Bytes: csr,
+	}
+
+	log.Info("encoded CSR")
+	csr = pem.EncodeToMemory(&block)
+	return
+}

+ 46 - 0
vendor/src/github.com/cloudflare/cfssl/errors/doc.go

@@ -0,0 +1,46 @@
+/*
+Package errors provides error types returned in CF SSL.
+
+1. Type Error is intended for errors produced by CF SSL packages.
+It formats to a json object that consists of an error message and a 4-digit code for error reasoning.
+
+Example: {"code":1002, "message": "Failed to decode certificate"}
+
+The index of codes are listed below:
+	1XXX: CertificateError
+	    1000: Unknown
+	    1001: ReadFailed
+	    1002: DecodeFailed
+	    1003: ParseFailed
+	    1100: SelfSigned
+	    12XX: VerifyFailed
+	        121X: CertificateInvalid
+	            1210: NotAuthorizedToSign
+	            1211: Expired
+	            1212: CANotAuthorizedForThisName
+	            1213: TooManyIntermediates
+	            1214: IncompatibleUsage
+	        1220: UnknownAuthority
+	2XXX: PrivateKeyError
+	    2000: Unknown
+	    2001: ReadFailed
+	    2002: DecodeFailed
+	    2003: ParseFailed
+	    2100: Encrypted
+	    2200: NotRSAOrECC
+	    2300: KeyMismatch
+	    2400: GenerationFailed
+	    2500: Unavailable
+	3XXX: IntermediatesError
+	4XXX: RootError
+	5XXX: PolicyError
+	    5100: NoKeyUsages
+	    5200: InvalidPolicy
+	    5300: InvalidRequest
+	    5400: UnknownProfile
+	6XXX: DialError
+
+2. Type HTTPError is intended for the CF SSL API to consume. It contains an HTTP status code that will be read and returned
+by the API server.
+*/
+package errors

+ 420 - 0
vendor/src/github.com/cloudflare/cfssl/errors/error.go

@@ -0,0 +1,420 @@
+package errors
+
+import (
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+)
+
+// Error is the error type usually returned by functions in the CFSSL
+// packages. It contains a 4-digit error code where the most significant
+// digit describes the category where the error occurred and the rest 3
+// digits describe the specific error reason.
+type Error struct {
+	ErrorCode int    `json:"code"`
+	Message   string `json:"message"`
+}
+
+// Category is the most significant digit of the error code.
+type Category int
+
+// Reason is the last 3 digits of the error code.
+type Reason int
+
+const (
+	// Success indicates no error occurred.
+	Success Category = 1000 * iota // 0XXX
+
+	// CertificateError indicates a fault in a certificate.
+	CertificateError // 1XXX
+
+	// PrivateKeyError indicates a fault in a private key.
+	PrivateKeyError // 2XXX
+
+	// IntermediatesError indicates a fault in an intermediate.
+	IntermediatesError // 3XXX
+
+	// RootError indicates a fault in a root.
+	RootError // 4XXX
+
+	// PolicyError indicates an error arising from a malformed or
+	// non-existent policy, or a breach of policy.
+	PolicyError // 5XXX
+
+	// DialError indicates a network fault.
+	DialError // 6XXX
+
+	// APIClientError indicates a problem with the API client.
+	APIClientError // 7XXX
+
+	// OCSPError indicates a problem with OCSP signing
+	OCSPError // 8XXX
+
+	// CSRError indicates a problem with CSR parsing
+	CSRError // 9XXX
+
+	// CTError indicates a problem with the certificate transparency process
+	CTError // 10XXX
+
+	// CertStoreError indicates a problem with the certificate store
+	CertStoreError // 11XXX
+)
+
+// None is a non-specified error.
+const (
+	None Reason = iota
+)
+
+// Warning bit flags that may accompany a Success code.
+const (
+	BundleExpiringBit      int = 1 << iota // 0x01
+	BundleNotUbiquitousBit                 // 0x02
+)
+
+// Parsing errors, shared across all categories.
+const (
+	Unknown      Reason = iota // X000
+	ReadFailed                 // X001
+	DecodeFailed               // X002
+	ParseFailed                // X003
+)
+
+// The following represent certificate non-parsing errors, and must be
+// specified along with CertificateError.
+const (
+	// SelfSigned indicates that a certificate is self-signed and
+	// cannot be used in the manner being attempted.
+	SelfSigned Reason = 100 * (iota + 1) // Code 11XX
+
+	// VerifyFailed is an X.509 verification failure. The least two
+	// significant digits of 12XX is determined as the actual x509
+	// error is examined.
+	VerifyFailed // Code 12XX
+
+	// BadRequest indicates that the certificate request is invalid.
+	BadRequest // Code 13XX
+
+	// MissingSerial indicates that the profile specified
+	// 'ClientProvidesSerialNumbers', but the SignRequest did not include a serial
+	// number.
+	MissingSerial // Code 14XX
+)
+
+// Sub-codes mixed into a VerifyFailed (12XX) error by Wrap below.
+const (
+	certificateInvalid = 10 * (iota + 1) //121X
+	unknownAuthority                     //122X
+)
+
+// The following represent private-key non-parsing errors, and must be
+// specified with PrivateKeyError.
+const (
+	// Encrypted indicates that the private key is a PKCS #8 encrypted
+	// private key. At this time, CFSSL does not support decrypting
+	// these keys.
+	Encrypted Reason = 100 * (iota + 1) //21XX
+
+	// NotRSAOrECC indicates that the key is not an RSA or ECC
+	// private key; these are the only two private key types supported
+	// at this time by CFSSL.
+	NotRSAOrECC //22XX
+
+	// KeyMismatch indicates that the private key does not match
+	// the public key or certificate being presented with the key.
+	KeyMismatch //23XX
+
+	// GenerationFailed indicates that a private key could not
+	// be generated.
+	GenerationFailed //24XX
+
+	// Unavailable indicates that a private key mechanism (such as
+	// PKCS #11) was requested but support for that mechanism is
+	// not available.
+	Unavailable //25XX
+)
+
+// The following are policy-related non-parsing errors, and must be
+// specified along with PolicyError.
+const (
+	// NoKeyUsages indicates that the profile does not permit any
+	// key usages for the certificate.
+	NoKeyUsages Reason = 100 * (iota + 1) // 51XX
+
+	// InvalidPolicy indicates that policy being requested is not
+	// a valid policy or does not exist.
+	InvalidPolicy // 52XX
+
+	// InvalidRequest indicates a certificate request violated the
+	// constraints of the policy being applied to the request.
+	InvalidRequest // 53XX
+
+	// UnknownProfile indicates that the profile does not exist.
+	UnknownProfile // 54XX
+)
+
+// The following are API client related errors, and should be
+// specified with APIClientError.
+const (
+	// AuthenticationFailure occurs when the client is unable
+	// to obtain an authentication token for the request.
+	AuthenticationFailure Reason = 100 * (iota + 1)
+
+	// JSONError wraps an encoding/json error.
+	JSONError
+
+	// IOError wraps an io/ioutil error.
+	IOError
+
+	// ClientHTTPError wraps a net/http error.
+	ClientHTTPError
+
+	// ServerRequestFailed covers any other failures from the API
+	// client.
+	ServerRequestFailed
+)
+
+// The following are OCSP related errors, and should be
+// specified with OCSPError
+const (
+	// IssuerMismatch occurs when the certificate in the OCSP signing
+	// request was not issued by the CA that this responder responds for.
+	IssuerMismatch Reason = 100 * (iota + 1) // 81XX
+
+	// InvalidStatus occurs when the OCSP signing requests includes an
+	// invalid value for the certificate status.
+	InvalidStatus
+)
+
+// Certificate transparency related errors specified with CTError
+const (
+	// PrecertSubmissionFailed occurs when submitting a precertificate to
+	// a log server fails
+	// NOTE(review): untyped constant, unlike the Reason-typed groups
+	// above — confirm upstream intent.
+	PrecertSubmissionFailed = 100 * (iota + 1)
+)
+
+// Certificate persistence related errors specified with CertStoreError
+const (
+	// InsertionFailed occurs when a SQL insert query fails to complete.
+	InsertionFailed = 100 * (iota + 1)
+	// RecordNotFound occurs when a SQL query targeting on one unique
+	// record fails to update the specified row in the table.
+	RecordNotFound
+)
+
+// The error interface implementation, which formats to a JSON object string.
+func (e *Error) Error() string {
+	// Marshaling a struct of one int and one string should never fail;
+	// a failure here indicates a programmer error, hence the panic.
+	marshaled, err := json.Marshal(e)
+	if err != nil {
+		panic(err)
+	}
+	return string(marshaled)
+
+}
+
+// New returns an error that contains an error code and message derived from
+// the given category, reason. Currently, to avoid confusion, it is not
+// allowed to create an error of category Success. Unrecognized
+// category/reason combinations panic.
+func New(category Category, reason Reason) *Error {
+	errorCode := int(category) + int(reason)
+	var msg string
+	switch category {
+	case OCSPError:
+		switch reason {
+		case ReadFailed:
+			msg = "No certificate provided"
+		case IssuerMismatch:
+			msg = "Certificate not issued by this issuer"
+		case InvalidStatus:
+			msg = "Invalid revocation status"
+		}
+	case CertificateError:
+		switch reason {
+		case Unknown:
+			msg = "Unknown certificate error"
+		case ReadFailed:
+			msg = "Failed to read certificate"
+		case DecodeFailed:
+			msg = "Failed to decode certificate"
+		case ParseFailed:
+			msg = "Failed to parse certificate"
+		case SelfSigned:
+			msg = "Certificate is self signed"
+		case VerifyFailed:
+			msg = "Unable to verify certificate"
+		case BadRequest:
+			msg = "Invalid certificate request"
+		case MissingSerial:
+			msg = "Missing serial number in request"
+		default:
+			panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category CertificateError.",
+				reason))
+
+		}
+	case PrivateKeyError:
+		switch reason {
+		case Unknown:
+			msg = "Unknown private key error"
+		case ReadFailed:
+			msg = "Failed to read private key"
+		case DecodeFailed:
+			msg = "Failed to decode private key"
+		case ParseFailed:
+			msg = "Failed to parse private key"
+		case Encrypted:
+			msg = "Private key is encrypted."
+		case NotRSAOrECC:
+			msg = "Private key algorithm is not RSA or ECC"
+		case KeyMismatch:
+			msg = "Private key does not match public key"
+		case GenerationFailed:
+			msg = "Failed to new private key"
+		case Unavailable:
+			msg = "Private key is unavailable"
+		default:
+			panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PrivateKeyError.",
+				reason))
+		}
+	case IntermediatesError:
+		switch reason {
+		case Unknown:
+			msg = "Unknown intermediate certificate error"
+		case ReadFailed:
+			msg = "Failed to read intermediate certificate"
+		case DecodeFailed:
+			msg = "Failed to decode intermediate certificate"
+		case ParseFailed:
+			msg = "Failed to parse intermediate certificate"
+		default:
+			panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category IntermediatesError.",
+				reason))
+		}
+	case RootError:
+		switch reason {
+		case Unknown:
+			msg = "Unknown root certificate error"
+		case ReadFailed:
+			msg = "Failed to read root certificate"
+		case DecodeFailed:
+			msg = "Failed to decode root certificate"
+		case ParseFailed:
+			msg = "Failed to parse root certificate"
+		default:
+			panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category RootError.",
+				reason))
+		}
+	case PolicyError:
+		switch reason {
+		case Unknown:
+			msg = "Unknown policy error"
+		case NoKeyUsages:
+			msg = "Invalid policy: no key usage available"
+		case InvalidPolicy:
+			msg = "Invalid or unknown policy"
+		case InvalidRequest:
+			msg = "Policy violation request"
+		case UnknownProfile:
+			msg = "Unknown policy profile"
+		default:
+			panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PolicyError.",
+				reason))
+		}
+	case DialError:
+		switch reason {
+		case Unknown:
+			msg = "Failed to dial remote server"
+		default:
+			panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category DialError.",
+				reason))
+		}
+	case APIClientError:
+		switch reason {
+		case AuthenticationFailure:
+			msg = "API client authentication failure"
+		case JSONError:
+			msg = "API client JSON config error"
+		case ClientHTTPError:
+			msg = "API client HTTP error"
+		case IOError:
+			msg = "API client IO error"
+		case ServerRequestFailed:
+			msg = "API client error: Server request failed"
+		default:
+			panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category APIClientError.",
+				reason))
+		}
+	case CSRError:
+		switch reason {
+		case Unknown:
+			msg = "CSR parsing failed due to unknown error"
+		case ReadFailed:
+			msg = "CSR file read failed"
+		case ParseFailed:
+			msg = "CSR Parsing failed"
+		case DecodeFailed:
+			msg = "CSR Decode failed"
+		case BadRequest:
+			msg = "CSR Bad request"
+		default:
+			panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category APIClientError.", reason))
+		}
+	case CTError:
+		switch reason {
+		case Unknown:
+			msg = "Certificate transparency parsing failed due to unknown error"
+		case PrecertSubmissionFailed:
+			msg = "Certificate transparency precertificate submission failed"
+		default:
+			panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CTError.", reason))
+		}
+	case CertStoreError:
+		switch reason {
+		case Unknown:
+			msg = "Certificate store action failed due to unknown error"
+		default:
+			panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CertStoreError.", reason))
+		}
+
+	default:
+		panic(fmt.Sprintf("Unsupported CFSSL error type: %d.",
+			category))
+	}
+	return &Error{ErrorCode: errorCode, Message: msg}
+}
+
+// Wrap returns an error that contains the given error and an error code derived from
+// the given category, reason and the error. Currently, to avoid confusion, it is not
+// allowed to create an error of category Success. A nil err, an
+// already-wrapped *Error, or an unsupported category all panic.
+func Wrap(category Category, reason Reason, err error) *Error {
+	errorCode := int(category) + int(reason)
+	if err == nil {
+		panic("Wrap needs a supplied error to initialize.")
+	}
+
+	// do not double wrap an error
+	switch err.(type) {
+	case *Error:
+		panic("Unable to wrap a wrapped error.")
+	}
+
+	switch category {
+	case CertificateError:
+		// Given VerifyFailed, refine the 12XX code with a more detailed
+		// sub-code for the x509 errors we care about.
+		if reason == VerifyFailed {
+			switch errorType := err.(type) {
+			case x509.CertificateInvalidError:
+				errorCode += certificateInvalid + int(errorType.Reason)
+			case x509.UnknownAuthorityError:
+				errorCode += unknownAuthority
+			}
+		}
+	case PrivateKeyError, IntermediatesError, RootError, PolicyError, DialError,
+		APIClientError, CSRError, CTError, CertStoreError:
+	// no-op, just use the error
+	default:
+		panic(fmt.Sprintf("Unsupported CFSSL error type: %d.",
+			category))
+	}
+
+	return &Error{ErrorCode: errorCode, Message: err.Error()}
+
+}

+ 47 - 0
vendor/src/github.com/cloudflare/cfssl/errors/http.go

@@ -0,0 +1,47 @@
+package errors
+
+import (
+	"errors"
+	"net/http"
+)
+
+// HTTPError is an augmented error with an HTTP status code.
+type HTTPError struct {
+	StatusCode int
+	error
+}
+
+// Error implements the error interface.
+func (e *HTTPError) Error() string {
+	return e.error.Error()
+}
+
+// NewMethodNotAllowed returns an appropriate error in the case that
+// an HTTP client uses an invalid method (i.e. a GET in place of a POST)
+// on an API endpoint.
+func NewMethodNotAllowed(method string) *HTTPError {
+	return &HTTPError{http.StatusMethodNotAllowed, errors.New(`Method is not allowed:"` + method + `"`)}
+}
+
+// NewBadRequest creates an HTTPError with the given error and error code 400.
+func NewBadRequest(err error) *HTTPError {
+	return &HTTPError{http.StatusBadRequest, err}
+}
+
+// NewBadRequestString returns an HTTPError with the supplied message
+// and error code 400.
+func NewBadRequestString(s string) *HTTPError {
+	return NewBadRequest(errors.New(s))
+}
+
+// NewBadRequestMissingParameter returns a 400 HTTPError as a required
+// parameter is missing in the HTTP request.
+func NewBadRequestMissingParameter(s string) *HTTPError {
+	return NewBadRequestString(`Missing parameter "` + s + `"`)
+}
+
+// NewBadRequestUnwantedParameter returns a 400 HTTPError as an unnecessary
+// parameter is present in the HTTP request.
+func NewBadRequestUnwantedParameter(s string) *HTTPError {
+	return NewBadRequestString(`Unwanted parameter "` + s + `"`)
+}

+ 42 - 0
vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go

@@ -0,0 +1,42 @@
+// Package derhelpers implements common functionality
+// on DER encoded data
+package derhelpers
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+
+	cferr "github.com/cloudflare/cfssl/errors"
+)
+
+// ParsePrivateKeyDER parses a PKCS #1, PKCS #8, or elliptic curve
+// DER-encoded private key. The key must not be in PEM format.
+func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
+	// Try PKCS #8 first, then PKCS #1 (RSA), then SEC 1 (EC).
+	generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
+	if err != nil {
+		generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
+		if err != nil {
+			generalKey, err = x509.ParseECPrivateKey(keyDER)
+			if err != nil {
+				// We don't include the actual error into
+				// the final error. The reason might be
+				// we don't want to leak any info about
+				// the private key.
+				return nil, cferr.New(cferr.PrivateKeyError,
+					cferr.ParseFailed)
+			}
+		}
+	}
+
+	// Narrow the interface{} result to a concrete crypto.Signer.
+	switch generalKey.(type) {
+	case *rsa.PrivateKey:
+		return generalKey.(*rsa.PrivateKey), nil
+	case *ecdsa.PrivateKey:
+		return generalKey.(*ecdsa.PrivateKey), nil
+	}
+
+	// should never reach here
+	return nil, cferr.New(cferr.PrivateKeyError, cferr.ParseFailed)
+}

+ 478 - 0
vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go

@@ -0,0 +1,478 @@
+// Package helpers implements utility functionality common to many
+// CFSSL packages.
+package helpers
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/asn1"
+	"encoding/pem"
+	"errors"
+	"io/ioutil"
+	"math/big"
+
+	"strings"
+	"time"
+
+	"github.com/cloudflare/cfssl/crypto/pkcs7"
+	cferr "github.com/cloudflare/cfssl/errors"
+	"github.com/cloudflare/cfssl/helpers/derhelpers"
+	"github.com/cloudflare/cfssl/log"
+	"golang.org/x/crypto/pkcs12"
+)
+
+// OneYear is a time.Duration representing a year's worth of seconds.
+const OneYear = 8760 * time.Hour
+
+// OneDay is a time.Duration representing a day's worth of seconds.
+const OneDay = 24 * time.Hour
+
// InclusiveDate returns the instant one nanosecond before midnight UTC
// of the given date, so that time.After can be used inclusively.
func InclusiveDate(year int, month time.Month, day int) time.Time {
	midnight := time.Date(year, month, day, 0, 0, 0, 0, time.UTC)
	return midnight.Add(-time.Nanosecond)
}
+
+// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
+// issuing certificates valid for more than 5 years.
+var Jul2012 = InclusiveDate(2012, time.July, 01)
+
+// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
+// issuing certificates valid for more than 39 months.
+var Apr2015 = InclusiveDate(2015, time.April, 01)
+
// KeyLength returns the bit size of an ECDSA or RSA public key;
// nil and unsupported key types report zero.
func KeyLength(key interface{}) int {
	switch k := key.(type) {
	case *ecdsa.PublicKey:
		return k.Curve.Params().BitSize
	case *rsa.PublicKey:
		return k.N.BitLen()
	default:
		return 0
	}
}
+
// ExpiryTime returns the time when the certificate chain is expired:
// the earliest NotAfter of any certificate in the chain. An empty
// chain yields the zero time.Time.
func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
	for i, cert := range chain {
		if i == 0 || cert.NotAfter.Before(notAfter) {
			notAfter = cert.NotAfter
		}
	}
	return notAfter
}
+
// MonthsValid returns the number of months for which a certificate is
// valid, computed from NotBefore to NotAfter.
func MonthsValid(c *x509.Certificate) int {
	issued, expiry := c.NotBefore, c.NotAfter
	months := 12*(expiry.Year()-issued.Year()) + int(expiry.Month()) - int(issued.Month())

	// A trailing partial month counts as a whole month.
	if expiry.Day() > issued.Day() {
		months++
	}
	return months
}
+
+// ValidExpiry determines if a certificate is valid for an acceptable
+// length of time per the CA/Browser Forum baseline requirements.
+// See https://cabforum.org/wp-content/uploads/CAB-Forum-BR-1.3.0.pdf
+func ValidExpiry(c *x509.Certificate) bool {
+	issued := c.NotBefore
+
+	var maxMonths int
+	switch {
+	case issued.After(Apr2015):
+		maxMonths = 39
+	case issued.After(Jul2012):
+		maxMonths = 60
+	case issued.Before(Jul2012):
+		maxMonths = 120
+	}
+
+	if MonthsValid(c) > maxMonths {
+		return false
+	}
+	return true
+}
+
// SignatureString returns the TLS signature string corresponding to
// an X509 signature algorithm.
func SignatureString(alg x509.SignatureAlgorithm) string {
	// Table-driven mapping of the supported algorithm constants to
	// their canonical names.
	names := map[x509.SignatureAlgorithm]string{
		x509.MD2WithRSA:      "MD2WithRSA",
		x509.MD5WithRSA:      "MD5WithRSA",
		x509.SHA1WithRSA:     "SHA1WithRSA",
		x509.SHA256WithRSA:   "SHA256WithRSA",
		x509.SHA384WithRSA:   "SHA384WithRSA",
		x509.SHA512WithRSA:   "SHA512WithRSA",
		x509.DSAWithSHA1:     "DSAWithSHA1",
		x509.DSAWithSHA256:   "DSAWithSHA256",
		x509.ECDSAWithSHA1:   "ECDSAWithSHA1",
		x509.ECDSAWithSHA256: "ECDSAWithSHA256",
		x509.ECDSAWithSHA384: "ECDSAWithSHA384",
		x509.ECDSAWithSHA512: "ECDSAWithSHA512",
	}
	if name, ok := names[alg]; ok {
		return name
	}
	return "Unknown Signature"
}
+
// HashAlgoString returns the name of the hash algorithm contained in
// the signature method.
func HashAlgoString(alg x509.SignatureAlgorithm) string {
	// Group the algorithm constants by their underlying digest.
	switch alg {
	case x509.MD2WithRSA:
		return "MD2"
	case x509.MD5WithRSA:
		return "MD5"
	case x509.SHA1WithRSA, x509.DSAWithSHA1, x509.ECDSAWithSHA1:
		return "SHA1"
	case x509.SHA256WithRSA, x509.DSAWithSHA256, x509.ECDSAWithSHA256:
		return "SHA256"
	case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
		return "SHA384"
	case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
		return "SHA512"
	default:
		return "Unknown Hash Algorithm"
	}
}
+
// EncodeCertificatesPEM encodes a number of x509 certificates to PEM,
// concatenating one CERTIFICATE block per input certificate.
func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
	var out bytes.Buffer
	for _, cert := range certs {
		block := &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
		pem.Encode(&out, block)
	}
	return out.Bytes()
}
+
// EncodeCertificatePEM encodes a single x509 certificate to PEM by
// delegating to EncodeCertificatesPEM with a one-element slice.
func EncodeCertificatePEM(cert *x509.Certificate) []byte {
	return EncodeCertificatesPEM([]*x509.Certificate{cert})
}
+
+// ParseCertificatesPEM parses a sequence of PEM-encoded certificate and returns them,
+// can handle PEM encoded PKCS #7 structures.
+func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
+	var certs []*x509.Certificate
+	var err error
+	certsPEM = bytes.TrimSpace(certsPEM)
+	for len(certsPEM) > 0 {
+		var cert []*x509.Certificate
+		cert, certsPEM, err = ParseOneCertificateFromPEM(certsPEM)
+		if err != nil {
+
+			return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
+		} else if cert == nil {
+			break
+		}
+
+		certs = append(certs, cert...)
+	}
+	if len(certsPEM) > 0 {
+		return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+	}
+	return certs, nil
+}
+
// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
// either PKCS #7, PKCS #12, or raw x509.
// The formats are tried in that order: PKCS #7 first, then PKCS #12
// (using the supplied password), then a plain X.509 certificate list.
// Only the PKCS #12 path can yield a private key.
func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
	certsDER = bytes.TrimSpace(certsDER)
	pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
	if err != nil {
		// Not PKCS #7; try PKCS #12 next.
		var pkcs12data interface{}
		certs = make([]*x509.Certificate, 1)
		pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
		if err != nil {
			// Not PKCS #12 either; fall back to raw X.509 DER.
			certs, err = x509.ParseCertificates(certsDER)
			if err != nil {
				return nil, nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
			}
		} else {
			// NOTE(review): assumes the PKCS #12 private key
			// implements crypto.Signer; a key of another type
			// would panic here — confirm acceptable upstream.
			key = pkcs12data.(crypto.Signer)
		}
	} else {
		// Certificates can only be extracted from PKCS #7 signed data.
		if pkcs7data.ContentInfo != "SignedData" {
			return nil, nil, cferr.Wrap(cferr.CertificateError, cferr.DecodeFailed, errors.New("can only extract certificates from signed data content info"))
		}
		certs = pkcs7data.Content.SignedData.Certificates
	}
	if certs == nil {
		return nil, key, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
	}
	return certs, key, nil
}
+
+// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and check if it is self-signed.
+func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
+	cert, err := ParseCertificatePEM(certPEM)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.VerifyFailed, err)
+	}
+	return cert, nil
+}
+
+// ParseCertificatePEM parses and returns a PEM-encoded certificate,
+// can handle PEM encoded PKCS #7 structures.
+func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
+	certPEM = bytes.TrimSpace(certPEM)
+	cert, rest, err := ParseOneCertificateFromPEM(certPEM)
+	if err != nil {
+		// Log the actual parsing error but throw a default parse error message.
+		log.Debugf("Certificate parsing error: %v", err)
+		return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
+	} else if cert == nil {
+		return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+	} else if len(rest) > 0 {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PEM file should contain only one object"))
+	} else if len(cert) > 1 {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
+	}
+	return cert[0], nil
+}
+
// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
// either a raw x509 certificate or a PKCS #7 structure possibly containing
// multiple certificates, from the top of certsPEM, which itself may
// contain multiple PEM encoded certificate objects.
//
// A nil certificate slice with a nil error means no PEM block was
// found; the unconsumed remainder is always returned so callers can
// iterate.
func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {

	block, rest := pem.Decode(certsPEM)
	if block == nil {
		// Nothing PEM-shaped at the top of the input.
		return nil, rest, nil
	}

	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		// Not a plain certificate; fall back to PKCS #7.
		pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
		if err != nil {
			return nil, rest, err
		}
		if pkcs7data.ContentInfo != "SignedData" {
			return nil, rest, errors.New("only PKCS #7 Signed Data Content Info supported for certificate parsing")
		}
		certs := pkcs7data.Content.SignedData.Certificates
		if certs == nil {
			return nil, rest, errors.New("PKCS #7 structure contains no certificates")
		}
		return certs, rest, nil
	}
	var certs = []*x509.Certificate{cert}
	return certs, rest, nil
}
+
// LoadPEMCertPool loads a pool of PEM certificates from file.
func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
	pemCerts, err := ioutil.ReadFile(certsFile)
	if err != nil {
		return nil, err
	}

	pool := x509.NewCertPool()
	if ok := pool.AppendCertsFromPEM(pemCerts); !ok {
		return nil, errors.New("failed to load cert pool")
	}
	return pool, nil
}
+
// ParsePrivateKeyPEM parses and returns a PEM-encoded private
// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
// or elliptic private key.
// It is shorthand for ParsePrivateKeyPEMWithPassword with a nil password.
func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
	return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
}
+
+// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
+// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
+// or elliptic private key.
+func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
+	keyDER, err := GetKeyDERFromPEM(keyPEM, password)
+	if err != nil {
+		return nil, err
+	}
+
+	return derhelpers.ParsePrivateKeyDER(keyDER)
+}
+
+// GetKeyDERFromPEM parses a PEM-encoded private key and returns DER-format key bytes.
+func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
+	keyDER, _ := pem.Decode(in)
+	if keyDER != nil {
+		if procType, ok := keyDER.Headers["Proc-Type"]; ok {
+			if strings.Contains(procType, "ENCRYPTED") {
+				if password != nil {
+					return x509.DecryptPEMBlock(keyDER, password)
+				}
+				return nil, cferr.New(cferr.PrivateKeyError, cferr.Encrypted)
+			}
+		}
+		return keyDER.Bytes, nil
+	}
+
+	return nil, cferr.New(cferr.PrivateKeyError, cferr.DecodeFailed)
+}
+
// CheckSignature verifies a signature made by the key on a CSR, such
// as on the CSR itself: signed is the data that was signed, signature
// the signature to verify, and algo selects the digest. Only the
// SHA-1/256/384/512 RSA and ECDSA algorithm variants are supported;
// anything else (including the DSA variants) yields
// x509.ErrUnsupportedAlgorithm.
func CheckSignature(csr *x509.CertificateRequest, algo x509.SignatureAlgorithm, signed, signature []byte) error {
	var hashType crypto.Hash

	// Map the signature algorithm to the digest used over the data.
	switch algo {
	case x509.SHA1WithRSA, x509.ECDSAWithSHA1:
		hashType = crypto.SHA1
	case x509.SHA256WithRSA, x509.ECDSAWithSHA256:
		hashType = crypto.SHA256
	case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
		hashType = crypto.SHA384
	case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
		hashType = crypto.SHA512
	default:
		return x509.ErrUnsupportedAlgorithm
	}

	// The hash implementation may not be linked into the binary.
	if !hashType.Available() {
		return x509.ErrUnsupportedAlgorithm
	}
	h := hashType.New()

	h.Write(signed)
	digest := h.Sum(nil)

	// Dispatch on the CSR's public key type.
	switch pub := csr.PublicKey.(type) {
	case *rsa.PublicKey:
		return rsa.VerifyPKCS1v15(pub, hashType, digest, signature)
	case *ecdsa.PublicKey:
		// An ECDSA signature is an ASN.1 SEQUENCE of two integers
		// R and S.
		ecdsaSig := new(struct{ R, S *big.Int })
		if _, err := asn1.Unmarshal(signature, ecdsaSig); err != nil {
			return err
		}
		// Zero or negative components are invalid by definition.
		if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
			return errors.New("x509: ECDSA signature contained zero or negative values")
		}
		if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) {
			return errors.New("x509: ECDSA verification failure")
		}
		return nil
	}
	// Public key types other than RSA/ECDSA are not supported.
	return x509.ErrUnsupportedAlgorithm
}
+
+// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
+func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
+	in = bytes.TrimSpace(in)
+	p, rest := pem.Decode(in)
+	if p != nil {
+		if p.Type != "CERTIFICATE REQUEST" {
+			return nil, rest, cferr.New(cferr.CSRError, cferr.BadRequest)
+		}
+
+		csr, err = x509.ParseCertificateRequest(p.Bytes)
+	} else {
+		csr, err = x509.ParseCertificateRequest(in)
+	}
+
+	if err != nil {
+		return nil, rest, err
+	}
+
+	err = CheckSignature(csr, csr.SignatureAlgorithm, csr.RawTBSCertificateRequest, csr.Signature)
+	if err != nil {
+		return nil, rest, err
+	}
+
+	return csr, rest, nil
+}
+
+// ParseCSRPEM parses a PEM-encoded certificiate signing request.
+// It does not check the signature. This is useful for dumping data from a CSR
+// locally.
+func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
+	block, _ := pem.Decode([]byte(csrPEM))
+	der := block.Bytes
+	csrObject, err := x509.ParseCertificateRequest(der)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return csrObject, nil
+}
+
// SignerAlgo returns an X.509 signature algorithm corresponding to
// the crypto.Hash provided from a crypto.Signer. Unrecognized hashes
// fall back to the SHA-1 variant for the key type; unsupported key
// types yield x509.UnknownSignatureAlgorithm.
func SignerAlgo(priv crypto.Signer, h crypto.Hash) x509.SignatureAlgorithm {
	rsaAlgos := map[crypto.Hash]x509.SignatureAlgorithm{
		crypto.SHA512: x509.SHA512WithRSA,
		crypto.SHA384: x509.SHA384WithRSA,
		crypto.SHA256: x509.SHA256WithRSA,
	}
	ecdsaAlgos := map[crypto.Hash]x509.SignatureAlgorithm{
		crypto.SHA512: x509.ECDSAWithSHA512,
		crypto.SHA384: x509.ECDSAWithSHA384,
		crypto.SHA256: x509.ECDSAWithSHA256,
	}

	switch priv.Public().(type) {
	case *rsa.PublicKey:
		if algo, ok := rsaAlgos[h]; ok {
			return algo
		}
		return x509.SHA1WithRSA
	case *ecdsa.PublicKey:
		if algo, ok := ecdsaAlgos[h]; ok {
			return algo
		}
		return x509.ECDSAWithSHA1
	default:
		return x509.UnknownSignatureAlgorithm
	}
}

+ 15 - 0
vendor/src/github.com/cloudflare/cfssl/info/info.go

@@ -0,0 +1,15 @@
+// Package info contains the definitions for the info endpoint
+package info
+
// Req is the request struct for an info API request.
type Req struct {
	// Label — serialized as JSON "label". Presumably selects which
	// CA/signer bundle to report on; confirm against the info handler.
	Label   string `json:"label"`
	// Profile — serialized as JSON "profile". Presumably a signing
	// profile name; confirm against the info handler.
	Profile string `json:"profile"`
}

// Resp is the response for an Info API request.
type Resp struct {
	// Certificate — serialized as JSON "certificate".
	Certificate  string   `json:"certificate"`
	// Usage — serialized as JSON "usages".
	Usage        []string `json:"usages"`
	// ExpiryString — serialized as JSON "expiry".
	ExpiryString string   `json:"expiry"`
}

+ 278 - 0
vendor/src/github.com/cloudflare/cfssl/initca/initca.go

@@ -0,0 +1,278 @@
+// Package initca contains code to initialise a certificate authority,
+// generating a new root key and certificate.
+package initca
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"io/ioutil"
+	"net"
+	"time"
+
+	"github.com/cloudflare/cfssl/config"
+	"github.com/cloudflare/cfssl/csr"
+	cferr "github.com/cloudflare/cfssl/errors"
+	"github.com/cloudflare/cfssl/helpers"
+	"github.com/cloudflare/cfssl/log"
+	"github.com/cloudflare/cfssl/signer"
+	"github.com/cloudflare/cfssl/signer/local"
+)
+
+// validator contains the default validation logic for certificate
+// authority certificates. The only requirement here is that the
+// certificate have a non-empty subject field.
+func validator(req *csr.CertificateRequest) error {
+	if req.CN != "" {
+		return nil
+	}
+
+	if len(req.Names) == 0 {
+		return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information"))
+	}
+
+	for i := range req.Names {
+		if csr.IsNameEmpty(req.Names[i]) {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information"))
+		}
+	}
+
+	return nil
+}
+
+// New creates a new root certificate from the certificate request.
+func New(req *csr.CertificateRequest) (cert, csrPEM, key []byte, err error) {
+	if req.CA != nil {
+		if req.CA.Expiry != "" {
+			CAPolicy.Default.ExpiryString = req.CA.Expiry
+			CAPolicy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
+		}
+
+		if req.CA.PathLength != 0 {
+			signer.MaxPathLen = req.CA.PathLength
+		}
+	}
+
+	g := &csr.Generator{Validator: validator}
+	csrPEM, key, err = g.ProcessRequest(req)
+	if err != nil {
+		log.Errorf("failed to process request: %v", err)
+		key = nil
+		return
+	}
+
+	priv, err := helpers.ParsePrivateKeyPEM(key)
+	if err != nil {
+		log.Errorf("failed to parse private key: %v", err)
+		return
+	}
+
+	s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), nil)
+	if err != nil {
+		log.Errorf("failed to create signer: %v", err)
+		return
+	}
+	s.SetPolicy(CAPolicy)
+
+	signReq := signer.SignRequest{Hosts: req.Hosts, Request: string(csrPEM)}
+	cert, err = s.Sign(signReq)
+
+	return
+
+}
+
+// NewFromPEM creates a new root certificate from the key file passed in.
+func NewFromPEM(req *csr.CertificateRequest, keyFile string) (cert, csrPEM []byte, err error) {
+	privData, err := ioutil.ReadFile(keyFile)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	priv, err := helpers.ParsePrivateKeyPEM(privData)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return NewFromSigner(req, priv)
+}
+
+// RenewFromPEM re-creates a root certificate from the CA cert and key
+// files. The resulting root certificate will have the input CA certificate
+// as the template and have the same expiry length. E.g. the existing CA
+// is valid for a year from Jan 01 2015 to Jan 01 2016, the renewed certificate
+// will be valid from now and expire in one year as well.
+func RenewFromPEM(caFile, keyFile string) ([]byte, error) {
+	caBytes, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, err
+	}
+
+	ca, err := helpers.ParseCertificatePEM(caBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	keyBytes, err := ioutil.ReadFile(keyFile)
+	if err != nil {
+		return nil, err
+	}
+
+	key, err := helpers.ParsePrivateKeyPEM(keyBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return RenewFromSigner(ca, key)
+
+}
+
+// NewFromSigner creates a new root certificate from a crypto.Signer.
+func NewFromSigner(req *csr.CertificateRequest, priv crypto.Signer) (cert, csrPEM []byte, err error) {
+	if req.CA != nil {
+		if req.CA.Expiry != "" {
+			CAPolicy.Default.ExpiryString = req.CA.Expiry
+			CAPolicy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
+			if err != nil {
+				return nil, nil, err
+			}
+		}
+
+		if req.CA.PathLength != 0 {
+			signer.MaxPathLen = req.CA.PathLength
+		}
+	}
+
+	var sigAlgo x509.SignatureAlgorithm
+	switch pub := priv.Public().(type) {
+	case *rsa.PublicKey:
+		bitLength := pub.N.BitLen()
+		switch {
+		case bitLength >= 4096:
+			sigAlgo = x509.SHA512WithRSA
+		case bitLength >= 3072:
+			sigAlgo = x509.SHA384WithRSA
+		case bitLength >= 2048:
+			sigAlgo = x509.SHA256WithRSA
+		default:
+			sigAlgo = x509.SHA1WithRSA
+		}
+	case *ecdsa.PublicKey:
+		switch pub.Curve {
+		case elliptic.P521():
+			sigAlgo = x509.ECDSAWithSHA512
+		case elliptic.P384():
+			sigAlgo = x509.ECDSAWithSHA384
+		case elliptic.P256():
+			sigAlgo = x509.ECDSAWithSHA256
+		default:
+			sigAlgo = x509.ECDSAWithSHA1
+		}
+	default:
+		sigAlgo = x509.UnknownSignatureAlgorithm
+	}
+
+	var tpl = x509.CertificateRequest{
+		Subject:            req.Name(),
+		SignatureAlgorithm: sigAlgo,
+	}
+
+	for i := range req.Hosts {
+		if ip := net.ParseIP(req.Hosts[i]); ip != nil {
+			tpl.IPAddresses = append(tpl.IPAddresses, ip)
+		} else {
+			tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
+		}
+	}
+
+	return signWithCSR(&tpl, priv)
+}
+
+// signWithCSR creates a new root certificate from signing a X509.CertificateRequest
+// by a crypto.Signer.
+func signWithCSR(tpl *x509.CertificateRequest, priv crypto.Signer) (cert, csrPEM []byte, err error) {
+	csrPEM, err = x509.CreateCertificateRequest(rand.Reader, tpl, priv)
+	if err != nil {
+		log.Errorf("failed to generate a CSR: %v", err)
+		// The use of CertificateError was a matter of some
+		// debate; it is the one edge case in which a new
+		// error category specifically for CSRs might be
+		// useful, but it was deemed that one edge case did
+		// not a new category justify.
+		err = cferr.Wrap(cferr.CertificateError, cferr.BadRequest, err)
+		return
+	}
+
+	p := &pem.Block{
+		Type:  "CERTIFICATE REQUEST",
+		Bytes: csrPEM,
+	}
+	csrPEM = pem.EncodeToMemory(p)
+
+	s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), nil)
+	if err != nil {
+		log.Errorf("failed to create signer: %v", err)
+		return
+	}
+	s.SetPolicy(CAPolicy)
+
+	signReq := signer.SignRequest{Request: string(csrPEM)}
+	cert, err = s.Sign(signReq)
+	return
+}
+
+// RenewFromSigner re-creates a root certificate from the CA cert and crypto.Signer.
+// The resulting root certificate will have ca certificate
+// as the template and have the same expiry length. E.g. the existing CA
+// is valid for a year from Jan 01 2015 to Jan 01 2016, the renewed certificate
+// will be valid from now and expire in one year as well.
+func RenewFromSigner(ca *x509.Certificate, priv crypto.Signer) ([]byte, error) {
+	if !ca.IsCA {
+		return nil, errors.New("input certificate is not a CA cert")
+	}
+
+	// matching certificate public key vs private key
+	switch {
+	case ca.PublicKeyAlgorithm == x509.RSA:
+
+		var rsaPublicKey *rsa.PublicKey
+		var ok bool
+		if rsaPublicKey, ok = priv.Public().(*rsa.PublicKey); !ok {
+			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+		}
+		if ca.PublicKey.(*rsa.PublicKey).N.Cmp(rsaPublicKey.N) != 0 {
+			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+		}
+	case ca.PublicKeyAlgorithm == x509.ECDSA:
+		var ecdsaPublicKey *ecdsa.PublicKey
+		var ok bool
+		if ecdsaPublicKey, ok = priv.Public().(*ecdsa.PublicKey); !ok {
+			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+		}
+		if ca.PublicKey.(*ecdsa.PublicKey).X.Cmp(ecdsaPublicKey.X) != 0 {
+			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+		}
+	default:
+		return nil, cferr.New(cferr.PrivateKeyError, cferr.NotRSAOrECC)
+	}
+
+	req := csr.ExtractCertificateRequest(ca)
+
+	cert, _, err := NewFromSigner(req, priv)
+	return cert, err
+
+}
+
+// CAPolicy contains the CA issuing policy as default policy.
+var CAPolicy = &config.Signing{
+	Default: &config.SigningProfile{
+		Usage:        []string{"cert sign", "crl sign"},
+		ExpiryString: "43800h",
+		Expiry:       5 * helpers.OneYear,
+		CA:           true,
+	},
+}

+ 174 - 0
vendor/src/github.com/cloudflare/cfssl/log/log.go

@@ -0,0 +1,174 @@
+// Package log implements a wrapper around the Go standard library's
+// logging package. Clients should set the current log level; only
+// messages below that level will actually be logged. For example, if
+// Level is set to LevelWarning, only log messages at the Warning,
+// Error, and Critical levels will be logged.
+package log
+
+import (
+	"flag"
+	"fmt"
+	"log"
+	"os"
+)
+
+// The following constants represent logging levels in increasing levels of seriousness.
+const (
+	// LevelDebug is the log level for Debug statements.
+	LevelDebug = iota
+	// LevelInfo is the log level for Info statements.
+	LevelInfo
+	// LevelWarning is the log level for Warning statements.
+	LevelWarning
+	// LevelError is the log level for Error statements.
+	LevelError
+	// LevelCritical is the log level for Critical statements.
+	LevelCritical
+	// LevelFatal is the log level for Fatal statements.
+	LevelFatal
+)
+
+var levelPrefix = [...]string{
+	LevelDebug:    "DEBUG",
+	LevelInfo:     "INFO",
+	LevelWarning:  "WARNING",
+	LevelError:    "ERROR",
+	LevelCritical: "CRITICAL",
+	LevelFatal:    "FATAL",
+}
+
+// Level stores the current logging level.
+var Level = LevelInfo
+
+// SyslogWriter specifies the necessary methods for an alternate output
+// destination passed in via SetLogger.
+//
+// SyslogWriter is satisfied by *syslog.Writer.
+type SyslogWriter interface {
+	Debug(string) error
+	Info(string) error
+	Warning(string) error
+	Err(string) error
+	Crit(string) error
+	Emerg(string) error
+}
+
+// syslogWriter stores the SetLogger() parameter.
+var syslogWriter SyslogWriter
+
+// SetLogger sets the output used for output by this package.
+// A *syslog.Writer is a good choice for the logger parameter.
+// Call with a nil parameter to revert to default behavior.
+func SetLogger(logger SyslogWriter) {
+	syslogWriter = logger
+}
+
+func init() {
+	// Only define loglevel flag once.
+	if flag.Lookup("loglevel") == nil {
+		flag.IntVar(&Level, "loglevel", LevelInfo, "Log level (0 = DEBUG, 5 = FATAL)")
+	}
+}
+
// print writes msg at level l, but only when l is at or above the
// package-wide Level. When a SyslogWriter has been installed via
// SetLogger, the message is routed to the matching severity method;
// otherwise it goes to the standard library logger with a "[LEVEL]"
// prefix.
func print(l int, msg string) {
	if l >= Level {
		if syslogWriter != nil {
			var err error
			switch l {
			case LevelDebug:
				err = syslogWriter.Debug(msg)
			case LevelInfo:
				err = syslogWriter.Info(msg)
			case LevelWarning:
				err = syslogWriter.Warning(msg)
			case LevelError:
				err = syslogWriter.Err(msg)
			case LevelCritical:
				err = syslogWriter.Crit(msg)
			case LevelFatal:
				err = syslogWriter.Emerg(msg)
			}
			if err != nil {
				// Syslog delivery failed; fall back to the standard
				// logger so the message is not lost.
				log.Printf("Unable to write syslog: %v for msg: %s\n", err, msg)
			}
		} else {
			log.Printf("[%s] %s", levelPrefix[l], msg)
		}
	}
}

// outputf formats v per fmt.Sprintf and logs the result at level l.
func outputf(l int, format string, v []interface{}) {
	print(l, fmt.Sprintf(format, v...))
}

// output concatenates v per fmt.Sprint and logs the result at level l.
func output(l int, v []interface{}) {
	print(l, fmt.Sprint(v...))
}
+
+// Fatalf logs a formatted message at the "fatal" level and then exits. The
+// arguments are handled in the same manner as fmt.Printf.
+func Fatalf(format string, v ...interface{}) {
+	outputf(LevelFatal, format, v)
+	os.Exit(1)
+}
+
+// Fatal logs its arguments at the "fatal" level and then exits.
+func Fatal(v ...interface{}) {
+	output(LevelFatal, v)
+	os.Exit(1)
+}
+
+// Criticalf logs a formatted message at the "critical" level. The
+// arguments are handled in the same manner as fmt.Printf.
+func Criticalf(format string, v ...interface{}) {
+	outputf(LevelCritical, format, v)
+}
+
+// Critical logs its arguments at the "critical" level.
+func Critical(v ...interface{}) {
+	output(LevelCritical, v)
+}
+
+// Errorf logs a formatted message at the "error" level. The arguments
+// are handled in the same manner as fmt.Printf.
+func Errorf(format string, v ...interface{}) {
+	outputf(LevelError, format, v)
+}
+
+// Error logs its arguments at the "error" level.
+func Error(v ...interface{}) {
+	output(LevelError, v)
+}
+
+// Warningf logs a formatted message at the "warning" level. The
+// arguments are handled in the same manner as fmt.Printf.
+func Warningf(format string, v ...interface{}) {
+	outputf(LevelWarning, format, v)
+}
+
+// Warning logs its arguments at the "warning" level.
+func Warning(v ...interface{}) {
+	output(LevelWarning, v)
+}
+
+// Infof logs a formatted message at the "info" level. The arguments
+// are handled in the same manner as fmt.Printf.
+func Infof(format string, v ...interface{}) {
+	outputf(LevelInfo, format, v)
+}
+
+// Info logs its arguments at the "info" level.
+func Info(v ...interface{}) {
+	output(LevelInfo, v)
+}
+
+// Debugf logs a formatted message at the "debug" level. The arguments
+// are handled in the same manner as fmt.Printf.
+func Debugf(format string, v ...interface{}) {
+	outputf(LevelDebug, format, v)
+}
+
+// Debug logs its arguments at the "debug" level.
+func Debug(v ...interface{}) {
+	output(LevelDebug, v)
+}

+ 13 - 0
vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go

@@ -0,0 +1,13 @@
+// Package config in the ocsp directory provides configuration data for an OCSP
+// signer.
+package config
+
+import "time"
+
// Config contains configuration information required to set up an OCSP signer.
type Config struct {
	// CACertFile — per the *File naming, a file path; presumably the
	// issuing CA certificate. Confirm against the OCSP signer.
	CACertFile        string
	// ResponderCertFile — file path; presumably the OCSP responder
	// certificate.
	ResponderCertFile string
	// KeyFile — file path; presumably the responder's private key.
	KeyFile           string
	// Interval — a duration whose exact meaning (refresh or response
	// validity interval) is not evident here; confirm in the signer.
	Interval          time.Duration
}

+ 447 - 0
vendor/src/github.com/cloudflare/cfssl/signer/local/local.go

@@ -0,0 +1,447 @@
+// Package local implements certificate signature functionality for CFSSL.
+package local
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/pem"
+	"errors"
+	"io"
+	"io/ioutil"
+	"math/big"
+	"net"
+	"net/mail"
+	"os"
+
+	"github.com/cloudflare/cfssl/certdb"
+	"github.com/cloudflare/cfssl/config"
+	cferr "github.com/cloudflare/cfssl/errors"
+	"github.com/cloudflare/cfssl/helpers"
+	"github.com/cloudflare/cfssl/info"
+	"github.com/cloudflare/cfssl/log"
+	"github.com/cloudflare/cfssl/signer"
+	"github.com/google/certificate-transparency/go"
+	"github.com/google/certificate-transparency/go/client"
+)
+
+// Signer contains a signer that uses the standard library to
+// support both ECDSA and RSA CA keys.
+type Signer struct {
+	ca         *x509.Certificate // CA certificate; may start nil and is set on first root init in sign()
+	priv       crypto.Signer     // CA private key
+	policy     *config.Signing   // signing policy (named profiles plus a default)
+	sigAlgo    x509.SignatureAlgorithm
+	dbAccessor certdb.Accessor   // optional certificate DB; nil disables persistence
+}
+
+// NewSigner creates a new Signer directly from a
+// private key and certificate, with optional policy.
+func NewSigner(priv crypto.Signer, cert *x509.Certificate, sigAlgo x509.SignatureAlgorithm, policy *config.Signing) (*Signer, error) {
+	// A nil policy falls back to an empty profile set backed by the
+	// package default config.
+	if policy == nil {
+		policy = &config.Signing{
+			Profiles: map[string]*config.SigningProfile{},
+			Default:  config.DefaultConfig()}
+	}
+
+	if !policy.Valid() {
+		return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+	}
+
+	return &Signer{
+		ca:      cert,
+		priv:    priv,
+		sigAlgo: sigAlgo,
+		policy:  policy,
+	}, nil
+}
+
+// NewSignerFromFile generates a new local signer from a caFile
+// and a caKey file, both PEM encoded.
+func NewSignerFromFile(caFile, caKeyFile string, policy *config.Signing) (*Signer, error) {
+	log.Debug("Loading CA: ", caFile)
+	ca, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, err
+	}
+	log.Debug("Loading CA key: ", caKeyFile)
+	cakey, err := ioutil.ReadFile(caKeyFile)
+	if err != nil {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.ReadFailed, err)
+	}
+
+	parsedCa, err := helpers.ParseCertificatePEM(ca)
+	if err != nil {
+		return nil, err
+	}
+
+	// An empty CFSSL_CA_PK_PASSWORD is treated as "no password" (nil).
+	strPassword := os.Getenv("CFSSL_CA_PK_PASSWORD")
+	password := []byte(strPassword)
+	if strPassword == "" {
+		password = nil
+	}
+
+	priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password)
+	if err != nil {
+		// NOTE(review): log.Debug does not format its arguments; the %v
+		// verb suggests log.Debugf was intended here.
+		log.Debug("Malformed private key %v", err)
+		return nil, err
+	}
+
+	return NewSigner(priv, parsedCa, signer.DefaultSigAlgo(priv), policy)
+}
+
+// sign fills in template from the signing policy and profile, then issues a
+// PEM-encoded certificate signed by s.ca — bootstrapping s.ca itself when it
+// is nil and the template describes a self-signed root.
+func (s *Signer) sign(template *x509.Certificate, profile *config.SigningProfile) (cert []byte, err error) {
+	err = signer.FillTemplate(template, s.policy.Default, profile)
+	if err != nil {
+		return
+	}
+
+	var initRoot bool
+	if s.ca == nil {
+		// No CA certificate yet: only a CA (root) template is acceptable.
+		if !template.IsCA {
+			err = cferr.New(cferr.PolicyError, cferr.InvalidRequest)
+			return
+		}
+		template.DNSNames = nil
+		template.EmailAddresses = nil
+		s.ca = template
+		initRoot = true
+		template.MaxPathLen = signer.MaxPathLen
+	} else if template.IsCA {
+		// Intermediate CA: constrain the path length and strip SANs.
+		template.MaxPathLen = 1
+		template.DNSNames = nil
+		template.EmailAddresses = nil
+	}
+
+	derBytes, err := x509.CreateCertificate(rand.Reader, template, s.ca, template.PublicKey, s.priv)
+	if err != nil {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err)
+	}
+	if initRoot {
+		// Re-parse the freshly signed root so s.ca carries the final,
+		// fully populated certificate rather than the bare template.
+		s.ca, err = x509.ParseCertificate(derBytes)
+		if err != nil {
+			return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+		}
+	}
+
+	cert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+	log.Infof("signed certificate with serial number %d", template.SerialNumber)
+	return
+}
+
+// replaceSliceIfEmpty replaces the contents of replaced with newContents if
+// the slice referenced by replaced is empty.
+func replaceSliceIfEmpty(replaced, newContents *[]string) {
+	if len(*replaced) == 0 {
+		*replaced = *newContents
+	}
+}
+
+// PopulateSubjectFromCSR has functionality similar to Name, except
+// it fills the fields of the resulting pkix.Name with req's if the
+// subject's corresponding fields are empty.
+func PopulateSubjectFromCSR(s *signer.Subject, req pkix.Name) pkix.Name {
+	// if no subject, use req
+	if s == nil {
+		return req
+	}
+
+	name := s.Name()
+
+	if name.CommonName == "" {
+		name.CommonName = req.CommonName
+	}
+
+	// Fill each multi-valued field from the CSR when the override left
+	// it empty.
+	replaceSliceIfEmpty(&name.Country, &req.Country)
+	replaceSliceIfEmpty(&name.Province, &req.Province)
+	replaceSliceIfEmpty(&name.Locality, &req.Locality)
+	replaceSliceIfEmpty(&name.Organization, &req.Organization)
+	replaceSliceIfEmpty(&name.OrganizationalUnit, &req.OrganizationalUnit)
+	if name.SerialNumber == "" {
+		name.SerialNumber = req.SerialNumber
+	}
+	return name
+}
+
+// OverrideHosts fills template's IPAddresses, EmailAddresses, and DNSNames with the
+// content of hosts, if it is not nil.
+func OverrideHosts(template *x509.Certificate, hosts []string) {
+	if hosts != nil {
+		template.IPAddresses = []net.IP{}
+		template.EmailAddresses = []string{}
+		template.DNSNames = []string{}
+	}
+
+	// Classify each host as an IP address, an email address, or (by
+	// default) a DNS name.
+	for i := range hosts {
+		if ip := net.ParseIP(hosts[i]); ip != nil {
+			template.IPAddresses = append(template.IPAddresses, ip)
+		} else if email, err := mail.ParseAddress(hosts[i]); err == nil && email != nil {
+			template.EmailAddresses = append(template.EmailAddresses, email.Address)
+		} else {
+			template.DNSNames = append(template.DNSNames, hosts[i])
+		}
+	}
+
+}
+
+// Sign signs a new certificate based on the PEM-encoded client
+// certificate or certificate request with the signing profile,
+// specified by profileName.
+func (s *Signer) Sign(req signer.SignRequest) (cert []byte, err error) {
+	profile, err := signer.Profile(s, req.Profile)
+	if err != nil {
+		return
+	}
+
+	block, _ := pem.Decode([]byte(req.Request))
+	if block == nil {
+		return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed)
+	}
+
+	if block.Type != "CERTIFICATE REQUEST" {
+		return nil, cferr.Wrap(cferr.CSRError,
+			cferr.BadRequest, errors.New("not a certificate or csr"))
+	}
+
+	csrTemplate, err := signer.ParseCertificateRequest(s, block.Bytes)
+	if err != nil {
+		return nil, err
+	}
+
+	// Copy out only the fields from the CSR authorized by policy.
+	safeTemplate := x509.Certificate{}
+	// If the profile contains no explicit whitelist, assume that all fields
+	// should be copied from the CSR.
+	if profile.CSRWhitelist == nil {
+		safeTemplate = *csrTemplate
+	} else {
+		if profile.CSRWhitelist.Subject {
+			safeTemplate.Subject = csrTemplate.Subject
+		}
+		if profile.CSRWhitelist.PublicKeyAlgorithm {
+			safeTemplate.PublicKeyAlgorithm = csrTemplate.PublicKeyAlgorithm
+		}
+		if profile.CSRWhitelist.PublicKey {
+			safeTemplate.PublicKey = csrTemplate.PublicKey
+		}
+		if profile.CSRWhitelist.SignatureAlgorithm {
+			safeTemplate.SignatureAlgorithm = csrTemplate.SignatureAlgorithm
+		}
+		if profile.CSRWhitelist.DNSNames {
+			safeTemplate.DNSNames = csrTemplate.DNSNames
+		}
+		if profile.CSRWhitelist.IPAddresses {
+			safeTemplate.IPAddresses = csrTemplate.IPAddresses
+		}
+		if profile.CSRWhitelist.EmailAddresses {
+			safeTemplate.EmailAddresses = csrTemplate.EmailAddresses
+		}
+	}
+
+	OverrideHosts(&safeTemplate, req.Hosts)
+	safeTemplate.Subject = PopulateSubjectFromCSR(req.Subject, safeTemplate.Subject)
+
+	// If there is a whitelist, ensure that both the Common Name and SAN DNSNames match
+	if profile.NameWhitelist != nil {
+		if safeTemplate.Subject.CommonName != "" {
+			if profile.NameWhitelist.Find([]byte(safeTemplate.Subject.CommonName)) == nil {
+				return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+			}
+		}
+		for _, name := range safeTemplate.DNSNames {
+			if profile.NameWhitelist.Find([]byte(name)) == nil {
+				return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+			}
+		}
+		for _, name := range safeTemplate.EmailAddresses {
+			if profile.NameWhitelist.Find([]byte(name)) == nil {
+				return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+			}
+		}
+	}
+
+	if profile.ClientProvidesSerialNumbers {
+		if req.Serial == nil {
+			return nil, cferr.New(cferr.CertificateError, cferr.MissingSerial)
+		}
+		safeTemplate.SerialNumber = req.Serial
+	} else {
+		// RFC 5280 4.1.2.2:
+		// Certificate users MUST be able to handle serialNumber
+		// values up to 20 octets.  Conforming CAs MUST NOT use
+		// serialNumber values longer than 20 octets.
+		//
+		// If CFSSL is providing the serial numbers, it makes
+		// sense to use the max supported size.
+		serialNumber := make([]byte, 20)
+		_, err = io.ReadFull(rand.Reader, serialNumber)
+		if err != nil {
+			return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err)
+		}
+
+		// SetBytes interprets buf as the bytes of a big-endian
+		// unsigned integer. The leading byte should be masked
+		// off to ensure it isn't negative.
+		serialNumber[0] &= 0x7F
+
+		safeTemplate.SerialNumber = new(big.Int).SetBytes(serialNumber)
+	}
+
+	if len(req.Extensions) > 0 {
+		// Only whitelisted extensions may be copied from the request.
+		for _, ext := range req.Extensions {
+			oid := asn1.ObjectIdentifier(ext.ID)
+			if !profile.ExtensionWhitelist[oid.String()] {
+				return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest)
+			}
+
+			rawValue, err := hex.DecodeString(ext.Value)
+			if err != nil {
+				return nil, cferr.Wrap(cferr.CertificateError, cferr.InvalidRequest, err)
+			}
+
+			safeTemplate.ExtraExtensions = append(safeTemplate.ExtraExtensions, pkix.Extension{
+				Id:       oid,
+				Critical: ext.Critical,
+				Value:    rawValue,
+			})
+		}
+	}
+
+	// certTBS is the to-be-signed certificate template.
+	var certTBS = safeTemplate
+
+	// Certificate Transparency: submit a poisoned precertificate to each
+	// configured CT log and embed the returned SCT list (RFC 6962).
+	if len(profile.CTLogServers) > 0 {
+		// Add a poison extension which prevents validation
+		var poisonExtension = pkix.Extension{Id: signer.CTPoisonOID, Critical: true, Value: []byte{0x05, 0x00}}
+		var poisonedPreCert = certTBS
+		poisonedPreCert.ExtraExtensions = append(safeTemplate.ExtraExtensions, poisonExtension)
+		cert, err = s.sign(&poisonedPreCert, profile)
+		if err != nil {
+			return
+		}
+
+		derCert, _ := pem.Decode(cert)
+		prechain := []ct.ASN1Cert{derCert.Bytes, s.ca.Raw}
+		var sctList []ct.SignedCertificateTimestamp
+
+		for _, server := range profile.CTLogServers {
+			log.Infof("submitting poisoned precertificate to %s", server)
+			var ctclient = client.New(server)
+			var resp *ct.SignedCertificateTimestamp
+			resp, err = ctclient.AddPreChain(prechain)
+			if err != nil {
+				return nil, cferr.Wrap(cferr.CTError, cferr.PrecertSubmissionFailed, err)
+			}
+			sctList = append(sctList, *resp)
+		}
+
+		var serializedSCTList []byte
+		serializedSCTList, err = serializeSCTList(sctList)
+		if err != nil {
+			return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
+		}
+
+		// Serialize again as an octet string before embedding
+		serializedSCTList, err = asn1.Marshal(serializedSCTList)
+		if err != nil {
+			return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
+		}
+
+		var SCTListExtension = pkix.Extension{Id: signer.SCTListOID, Critical: false, Value: serializedSCTList}
+		certTBS.ExtraExtensions = append(certTBS.ExtraExtensions, SCTListExtension)
+	}
+	var signedCert []byte
+	signedCert, err = s.sign(&certTBS, profile)
+	if err != nil {
+		return nil, err
+	}
+
+	if s.dbAccessor != nil {
+		var certRecord = certdb.CertificateRecord{
+			Serial: certTBS.SerialNumber.String(),
+			// this relies on the specific behavior of x509.CreateCertificate
+			// which updates certTBS AuthorityKeyId from the signer's SubjectKeyId
+			AKI:     hex.EncodeToString(certTBS.AuthorityKeyId),
+			CALabel: req.Label,
+			Status:  "good",
+			Expiry:  certTBS.NotAfter,
+			PEM:     string(signedCert),
+		}
+
+		err = s.dbAccessor.InsertCertificate(certRecord)
+		if err != nil {
+			return nil, err
+		}
+		log.Debug("saved certificate with serial number ", certTBS.SerialNumber)
+	}
+
+	return signedCert, nil
+}
+
+// serializeSCTList marshals the SCTs as a TLS-style length-prefixed list
+// (RFC 6962): each SCT is preceded by a 2-byte big-endian length, and the
+// whole list is prefixed with its total byte length.
+func serializeSCTList(sctList []ct.SignedCertificateTimestamp) ([]byte, error) {
+	var buf bytes.Buffer
+	for _, sct := range sctList {
+		sct, err := ct.SerializeSCT(sct)
+		if err != nil {
+			return nil, err
+		}
+		// binary.Write to a bytes.Buffer cannot fail, so the error is
+		// deliberately ignored.
+		binary.Write(&buf, binary.BigEndian, uint16(len(sct)))
+		buf.Write(sct)
+	}
+
+	var sctListLengthField = make([]byte, 2)
+	binary.BigEndian.PutUint16(sctListLengthField, uint16(buf.Len()))
+	return bytes.Join([][]byte{sctListLengthField, buf.Bytes()}, nil), nil
+}
+
+// Info returns a populated info.Resp struct or an error.
+func (s *Signer) Info(req info.Req) (resp *info.Resp, err error) {
+	cert, err := s.Certificate(req.Label, req.Profile)
+	if err != nil {
+		return
+	}
+
+	profile, err := signer.Profile(s, req.Profile)
+	if err != nil {
+		return
+	}
+
+	resp = new(info.Resp)
+	if cert.Raw != nil {
+		resp.Certificate = string(bytes.TrimSpace(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})))
+	}
+	resp.Usage = profile.Usage
+	resp.ExpiryString = profile.ExpiryString
+
+	return
+}
+
+// SigAlgo returns the signer's signature algorithm.
+func (s *Signer) SigAlgo() x509.SignatureAlgorithm {
+	return s.sigAlgo
+}
+
+// Certificate returns a copy of the signer's CA certificate. The label
+// and profile arguments are currently ignored.
+func (s *Signer) Certificate(label, profile string) (*x509.Certificate, error) {
+	cert := *s.ca
+	return &cert, nil
+}
+
+// SetPolicy sets the signer's signature policy.
+func (s *Signer) SetPolicy(policy *config.Signing) {
+	s.policy = policy
+}
+
+// SetDBAccessor sets the signer's cert db accessor.
+func (s *Signer) SetDBAccessor(dba certdb.Accessor) {
+	s.dbAccessor = dba
+}
+
+// Policy returns the signer's policy.
+func (s *Signer) Policy() *config.Signing {
+	return s.policy
+}

+ 385 - 0
vendor/src/github.com/cloudflare/cfssl/signer/signer.go

@@ -0,0 +1,385 @@
+// Package signer implements certificate signature functionality for CFSSL.
+package signer
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rsa"
+	"crypto/sha1"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"math/big"
+	"strings"
+	"time"
+
+	"github.com/cloudflare/cfssl/certdb"
+	"github.com/cloudflare/cfssl/config"
+	"github.com/cloudflare/cfssl/csr"
+	cferr "github.com/cloudflare/cfssl/errors"
+	"github.com/cloudflare/cfssl/helpers"
+	"github.com/cloudflare/cfssl/info"
+)
+
+// MaxPathLen is the default path length for a new CA certificate.
+var MaxPathLen = 2
+
+// Subject contains the information that should be used to override the
+// subject information when signing a certificate.
+type Subject struct {
+	CN           string     // common name; note: no json tag, so it marshals as "CN"
+	Names        []csr.Name `json:"names"`
+	SerialNumber string
+}
+
+// Extension represents a raw extension to be included in the certificate.  The
+// "value" field must be hex encoded.
+type Extension struct {
+	ID       config.OID `json:"id"`
+	Critical bool       `json:"critical"`
+	Value    string     `json:"value"`
+}
+
+// SignRequest stores a signature request, which contains the hostname,
+// the CSR, optional subject information, and the signature profile.
+//
+// Extensions provided in the signRequest are copied into the certificate, as
+// long as they are in the ExtensionWhitelist for the signer's policy.
+// Extensions requested in the CSR are ignored, except for those processed by
+// ParseCertificateRequest (mainly subjectAltName).
+type SignRequest struct {
+	Hosts      []string    `json:"hosts"`
+	Request    string      `json:"certificate_request"`
+	Subject    *Subject    `json:"subject,omitempty"`
+	Profile    string      `json:"profile"`
+	Label      string      `json:"label"`
+	Serial     *big.Int    `json:"serial,omitempty"`
+	Extensions []Extension `json:"extensions,omitempty"`
+}
+
+// appendIf appends to a if s is not an empty string.
+func appendIf(s string, a *[]string) {
+	if s != "" {
+		*a = append(*a, s)
+	}
+}
+
+// Name returns the PKIX name for the subject, merging the non-empty
+// fields of every entry in Names.
+func (s *Subject) Name() pkix.Name {
+	var name pkix.Name
+	name.CommonName = s.CN
+
+	for _, n := range s.Names {
+		appendIf(n.C, &name.Country)
+		appendIf(n.ST, &name.Province)
+		appendIf(n.L, &name.Locality)
+		appendIf(n.O, &name.Organization)
+		appendIf(n.OU, &name.OrganizationalUnit)
+	}
+	name.SerialNumber = s.SerialNumber
+	return name
+}
+
+// SplitHosts takes a comma-separated list of hosts and returns a slice
+// with the hosts split. It returns nil for an empty input.
+func SplitHosts(hostList string) []string {
+	if hostList == "" {
+		return nil
+	}
+
+	return strings.Split(hostList, ",")
+}
+
+// A Signer contains a CA's certificate and private key for signing
+// certificates, a Signing policy to refer to and a SignatureAlgorithm.
+// It is implemented by, for example, the local package's Signer.
+type Signer interface {
+	Info(info.Req) (*info.Resp, error)
+	Policy() *config.Signing
+	SetDBAccessor(certdb.Accessor)
+	SetPolicy(*config.Signing)
+	SigAlgo() x509.SignatureAlgorithm
+	Sign(req SignRequest) (cert []byte, err error)
+}
+
+// Profile gets the specific profile from the signer.
+func Profile(s Signer, profile string) (*config.SigningProfile, error) {
+	var p *config.SigningProfile
+	policy := s.Policy()
+	if policy != nil && policy.Profiles != nil && profile != "" {
+		p = policy.Profiles[profile]
+	}
+
+	// Fall back to the policy's default profile when no named profile
+	// matches.
+	if p == nil && policy != nil {
+		p = policy.Default
+	}
+
+	if p == nil {
+		return nil, cferr.Wrap(cferr.APIClientError, cferr.ClientHTTPError, errors.New("profile must not be nil"))
+	}
+	return p, nil
+}
+
+// DefaultSigAlgo returns an appropriate X.509 signature algorithm given
+// the CA's private key.
+func DefaultSigAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
+	pub := priv.Public()
+	switch pub := pub.(type) {
+	case *rsa.PublicKey:
+		// Match the digest strength to the modulus size.
+		keySize := pub.N.BitLen()
+		switch {
+		case keySize >= 4096:
+			return x509.SHA512WithRSA
+		case keySize >= 3072:
+			return x509.SHA384WithRSA
+		case keySize >= 2048:
+			return x509.SHA256WithRSA
+		default:
+			return x509.SHA1WithRSA
+		}
+	case *ecdsa.PublicKey:
+		// Match the digest strength to the curve.
+		switch pub.Curve {
+		case elliptic.P256():
+			return x509.ECDSAWithSHA256
+		case elliptic.P384():
+			return x509.ECDSAWithSHA384
+		case elliptic.P521():
+			return x509.ECDSAWithSHA512
+		default:
+			return x509.ECDSAWithSHA1
+		}
+	default:
+		return x509.UnknownSignatureAlgorithm
+	}
+}
+
+// ParseCertificateRequest takes an incoming certificate request and
+// builds a certificate template from it.
+func ParseCertificateRequest(s Signer, csrBytes []byte) (template *x509.Certificate, err error) {
+	csr, err := x509.ParseCertificateRequest(csrBytes)
+	if err != nil {
+		err = cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err)
+		return
+	}
+
+	// Reject CSRs whose self-signature does not verify against the
+	// embedded public key.
+	err = helpers.CheckSignature(csr, csr.SignatureAlgorithm, csr.RawTBSCertificateRequest, csr.Signature)
+	if err != nil {
+		err = cferr.Wrap(cferr.CSRError, cferr.KeyMismatch, err)
+		return
+	}
+
+	template = &x509.Certificate{
+		Subject:            csr.Subject,
+		PublicKeyAlgorithm: csr.PublicKeyAlgorithm,
+		PublicKey:          csr.PublicKey,
+		SignatureAlgorithm: s.SigAlgo(),
+		DNSNames:           csr.DNSNames,
+		IPAddresses:        csr.IPAddresses,
+		EmailAddresses:     csr.EmailAddresses,
+	}
+
+	return
+}
+
+// subjectPublicKeyInfo mirrors the ASN.1 SubjectPublicKeyInfo structure so
+// the raw public key bits can be extracted.
+type subjectPublicKeyInfo struct {
+	Algorithm        pkix.AlgorithmIdentifier
+	SubjectPublicKey asn1.BitString
+}
+
+// ComputeSKI derives an SKI from the certificate's public key in a
+// standard manner. This is done by computing the SHA-1 digest of the
+// SubjectPublicKeyInfo component of the certificate.
+func ComputeSKI(template *x509.Certificate) ([]byte, error) {
+	pub := template.PublicKey
+	encodedPub, err := x509.MarshalPKIXPublicKey(pub)
+	if err != nil {
+		return nil, err
+	}
+
+	// Unwrap the SubjectPublicKeyInfo so only the raw key bits are hashed.
+	var subPKI subjectPublicKeyInfo
+	_, err = asn1.Unmarshal(encodedPub, &subPKI)
+	if err != nil {
+		return nil, err
+	}
+
+	pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes)
+	return pubHash[:], nil
+}
+
+// FillTemplate is a utility function that tries to load as much of
+// the certificate template as possible from the profiles and current
+// template. It fills in the key uses, expiration, revocation URLs
+// and SKI.
+func FillTemplate(template *x509.Certificate, defaultProfile, profile *config.SigningProfile) error {
+	ski, err := ComputeSKI(template)
+	// NOTE(review): err is not checked here; if ComputeSKI fails, ski is
+	// nil and the error is silently overwritten by the addPolicies call
+	// below — confirm whether this is intentional upstream.
+
+	var (
+		eku             []x509.ExtKeyUsage
+		ku              x509.KeyUsage
+		backdate        time.Duration
+		expiry          time.Duration
+		notBefore       time.Time
+		notAfter        time.Time
+		crlURL, ocspURL string
+	)
+
+	// The third value returned from Usages is a list of unknown key usages.
+	// This should be used when validating the profile at load, and isn't used
+	// here.
+	ku, eku, _ = profile.Usages()
+	if profile.IssuerURL == nil {
+		profile.IssuerURL = defaultProfile.IssuerURL
+	}
+
+	if ku == 0 && len(eku) == 0 {
+		return cferr.New(cferr.PolicyError, cferr.NoKeyUsages)
+	}
+
+	// Fall back to the default profile for any unset values.
+	if expiry = profile.Expiry; expiry == 0 {
+		expiry = defaultProfile.Expiry
+	}
+
+	if crlURL = profile.CRL; crlURL == "" {
+		crlURL = defaultProfile.CRL
+	}
+	if ocspURL = profile.OCSP; ocspURL == "" {
+		ocspURL = defaultProfile.OCSP
+	}
+	// Backdate notBefore to tolerate clock skew; 5 minutes by default.
+	if backdate = profile.Backdate; backdate == 0 {
+		backdate = -5 * time.Minute
+	} else {
+		backdate = -1 * profile.Backdate
+	}
+
+	if !profile.NotBefore.IsZero() {
+		notBefore = profile.NotBefore.UTC()
+	} else {
+		notBefore = time.Now().Round(time.Minute).Add(backdate).UTC()
+	}
+
+	if !profile.NotAfter.IsZero() {
+		notAfter = profile.NotAfter.UTC()
+	} else {
+		notAfter = notBefore.Add(expiry).UTC()
+	}
+
+	template.NotBefore = notBefore
+	template.NotAfter = notAfter
+	template.KeyUsage = ku
+	template.ExtKeyUsage = eku
+	template.BasicConstraintsValid = true
+	template.IsCA = profile.CA
+	template.SubjectKeyId = ski
+
+	if ocspURL != "" {
+		template.OCSPServer = []string{ocspURL}
+	}
+	if crlURL != "" {
+		template.CRLDistributionPoints = []string{crlURL}
+	}
+
+	if len(profile.IssuerURL) != 0 {
+		template.IssuingCertificateURL = profile.IssuerURL
+	}
+	if len(profile.Policies) != 0 {
+		err = addPolicies(template, profile.Policies)
+		if err != nil {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+		}
+	}
+	// id-pkix-ocsp-nocheck: tells clients not to check the OCSP signer's
+	// certificate for revocation.
+	if profile.OCSPNoCheck {
+		ocspNoCheckExtension := pkix.Extension{
+			Id:       asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 5},
+			Critical: false,
+			Value:    []byte{0x05, 0x00},
+		}
+		template.ExtraExtensions = append(template.ExtraExtensions, ocspNoCheckExtension)
+	}
+
+	return nil
+}
+
+// NOTE(review): the asn1 struct tags below use "tag:optional"; encoding/asn1
+// expects a number after "tag:" and silently ignores a non-numeric value, so
+// these tags are likely no-ops — plain "optional" may have been intended.
+type policyInformation struct {
+	PolicyIdentifier asn1.ObjectIdentifier
+	Qualifiers       []interface{} `asn1:"tag:optional,omitempty"`
+}
+
+// cpsPolicyQualifier is a CPS-URI policy qualifier.
+type cpsPolicyQualifier struct {
+	PolicyQualifierID asn1.ObjectIdentifier
+	Qualifier         string `asn1:"tag:optional,ia5"`
+}
+
+// userNotice is the explicit-text body of a user-notice qualifier.
+type userNotice struct {
+	ExplicitText string `asn1:"tag:optional,utf8"`
+}
+// userNoticePolicyQualifier pairs the user-notice OID with its notice text.
+type userNoticePolicyQualifier struct {
+	PolicyQualifierID asn1.ObjectIdentifier
+	Qualifier         userNotice
+}
+
+var (
+	// Per https://tools.ietf.org/html/rfc3280.html#page-106, this represents:
+	// iso(1) identified-organization(3) dod(6) internet(1) security(5)
+	//   mechanisms(5) pkix(7) id-qt(2) id-qt-cps(1)
+	iDQTCertificationPracticeStatement = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1}
+	// iso(1) identified-organization(3) dod(6) internet(1) security(5)
+	//   mechanisms(5) pkix(7) id-qt(2) id-qt-unotice(2)
+	iDQTUserNotice = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2}
+
+	// CTPoisonOID is the object ID of the critical poison extension for precertificates
+	// https://tools.ietf.org/html/rfc6962#page-9
+	CTPoisonOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
+
+	// SCTListOID is the object ID for the Signed Certificate Timestamp certificate extension
+	// https://tools.ietf.org/html/rfc6962#page-14
+	SCTListOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
+)
+
+// addPolicies adds Certificate Policies and optional Policy Qualifiers to a
+// certificate, based on the input config. Go's x509 library allows setting
+// Certificate Policies easily, but does not support nested Policy Qualifiers
+// under those policies. So we need to construct the ASN.1 structure ourselves.
+func addPolicies(template *x509.Certificate, policies []config.CertificatePolicy) error {
+	asn1PolicyList := []policyInformation{}
+
+	for _, policy := range policies {
+		pi := policyInformation{
+			// The PolicyIdentifier is an OID assigned to a given issuer.
+			PolicyIdentifier: asn1.ObjectIdentifier(policy.ID),
+		}
+		for _, qualifier := range policy.Qualifiers {
+			switch qualifier.Type {
+			case "id-qt-unotice":
+				pi.Qualifiers = append(pi.Qualifiers,
+					userNoticePolicyQualifier{
+						PolicyQualifierID: iDQTUserNotice,
+						Qualifier: userNotice{
+							ExplicitText: qualifier.Value,
+						},
+					})
+			case "id-qt-cps":
+				pi.Qualifiers = append(pi.Qualifiers,
+					cpsPolicyQualifier{
+						PolicyQualifierID: iDQTCertificationPracticeStatement,
+						Qualifier:         qualifier.Value,
+					})
+			default:
+				return errors.New("Invalid qualifier type in Policies " + qualifier.Type)
+			}
+		}
+		asn1PolicyList = append(asn1PolicyList, pi)
+	}
+
+	asn1Bytes, err := asn1.Marshal(asn1PolicyList)
+	if err != nil {
+		return err
+	}
+
+	template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{
+		Id:       asn1.ObjectIdentifier{2, 5, 29, 32}, // id-ce-certificatePolicies
+		Critical: false,
+		Value:    asn1Bytes,
+	})
+	return nil
+}

+ 13 - 0
vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE

@@ -0,0 +1,13 @@
+Copyright (c) 2014 Kyle Isom <kyle@gokyle.org>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above 
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 

+ 43 - 0
vendor/src/github.com/coreos/etcd/pkg/crc/crc.go

@@ -0,0 +1,43 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crc provides utility function for cyclic redundancy check
+// algorithms.
+package crc
+
+import (
+	"hash"
+	"hash/crc32"
+)
+
+// Size is the size of a CRC-32 checksum in bytes.
+const Size = 4
+
+// digest holds the running CRC-32 state.
+type digest struct {
+	crc uint32       // current checksum value
+	tab *crc32.Table // table for the polynomial in use
+}
+
+// New creates a new hash.Hash32 computing the CRC-32 checksum
+// using the polynomial represented by the Table.
+// Modified by xiangli to take a prevcrc.
+func New(prev uint32, tab *crc32.Table) hash.Hash32 { return &digest{prev, tab} }
+
+// Size returns the checksum length in bytes.
+func (d *digest) Size() int { return Size }
+
+// BlockSize returns the hash's block size.
+func (d *digest) BlockSize() int { return 1 }
+
+// Reset zeroes the running checksum. Note that it does not restore the
+// prev value passed to New.
+func (d *digest) Reset() { d.crc = 0 }
+
+// Write folds p into the running checksum; it never returns an error.
+func (d *digest) Write(p []byte) (n int, err error) {
+	d.crc = crc32.Update(d.crc, d.tab, p)
+	return len(p), nil
+}
+
+// Sum32 returns the current checksum.
+func (d *digest) Sum32() uint32 { return d.crc }
+
+// Sum appends the big-endian checksum to in.
+func (d *digest) Sum(in []byte) []byte {
+	s := d.Sum32()
+	return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
+}

+ 75 - 0
vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go

@@ -0,0 +1,75 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package fileutil implements utility functions related to files and paths.
+package fileutil
+
+import (
+	"io/ioutil"
+	"os"
+	"path"
+	"sort"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+const (
+	privateFileMode = 0600
+	// owner can make/remove files inside the directory
+	privateDirMode = 0700
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "fileutil")
+)
+
+// IsDirWriteable checks if dir is writable by writing and removing a file
+// to dir. It returns nil if dir is writable.
+func IsDirWriteable(dir string) error {
+	f := path.Join(dir, ".touch")
+	if err := ioutil.WriteFile(f, []byte(""), privateFileMode); err != nil {
+		return err
+	}
+	return os.Remove(f)
+}
+
+// ReadDir returns the filenames in the given directory in sorted order.
+func ReadDir(dirpath string) ([]string, error) {
+	dir, err := os.Open(dirpath)
+	if err != nil {
+		return nil, err
+	}
+	defer dir.Close()
+	names, err := dir.Readdirnames(-1)
+	if err != nil {
+		return nil, err
+	}
+	sort.Strings(names)
+	return names, nil
+}
+
+// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
+// does not exist. TouchDirAll also ensures the given directory is writable.
+func TouchDirAll(dir string) error {
+	err := os.MkdirAll(dir, privateDirMode)
+	// NOTE(review): os.MkdirAll wraps failures in *PathError and never
+	// returns the os.ErrExist sentinel directly, so this comparison is
+	// always true for non-nil err; os.IsExist(err) was probably intended.
+	// (MkdirAll returns nil when the directory already exists, so the
+	// practical impact is limited.)
+	if err != nil && err != os.ErrExist {
+		return err
+	}
+	return IsDirWriteable(dir)
+}
+
+// Exist reports whether the named file or directory exists.
+func Exist(name string) bool {
+	_, err := os.Stat(name)
+	return err == nil
+}

+ 29 - 0
vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go

@@ -0,0 +1,29 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+// Lock is an exclusive lock on a file, with platform-specific
+// implementations provided by the lock_*.go files in this package.
+type Lock interface {
+	// Name returns the name of the file.
+	Name() string
+	// TryLock acquires exclusivity on the lock without blocking.
+	TryLock() error
+	// Lock acquires exclusivity on the lock.
+	Lock() error
+	// Unlock unlocks the lock.
+	Unlock() error
+	// Destroy should be called after Unlock to clean up
+	// the resources.
+	Destroy() error
+}

+ 79 - 0
vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go

@@ -0,0 +1,79 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"errors"
+	"os"
+	"syscall"
+	"time"
+)
+
+var (
+	// ErrLocked is returned when the lock cannot be acquired.
+	ErrLocked = errors.New("file already locked")
+)
+
+// lock implements Lock on Plan 9 via the DMEXCL exclusive-open mode bit.
+type lock struct {
+	fname string
+	file  *os.File
+}
+
+// Name returns the path the lock was created for.
+func (l *lock) Name() string {
+	return l.fname
+}
+
+// TryLock marks the file exclusive-open and attempts a single open; any
+// failure to open is reported as ErrLocked.
+func (l *lock) TryLock() error {
+	err := os.Chmod(l.fname, syscall.DMEXCL|0600)
+	if err != nil {
+		return err
+	}
+
+	f, err := os.Open(l.fname)
+	if err != nil {
+		return ErrLocked
+	}
+
+	l.file = f
+	return nil
+}
+
+// Lock marks the file exclusive-open and polls every 10ms until the
+// open succeeds.
+func (l *lock) Lock() error {
+	err := os.Chmod(l.fname, syscall.DMEXCL|0600)
+	if err != nil {
+		return err
+	}
+
+	for {
+		f, err := os.Open(l.fname)
+		if err == nil {
+			l.file = f
+			return nil
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+}
+
+// Unlock releases the lock by closing the file.
+func (l *lock) Unlock() error {
+	return l.file.Close()
+}
+
+// Destroy is a no-op on Plan 9.
+func (l *lock) Destroy() error {
+	return nil
+}
+
+// NewLock creates a Lock for the given path; no file handle is opened
+// until TryLock or Lock is called.
+func NewLock(file string) (Lock, error) {
+	l := &lock{fname: file}
+	return l, nil
+}

+ 87 - 0
vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go

@@ -0,0 +1,87 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build solaris
+
+package fileutil
+
+import (
+	"errors"
+	"os"
+	"syscall"
+)
+
+var (
+	ErrLocked = errors.New("file already locked")
+)
+
+type lock struct {
+	fd   int
+	file *os.File
+}
+
+func (l *lock) Name() string {
+	return l.file.Name()
+}
+
+func (l *lock) TryLock() error {
+	var lock syscall.Flock_t
+	lock.Start = 0
+	lock.Len = 0
+	lock.Pid = 0
+	lock.Type = syscall.F_WRLCK
+	lock.Whence = 0
+	lock.Pid = 0
+	err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
+	if err != nil && err == syscall.EAGAIN {
+		return ErrLocked
+	}
+	return err
+}
+
+func (l *lock) Lock() error {
+	var lock syscall.Flock_t
+	lock.Start = 0
+	lock.Len = 0
+	lock.Type = syscall.F_WRLCK
+	lock.Whence = 0
+	lock.Pid = 0
+	return syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
+}
+
+func (l *lock) Unlock() error {
+	var lock syscall.Flock_t
+	lock.Start = 0
+	lock.Len = 0
+	lock.Type = syscall.F_UNLCK
+	lock.Whence = 0
+	err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
+	if err != nil && err == syscall.EAGAIN {
+		return ErrLocked
+	}
+	return err
+}
+
+func (l *lock) Destroy() error {
+	return l.file.Close()
+}
+
+func NewLock(file string) (Lock, error) {
+	f, err := os.OpenFile(file, os.O_WRONLY, 0600)
+	if err != nil {
+		return nil, err
+	}
+	l := &lock{int(f.Fd()), f}
+	return l, nil
+}

+ 65 - 0
vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go

@@ -0,0 +1,65 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows,!plan9,!solaris
+
+package fileutil
+
+import (
+	"errors"
+	"os"
+	"syscall"
+)
+
+var (
+	ErrLocked = errors.New("file already locked")
+)
+
+type lock struct {
+	fd   int
+	file *os.File
+}
+
+func (l *lock) Name() string {
+	return l.file.Name()
+}
+
+func (l *lock) TryLock() error {
+	err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB)
+	if err != nil && err == syscall.EWOULDBLOCK {
+		return ErrLocked
+	}
+	return err
+}
+
+func (l *lock) Lock() error {
+	return syscall.Flock(l.fd, syscall.LOCK_EX)
+}
+
+func (l *lock) Unlock() error {
+	return syscall.Flock(l.fd, syscall.LOCK_UN)
+}
+
+func (l *lock) Destroy() error {
+	return l.file.Close()
+}
+
+func NewLock(file string) (Lock, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	l := &lock{int(f.Fd()), f}
+	return l, nil
+}

+ 60 - 0
vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go

@@ -0,0 +1,60 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package fileutil
+
+import (
+	"errors"
+	"os"
+)
+
+var (
+	ErrLocked = errors.New("file already locked")
+)
+
+type lock struct {
+	fd   int
+	file *os.File
+}
+
+func (l *lock) Name() string {
+	return l.file.Name()
+}
+
+func (l *lock) TryLock() error {
+	return nil
+}
+
+func (l *lock) Lock() error {
+	return nil
+}
+
+func (l *lock) Unlock() error {
+	return nil
+}
+
+func (l *lock) Destroy() error {
+	return l.file.Close()
+}
+
+func NewLock(file string) (Lock, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	l := &lock{int(f.Fd()), f}
+	return l, nil
+}

+ 28 - 0
vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go

@@ -0,0 +1,28 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux
+
+package fileutil
+
+import "os"
+
+// Preallocate tries to allocate the space for given
+// file. This operation is only supported on linux by a
+// few filesystems (btrfs, ext4, etc.).
+// If the operation is unsupported, no error will be returned.
+// Otherwise, the error encountered will be returned.
+func Preallocate(f *os.File, sizeInBytes int) error {
+	return nil
+}

+ 42 - 0
vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go

@@ -0,0 +1,42 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// Preallocate tries to allocate the space for given
+// file. This operation is only supported on linux by a
+// few filesystems (btrfs, ext4, etc.).
+// If the operation is unsupported, no error will be returned.
+// Otherwise, the error encountered will be returned.
+func Preallocate(f *os.File, sizeInBytes int) error {
+	// use mode = 1 to keep size
+	// see FALLOC_FL_KEEP_SIZE
+	err := syscall.Fallocate(int(f.Fd()), 1, 0, int64(sizeInBytes))
+	if err != nil {
+		errno, ok := err.(syscall.Errno)
+		// treat not support as nil error
+		if ok && errno == syscall.ENOTSUP {
+			return nil
+		}
+		return err
+	}
+	return nil
+}

+ 80 - 0
vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go

@@ -0,0 +1,80 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"time"
+)
+
+func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
+	errC := make(chan error, 1)
+	go func() {
+		for {
+			fnames, err := ReadDir(dirname)
+			if err != nil {
+				errC <- err
+				return
+			}
+			newfnames := make([]string, 0)
+			for _, fname := range fnames {
+				if strings.HasSuffix(fname, suffix) {
+					newfnames = append(newfnames, fname)
+				}
+			}
+			sort.Strings(newfnames)
+			for len(newfnames) > int(max) {
+				f := path.Join(dirname, newfnames[0])
+				l, err := NewLock(f)
+				if err != nil {
+					errC <- err
+					return
+				}
+				err = l.TryLock()
+				if err != nil {
+					break
+				}
+				err = os.Remove(f)
+				if err != nil {
+					errC <- err
+					return
+				}
+				err = l.Unlock()
+				if err != nil {
+					plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
+					errC <- err
+					return
+				}
+				err = l.Destroy()
+				if err != nil {
+					plog.Errorf("error destroying lock %s when purging file (%v)", l.Name(), err)
+					errC <- err
+					return
+				}
+				plog.Infof("purged file %s successfully", f)
+				newfnames = newfnames[1:]
+			}
+			select {
+			case <-time.After(interval):
+			case <-stop:
+				return
+			}
+		}
+	}()
+	return errC
+}

+ 26 - 0
vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go

@@ -0,0 +1,26 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux
+
+package fileutil
+
+import "os"
+
+// Fdatasync is similar to fsync(), but does not flush modified metadata
+// unless that metadata is needed in order to allow a subsequent data retrieval
+// to be correctly handled.
+func Fdatasync(f *os.File) error {
+	return f.Sync()
+}

+ 29 - 0
vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go

@@ -0,0 +1,29 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// Fdatasync is similar to fsync(), but does not flush modified metadata
+// unless that metadata is needed in order to allow a subsequent data retrieval
+// to be correctly handled.
+func Fdatasync(f *os.File) error {
+	return syscall.Fdatasync(int(f.Fd()))
+}

+ 78 - 0
vendor/src/github.com/coreos/etcd/pkg/idutil/id.go

@@ -0,0 +1,78 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package idutil implements utility functions for generating unique,
+// randomized ids.
+package idutil
+
+import (
+	"math"
+	"sync"
+	"time"
+)
+
+const (
+	tsLen     = 5 * 8
+	cntLen    = 8
+	suffixLen = tsLen + cntLen
+)
+
+// Generator generates unique identifiers based on counters, timestamps, and
+// a node member ID.
+//
+// The initial id is in this format:
+// High order byte is memberID, next 5 bytes are from timestamp,
+// and low order 2 bytes are 0s.
+// | prefix   | suffix              |
+// | 2 bytes  | 5 bytes   | 1 byte  |
+// | memberID | timestamp | cnt     |
+//
+// The timestamp 5 bytes is different when the machine is restart
+// after 1 ms and before 35 years.
+//
+// It increases suffix to generate the next id.
+// The count field may overflow to timestamp field, which is intentional.
+// It helps to extend the event window to 2^56. This doesn't break that
+// id generated after restart is unique because etcd throughput is <<
+// 256req/ms(250k reqs/second).
+type Generator struct {
+	mu sync.Mutex
+	// high order 2 bytes
+	prefix uint64
+	// low order 6 bytes
+	suffix uint64
+}
+
+func NewGenerator(memberID uint16, now time.Time) *Generator {
+	prefix := uint64(memberID) << suffixLen
+	unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond)
+	suffix := lowbit(unixMilli, tsLen) << cntLen
+	return &Generator{
+		prefix: prefix,
+		suffix: suffix,
+	}
+}
+
+// Next generates a id that is unique.
+func (g *Generator) Next() uint64 {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+	g.suffix++
+	id := g.prefix | lowbit(g.suffix, suffixLen)
+	return id
+}
+
+func lowbit(x uint64, n uint) uint64 {
+	return x & (math.MaxUint64 >> (64 - n))
+}

+ 60 - 0
vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go

@@ -0,0 +1,60 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil defines interfaces for handling Protocol Buffer objects.
+package pbutil
+
+import "github.com/coreos/pkg/capnslog"
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "flags")
+)
+
+type Marshaler interface {
+	Marshal() (data []byte, err error)
+}
+
+type Unmarshaler interface {
+	Unmarshal(data []byte) error
+}
+
+func MustMarshal(m Marshaler) []byte {
+	d, err := m.Marshal()
+	if err != nil {
+		plog.Panicf("marshal should never fail (%v)", err)
+	}
+	return d
+}
+
+func MustUnmarshal(um Unmarshaler, data []byte) {
+	if err := um.Unmarshal(data); err != nil {
+		plog.Panicf("unmarshal should never fail (%v)", err)
+	}
+}
+
+func MaybeUnmarshal(um Unmarshaler, data []byte) bool {
+	if err := um.Unmarshal(data); err != nil {
+		return false
+	}
+	return true
+}
+
+func GetBool(v *bool) (vv bool, set bool) {
+	if v == nil {
+		return false, false
+	}
+	return *v, true
+}
+
+func Boolp(b bool) *bool { return &b }

+ 57 - 0
vendor/src/github.com/coreos/etcd/raft/design.md

@@ -0,0 +1,57 @@
+## Progress
+
+Progress represents a follower’s progress in the view of the leader. Leader maintains progresses of all followers, and sends `replication message` to the follower based on its progress. 
+
+`replication message` is a `msgApp` with log entries.
+
+A progress has two attribute: `match` and `next`. `match` is the index of the highest known matched entry. If leader knows nothing about follower’s replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. Leader puts entries from `next` to its latest one in next `replication message`.
+
+A progress is in one of the three state: `probe`, `replicate`, `snapshot`. 
+
+```
+                            +--------------------------------------------------------+          
+                            |                  send snapshot                         |          
+                            |                                                        |          
+                  +---------+----------+                                  +----------v---------+
+              +--->       probe        |                                  |      snapshot      |
+              |   |  max inflight = 1  <----------------------------------+  max inflight = 0  |
+              |   +---------+----------+                                  +--------------------+
+              |             |            1. snapshot success                                    
+              |             |               (next=snapshot.index + 1)                           
+              |             |            2. snapshot failure                                    
+              |             |               (no change)                                         
+              |             |            3. receives msgAppResp(rej=false&&index>lastsnap.index)
+              |             |               (match=m.index,next=match+1)                        
+receives msgAppResp(rej=true)                                                                   
+(next=match+1)|             |                                                                   
+              |             |                                                                   
+              |             |                                                                   
+              |             |   receives msgAppResp(rej=false&&index>match)                     
+              |             |   (match=m.index,next=match+1)                                    
+              |             |                                                                   
+              |             |                                                                   
+              |             |                                                                   
+              |   +---------v----------+                                                        
+              |   |     replicate      |                                                        
+              +---+  max inflight = n  |                                                        
+                  +--------------------+                                                        
+```
+
+When the progress of a follower is in `probe` state, leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message` slowly and probing the actual progress of the follower. A `msgHeartbeatResp` or a `msgAppResp` with reject might trigger the sending of the next `replication message`.
+
+When the progress of a follower is in `replicate` state, leader sends `replication message`, then optimistically increases `next` to the latest entry sent. This is an optimized state for fast replicating log entries to the follower.
+
+When the progress of a follower is in `snapshot` state, leader stops sending any `replication message`.
+
+A newly elected leader sets the progresses of all the followers to `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends `replication message` to the follower and probes its progress.
+
+A progress changes to `replicate` when the follower replies with a non-rejection `msgAppResp`, which implies that it has matched the index sent. At this point, leader starts to stream log entries to the follower fast. The progress will fall back to `probe` when the follower replies a rejection `msgAppResp` or the link layer reports the follower is unreachable. We aggressively reset `next` to `match`+1 since if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in `msgAppResp`. (We might end up with sending some duplicate entries when aggressively reset `next` too low.  see open question)
+
+A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits until the success, failure or abortion of the previous snapshot sent. The progress will go back to `probe` after the sending result is applied.
+
+### Flow Control
+
+1. limit the max size of message sent per message. Max should be configurable.
+Lower the cost at probing state as we limit the size per message; lower the penalty when aggressively decreased to a too low `next`
+
+2. limit the # of in flight messages < N when in `replicate` state. N should be configurable. Most implementation will have a sending buffer on top of its actual network transport layer (not blocking raft node). We want to make sure raft does not overflow that buffer, which can cause message dropping and triggering a bunch of unnecessary resending repeatedly. 

+ 293 - 0
vendor/src/github.com/coreos/etcd/raft/doc.go

@@ -0,0 +1,293 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package raft sends and receives messages in the Protocol Buffer format
+defined in the raftpb package.
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+A simple example application, _raftexample_, is also available to help illustrate
+how to use this package in practice:
+https://github.com/coreos/etcd/tree/master/contrib/raftexample
+
+Usage
+
+The primary object in raft is a Node. You either start a Node from scratch
+using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a node from scratch:
+
+  storage := raft.NewMemoryStorage()
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+  n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+
+To restart a node from previous state:
+
+  storage := raft.NewMemoryStorage()
+
+  // recover the in-memory storage from persistent
+  // snapshot, state and entries.
+  storage.ApplySnapshot(snapshot)
+  storage.SetHardState(state)
+  storage.Append(entries)
+
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+
+  // restart raft without peer information.
+  // peer information is already included in the storage.
+  n := raft.RestartNode(c)
+
+Now that you are holding onto a Node you have a few responsibilities:
+
+First, you must read from the Node.Ready() channel and process the updates
+it contains. These steps may be performed in parallel, except as noted in step
+2.
+
+1. Write HardState, Entries, and Snapshot to persistent storage if they are
+not empty. Note that when writing an Entry with Index i, any
+previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that
+no messages be sent until after the latest HardState has been persisted to disk,
+and all Entries written by any previous Ready batch (Messages may be sent while
+entries from the same batch are being persisted). To reduce the I/O latency, an
+optimization can be applied to make leader write to disk in parallel with its
+followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
+MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
+large).
+
+Note: Marshalling messages is not thread-safe; it is important that you
+make sure that no new entries are persisted while marshalling.
+The easiest way to achieve this is to serialise the messages directly inside
+your main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine.
+If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
+to apply it to the node. The configuration change may be cancelled at this point
+by setting the NodeID field to zero before calling ApplyConfChange
+(but ApplyConfChange must be called one way or the other, and the decision to cancel
+must be based solely on the state machine and not external information such as
+the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates.
+This may be done at any time after step 1, although all updates must be processed
+in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an
+implementation of the Storage interface. The provided MemoryStorage
+type can be used for this (if you repopulate its state upon a
+restart), or you can supply your own disk-backed implementation.
+
+Third, when you receive a message from another node, pass it to Node.Step:
+
+	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+		n.Step(ctx, m)
+	}
+
+Finally, you need to call Node.Tick() at regular intervals (probably
+via a time.Ticker). Raft has two important timeouts: heartbeat and the
+election timeout. However, internally to the raft package time is
+represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+  for {
+    select {
+    case <-s.Ticker:
+      n.Tick()
+    case rd := <-s.Node.Ready():
+      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
+      send(rd.Messages)
+      if !raft.IsEmptySnap(rd.Snapshot) {
+        processSnapshot(rd.Snapshot)
+      }
+      for _, entry := range rd.CommittedEntries {
+        process(entry)
+        if entry.Type == raftpb.EntryConfChange {
+          var cc raftpb.ConfChange
+          cc.Unmarshal(entry.Data)
+          s.Node.ApplyConfChange(cc)
+        }
+      s.Node.Advance()
+    case <-s.done:
+      return
+    }
+  }
+
+To propose changes to the state machine from your node take your application
+data, serialize it into a byte slice and call:
+
+	n.Propose(ctx, data)
+
+If the proposal is committed, data will appear in committed entries with type
+raftpb.EntryNormal. There is no guarantee that a proposed command will be
+committed; you may have to re-propose after a timeout.
+
+To add or remove node in a cluster, build ConfChange struct 'cc' and call:
+
+	n.ProposeConfChange(ctx, cc)
+
+After config change is committed, some committed entry with type
+raftpb.EntryConfChange will be returned. You must apply it to node through:
+
+	var cc raftpb.ConfChange
+	cc.Unmarshal(data)
+	n.ApplyConfChange(cc)
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+Implementation notes
+
+This implementation is up to date with the final Raft thesis
+(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our
+implementation of the membership change protocol differs somewhat from
+that described in chapter 4. The key invariant that membership changes
+happen one node at a time is preserved, but in our implementation the
+membership change takes effect when its entry is applied, not when it
+is added to the log (so the entry is committed under the old
+membership instead of the new). This is equivalent in terms of safety,
+since the old and new configurations are guaranteed to overlap.
+
+To ensure that we do not attempt to commit two membership changes at
+once by matching log positions (which would be unsafe since they
+should have different quorum requirements), we simply disallow any
+proposed membership change while any uncommitted change appears in
+the leader's log.
+
+This approach introduces a problem when you try to remove a member
+from a two-member cluster: If one of the members dies before the
+other one receives the commit of the confchange entry, then the member
+cannot be removed any more since the cluster cannot make progress.
+For this reason it is highly recommended to use three or more nodes in
+every cluster.
+
+MessageType
+
+Package raft sends and receives message in Protocol Buffer format (defined
+in raftpb package). Each state (follower, candidate, leader) implements its
+own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
+advancing with the given raftpb.Message. Each step is determined by its
+raftpb.MessageType. Note that every step is checked by one common method
+'Step' that safety-checks the terms of node and incoming message to prevent
+stale log entries:
+
+	'MsgHup' is used for election. If a node is a follower or candidate, the
+	'tick' function in 'raft' struct is set as 'tickElection'. If a follower or
+	candidate has not received any heartbeat before the election timeout, it
+	passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
+	start a new election.
+
+	'MsgBeat' is an internal type that signals leaders to send a heartbeat of
+	the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
+	the 'raft' struct is set as 'tickHeartbeat', and sends periodic heartbeat
+	messages of the 'MsgBeat' type to its followers.
+
+	'MsgProp' proposes to append data to its log entries. This is a special
+	type to redirect proposals to leader. Therefore, send method overwrites
+	raftpb.Message's term with its HardState's term to avoid attaching its
+	local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step'
+	method, the leader first calls the 'appendEntry' method to append entries
+	to its log, and then calls 'bcastAppend' method to send those entries to
+	its peers. When passed to candidate, 'MsgProp' is dropped. When passed to
+	follower, 'MsgProp' is stored in follower's mailbox(msgs) by the send
+	method. It is stored with sender's ID and later forwarded to leader by
+	rafthttp package.
+
+	'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
+	which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp'
+	type. When 'MsgApp' is passed to candidate's Step method, candidate reverts
+	back to follower, because it indicates that there is a valid leader sending
+	'MsgApp' messages. Candidate and follower respond to this message in
+	'MsgAppResp' type.
+
+	'MsgAppResp' is response to log replication request('MsgApp'). When
+	'MsgApp' is passed to candidate or follower's Step method, it responds by
+	calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft
+	mailbox.
+
+	'MsgVote' requests votes for election. When a node is a follower or
+	candidate and 'MsgHup' is passed to its Step method, then the node calls
+	'campaign' method to campaign itself to become a leader. Once 'campaign'
+	method is called, the node becomes candidate and sends 'MsgVote' to peers
+	in cluster to request votes. When passed to leader or candidate's Step
+	method and the message's Term is lower than leader's or candidate's,
+	'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true).
+	If leader or candidate receives 'MsgVote' with higher term, it will revert
+	back to follower. When 'MsgVote' is passed to follower, it votes for the
+	sender only when sender's last term is greater than MsgVote's term or
+	sender's last term is equal to MsgVote's term but sender's last committed
+	index is greater than or equal to follower's.
+
+	'MsgVoteResp' contains responses from voting request. When 'MsgVoteResp' is
+	passed to candidate, the candidate calculates how many votes it has won. If
+	it's more than majority (quorum), it becomes leader and calls 'bcastAppend'.
+	If candidate receives majority of votes of denials, it reverts back to
+	follower.
+
+	'MsgSnap' requests to install a snapshot message. When a node has just
+	become a leader or the leader receives 'MsgProp' message, it calls
+	'bcastAppend' method, which then calls 'sendAppend' method to each
+	follower. In 'sendAppend', if a leader fails to get term or entries,
+	the leader requests snapshot by sending 'MsgSnap' type message.
+
+	'MsgSnapStatus' tells the result of snapshot install message. When a
+	follower rejected 'MsgSnap', it indicates the snapshot request with
+	'MsgSnap' had failed from network issues which causes the network layer
+	to fail to send out snapshots to its followers. Then leader considers
+	follower's progress as probe. When 'MsgSnap' were not rejected, it
+	indicates that the snapshot succeeded and the leader sets follower's
+	progress to probe and resumes its log replication.
+
+	'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed
+	to candidate and message's term is higher than candidate's, the candidate
+	reverts back to follower and updates its committed index from the one in
+	this heartbeat. And it sends the message to its mailbox. When
+	'MsgHeartbeat' is passed to follower's Step method and message's term is
+	higher than follower's, the follower updates its leaderID with the ID
+	from the message.
+
+	'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
+	is passed to leader's Step method, the leader knows which follower
+	responded. And only when the leader's last committed index is greater than
+	follower's Match index, the leader runs 'sendAppend` method.
+
+	'MsgUnreachable' tells that request(message) wasn't delivered. When
+	'MsgUnreachable' is passed to leader's Step method, the leader discovers
+	that the follower that sent this 'MsgUnreachable' is not reachable, often
+	indicating 'MsgApp' is lost. When follower's progress state is replicate,
+	the leader sets it back to probe.
+
+*/
+package raft

+ 361 - 0
vendor/src/github.com/coreos/etcd/raft/log.go

@@ -0,0 +1,361 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+	"log"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
// raftLog manages the log of a single raft node. It stitches together a
// stable Storage backend with an in-memory "unstable" tail of entries and
// an optional incoming snapshot, and tracks the committed/applied cursors.
type raftLog struct {
	// storage contains all stable entries since the last snapshot.
	storage Storage

	// unstable contains all unstable entries and snapshot.
	// they will be saved into storage.
	unstable unstable

	// committed is the highest log position that is known to be in
	// stable storage on a quorum of nodes.
	committed uint64
	// applied is the highest log position that the application has
	// been instructed to apply to its state machine.
	// Invariant: applied <= committed
	applied uint64

	logger Logger
}

// newLog returns log using the given storage. It recovers the log to the state
// that it just commits and applies the latest snapshot.
// A nil storage is a programmer error and panics immediately.
func newLog(storage Storage, logger Logger) *raftLog {
	if storage == nil {
		log.Panic("storage must not be nil")
	}
	log := &raftLog{
		storage: storage,
		logger:  logger,
	}
	firstIndex, err := storage.FirstIndex()
	if err != nil {
		panic(err) // TODO(bdarnell)
	}
	lastIndex, err := storage.LastIndex()
	if err != nil {
		panic(err) // TODO(bdarnell)
	}
	// The unstable window begins right after the last entry in stable storage.
	log.unstable.offset = lastIndex + 1
	log.unstable.logger = logger
	// Initialize our committed and applied pointers to the time of the last compaction.
	log.committed = firstIndex - 1
	log.applied = firstIndex - 1

	return log
}

// String returns a human-readable summary of the log cursors for debugging.
func (l *raftLog) String() string {
	return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries))
}
+
// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
// it returns (last index of new entries, true).
func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
	lastnewi = index + uint64(len(ents))
	// Only accept the entries if our log matches the leader's at (index, logTerm).
	if l.matchTerm(index, logTerm) {
		ci := l.findConflict(ents)
		switch {
		case ci == 0:
			// No conflict and nothing new: the log already contains ents.
		case ci <= l.committed:
			// Divergence at or below the commit point violates the raft
			// invariant that committed entries never change.
			l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
		default:
			// Append starting from the first conflicting or new entry.
			offset := index + 1
			l.append(ents[ci-offset:]...)
		}
		// Advance commit, but never past the last index we actually hold.
		l.commitTo(min(committed, lastnewi))
		return lastnewi, true
	}
	return 0, false
}

// append adds the given entries to the unstable log and returns the new last
// index. Appending below the commit point is a fatal invariant violation.
func (l *raftLog) append(ents ...pb.Entry) uint64 {
	if len(ents) == 0 {
		return l.lastIndex()
	}
	if after := ents[0].Index - 1; after < l.committed {
		l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
	}
	l.unstable.truncateAndAppend(ents)
	return l.lastIndex()
}

// findConflict finds the index of the conflict.
// It returns the first pair of conflicting entries between the existing
// entries and the given entries, if there are any.
// If there is no conflicting entries, and the existing entries contains
// all the given entries, zero will be returned.
// If there is no conflicting entries, but the given entries contains new
// entries, the index of the first new entry will be returned.
// An entry is considered to be conflicting if it has the same index but
// a different term.
// The first entry MUST have an index equal to the argument 'from'.
// The index of the given entries MUST be continuously increasing.
func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
	for _, ne := range ents {
		if !l.matchTerm(ne.Index, ne.Term) {
			if ne.Index <= l.lastIndex() {
				// A genuine conflict (same index, different term) is worth
				// logging; an index past lastIndex is merely a new entry.
				l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
					ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
			}
			return ne.Index
		}
	}
	return 0
}

// unstableEntries returns the not-yet-persisted tail of the log, or nil.
func (l *raftLog) unstableEntries() []pb.Entry {
	if len(l.unstable.entries) == 0 {
		return nil
	}
	return l.unstable.entries
}

// nextEnts returns all the available entries for execution.
// If applied is smaller than the index of snapshot, it returns all committed
// entries after the index of snapshot.
func (l *raftLog) nextEnts() (ents []pb.Entry) {
	off := max(l.applied+1, l.firstIndex())
	if l.committed+1 > off {
		ents, err := l.slice(off, l.committed+1, noLimit)
		if err != nil {
			l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
		}
		return ents
	}
	return nil
}
+
+// hasNextEnts returns if there is any available entries for execution. This
+// is a fast check without heavy raftLog.slice() in raftLog.nextEnts().
+func (l *raftLog) hasNextEnts() bool {
+	off := max(l.applied+1, l.firstIndex())
+	if l.committed+1 > off {
+		return true
+	}
+	return false
+}
+
// snapshot returns the most recent snapshot: a pending unstable snapshot if
// one exists, otherwise whatever stable storage holds.
func (l *raftLog) snapshot() (pb.Snapshot, error) {
	if l.unstable.snapshot != nil {
		return *l.unstable.snapshot, nil
	}
	return l.storage.Snapshot()
}

// firstIndex returns the first retained log index, preferring the unstable
// snapshot's view over stable storage.
func (l *raftLog) firstIndex() uint64 {
	if i, ok := l.unstable.maybeFirstIndex(); ok {
		return i
	}
	index, err := l.storage.FirstIndex()
	if err != nil {
		panic(err) // TODO(bdarnell)
	}
	return index
}

// lastIndex returns the index of the last entry, preferring the unstable
// tail over stable storage.
func (l *raftLog) lastIndex() uint64 {
	if i, ok := l.unstable.maybeLastIndex(); ok {
		return i
	}
	i, err := l.storage.LastIndex()
	if err != nil {
		panic(err) // TODO(bdarnell)
	}
	return i
}

// commitTo advances the commit cursor to tocommit. Committing past the end
// of the log indicates corruption and is fatal.
func (l *raftLog) commitTo(tocommit uint64) {
	// never decrease commit
	if l.committed < tocommit {
		if l.lastIndex() < tocommit {
			l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
		}
		l.committed = tocommit
	}
}

// appliedTo records that the application has applied entries up through i.
// i must stay within [applied, committed]; i == 0 is a no-op.
func (l *raftLog) appliedTo(i uint64) {
	if i == 0 {
		return
	}
	if l.committed < i || i < l.applied {
		l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
	}
	l.applied = i
}

// stableTo marks unstable entries up through (i, t) as persisted.
func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) }

// stableSnapTo marks the unstable snapshot at index i as persisted.
func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }

// lastTerm returns the term of the last entry in the log.
func (l *raftLog) lastTerm() uint64 {
	t, err := l.term(l.lastIndex())
	if err != nil {
		l.logger.Panicf("unexpected error when getting the last term (%v)", err)
	}
	return t
}
+
// term returns the term of the entry at index i, consulting the unstable
// portion first and falling back to stable storage. Indexes outside the
// valid range yield (0, nil); a compacted index yields (0, ErrCompacted).
func (l *raftLog) term(i uint64) (uint64, error) {
	// the valid term range is [index of dummy entry, last index]
	dummyIndex := l.firstIndex() - 1
	if i < dummyIndex || i > l.lastIndex() {
		// TODO: return an error instead?
		return 0, nil
	}

	if t, ok := l.unstable.maybeTerm(i); ok {
		return t, nil
	}

	t, err := l.storage.Term(i)
	if err == nil {
		return t, nil
	}
	if err == ErrCompacted {
		return 0, err
	}
	panic(err) // TODO(bdarnell)
}

// entries returns log entries starting at index i, limited to maxsize bytes.
func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) {
	if i > l.lastIndex() {
		return nil, nil
	}
	return l.slice(i, l.lastIndex()+1, maxsize)
}

// allEntries returns all entries in the log.
func (l *raftLog) allEntries() []pb.Entry {
	ents, err := l.entries(l.firstIndex(), noLimit)
	if err == nil {
		return ents
	}
	if err == ErrCompacted { // try again if there was a racing compaction
		return l.allEntries()
	}
	// TODO (xiangli): handle error?
	panic(err)
}

// isUpToDate determines if the given (lastIndex,term) log is more up-to-date
// by comparing the index and term of the last entries in the existing logs.
// If the logs have last entries with different terms, then the log with the
// later term is more up-to-date. If the logs end with the same term, then
// whichever log has the larger lastIndex is more up-to-date. If the logs are
// the same, the given log is up-to-date.
func (l *raftLog) isUpToDate(lasti, term uint64) bool {
	return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex())
}

// matchTerm reports whether the entry at index i has the given term.
// Any lookup error (e.g. compaction) counts as a mismatch.
func (l *raftLog) matchTerm(i, term uint64) bool {
	t, err := l.term(i)
	if err != nil {
		return false
	}
	return t == term
}

// maybeCommit advances the commit cursor to maxIndex if maxIndex is beyond
// the current commit and carries the given term. Returns whether it moved.
func (l *raftLog) maybeCommit(maxIndex, term uint64) bool {
	if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term {
		l.commitTo(maxIndex)
		return true
	}
	return false
}

// restore resets the log to the state described by the snapshot s.
func (l *raftLog) restore(s pb.Snapshot) {
	l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
	l.committed = s.Metadata.Index
	l.unstable.restore(s)
}
+
// slice returns a slice of log entries from lo through hi-1, inclusive.
// Entries are drawn from stable storage below unstable.offset and from the
// unstable tail above it, and the result is capped at maxSize bytes.
func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
	err := l.mustCheckOutOfBounds(lo, hi)
	if err != nil {
		return nil, err
	}
	if lo == hi {
		return nil, nil
	}
	var ents []pb.Entry
	if lo < l.unstable.offset {
		storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize)
		if err == ErrCompacted {
			return nil, err
		} else if err == ErrUnavailable {
			l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset))
		} else if err != nil {
			panic(err) // TODO(bdarnell)
		}

		// check if ents has reached the size limitation
		if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo {
			return storedEnts, nil
		}

		ents = storedEnts
	}
	if hi > l.unstable.offset {
		unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
		if len(ents) > 0 {
			// Copy before appending so we never mutate the backing array
			// returned by storage.Entries.
			ents = append([]pb.Entry{}, ents...)
			ents = append(ents, unstable...)
		} else {
			ents = unstable
		}
	}
	return limitSize(ents, maxSize), nil
}
+
+// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries)
+func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
+	if lo > hi {
+		l.logger.Panicf("invalid slice %d > %d", lo, hi)
+	}
+	fi := l.firstIndex()
+	if lo < fi {
+		return ErrCompacted
+	}
+
+	length := l.lastIndex() - fi + 1
+	if lo < fi || hi > fi+length {
+		l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
+	}
+	return nil
+}
+
+func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 {
+	if err == nil {
+		return t
+	}
+	if err == ErrCompacted {
+		return 0
+	}
+	l.logger.Panicf("unexpected error (%v)", err)
+	return 0
+}

+ 139 - 0
vendor/src/github.com/coreos/etcd/raft/log_unstable.go

@@ -0,0 +1,139 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "github.com/coreos/etcd/raft/raftpb"
+
// unstable.entries[i] has raft log position i+unstable.offset.
// Note that unstable.offset may be less than the highest log
// position in storage; this means that the next write to storage
// might need to truncate the log before persisting unstable.entries.
type unstable struct {
	// the incoming unstable snapshot, if any.
	snapshot *pb.Snapshot
	// all entries that have not yet been written to storage.
	entries []pb.Entry
	// offset is the raft log index of entries[0].
	offset uint64

	logger Logger
}
+
+// maybeFirstIndex returns the index of the first possible entry in entries
+// if it has a snapshot.
+func (u *unstable) maybeFirstIndex() (uint64, bool) {
+	if u.snapshot != nil {
+		return u.snapshot.Metadata.Index + 1, true
+	}
+	return 0, false
+}
+
+// maybeLastIndex returns the last index if it has at least one
+// unstable entry or snapshot.
+func (u *unstable) maybeLastIndex() (uint64, bool) {
+	if l := len(u.entries); l != 0 {
+		return u.offset + uint64(l) - 1, true
+	}
+	if u.snapshot != nil {
+		return u.snapshot.Metadata.Index, true
+	}
+	return 0, false
+}
+
+// maybeTerm returns the term of the entry at index i, if there
+// is any.
+func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
+	if i < u.offset {
+		if u.snapshot == nil {
+			return 0, false
+		}
+		if u.snapshot.Metadata.Index == i {
+			return u.snapshot.Metadata.Term, true
+		}
+		return 0, false
+	}
+
+	last, ok := u.maybeLastIndex()
+	if !ok {
+		return 0, false
+	}
+	if i > last {
+		return 0, false
+	}
+	return u.entries[i-u.offset].Term, true
+}
+
+func (u *unstable) stableTo(i, t uint64) {
+	gt, ok := u.maybeTerm(i)
+	if !ok {
+		return
+	}
+	// if i < offset, term is matched with the snapshot
+	// only update the unstable entries if term is matched with
+	// an unstable entry.
+	if gt == t && i >= u.offset {
+		u.entries = u.entries[i+1-u.offset:]
+		u.offset = i + 1
+	}
+}
+
+func (u *unstable) stableSnapTo(i uint64) {
+	if u.snapshot != nil && u.snapshot.Metadata.Index == i {
+		u.snapshot = nil
+	}
+}
+
+func (u *unstable) restore(s pb.Snapshot) {
+	u.offset = s.Metadata.Index + 1
+	u.entries = nil
+	u.snapshot = &s
+}
+
// truncateAndAppend merges ents into the unstable log. Depending on where
// ents starts relative to the current window it appends directly, replaces
// the whole window, or truncates the tail before appending.
func (u *unstable) truncateAndAppend(ents []pb.Entry) {
	// after is the index immediately preceding the first incoming entry.
	after := ents[0].Index - 1
	switch {
	case after == u.offset+uint64(len(u.entries))-1:
		// after is the last index in the u.entries
		// directly append
		u.entries = append(u.entries, ents...)
	case after < u.offset:
		u.logger.Infof("replace the unstable entries from index %d", after+1)
		// The log is being truncated to before our current offset
		// portion, so set the offset and replace the entries
		u.offset = after + 1
		u.entries = ents
	default:
		// truncate to after and copy to u.entries
		// then append
		u.logger.Infof("truncate the unstable entries to index %d", after)
		u.entries = append([]pb.Entry{}, u.slice(u.offset, after+1)...)
		u.entries = append(u.entries, ents...)
	}
}
+
// slice returns the unstable entries covering log positions [lo, hi).
// The range is validated first; out-of-bounds access panics.
func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
	u.mustCheckOutOfBounds(lo, hi)
	return u.entries[lo-u.offset : hi-u.offset]
}

// u.offset <= lo <= hi <= u.offset+len(u.offset)
func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
	if lo > hi {
		u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
	}
	upper := u.offset + uint64(len(u.entries))
	if lo < u.offset || hi > upper {
		u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
	}
}

+ 126 - 0
vendor/src/github.com/coreos/etcd/raft/logger.go

@@ -0,0 +1,126 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+)
+
// Logger is the leveled logging interface the raft package logs through.
// Implementations are supplied by the embedding application via SetLogger.
type Logger interface {
	Debug(v ...interface{})
	Debugf(format string, v ...interface{})

	Error(v ...interface{})
	Errorf(format string, v ...interface{})

	Info(v ...interface{})
	Infof(format string, v ...interface{})

	Warning(v ...interface{})
	Warningf(format string, v ...interface{})

	Fatal(v ...interface{})
	Fatalf(format string, v ...interface{})

	Panic(v ...interface{})
	Panicf(format string, v ...interface{})
}

// SetLogger replaces the package-level logger used by raft.
func SetLogger(l Logger) { raftLogger = l }

var (
	// NOTE(review): the "raft" prefix has no trailing separator — confirm intended.
	defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
	discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
	raftLogger    = Logger(defaultLogger)
)

const (
	// calldepth makes log.Output report the caller of the DefaultLogger
	// method rather than the method itself.
	calldepth = 2
)
+
// DefaultLogger is a default implementation of the Logger interface.
// It wraps a standard-library *log.Logger; debug output is off until
// EnableDebug is called.
type DefaultLogger struct {
	*log.Logger
	debug bool
}

// EnableTimestamps adds date and time to every log line.
func (l *DefaultLogger) EnableTimestamps() {
	l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
}

// EnableDebug turns on Debug/Debugf output.
func (l *DefaultLogger) EnableDebug() {
	l.debug = true
}
+
// Debug logs at DEBUG level; suppressed unless EnableDebug was called.
func (l *DefaultLogger) Debug(v ...interface{}) {
	if l.debug {
		l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
	}
}

// Debugf logs a formatted message at DEBUG level; suppressed unless
// EnableDebug was called.
func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
	if l.debug {
		l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
	}
}

// Info logs at INFO level.
func (l *DefaultLogger) Info(v ...interface{}) {
	l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
}

// Infof logs a formatted message at INFO level.
func (l *DefaultLogger) Infof(format string, v ...interface{}) {
	l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
}

// Error logs at ERROR level.
func (l *DefaultLogger) Error(v ...interface{}) {
	l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
}

// Errorf logs a formatted message at ERROR level.
func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
	l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
}

// Warning logs at WARN level.
func (l *DefaultLogger) Warning(v ...interface{}) {
	l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
}

// Warningf logs a formatted message at WARN level.
func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
	l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
}

// Fatal logs at FATAL level and terminates the process.
func (l *DefaultLogger) Fatal(v ...interface{}) {
	l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
	os.Exit(1)
}

// Fatalf logs a formatted message at FATAL level and terminates the process.
func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
	l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
	os.Exit(1)
}
+
+func (l *DefaultLogger) Panic(v ...interface{}) {
+	l.Logger.Panic(v)
+}
+
+func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
+	l.Logger.Panicf(format, v...)
+}
+
// header prefixes msg with its severity level, producing "LEVEL: msg".
func header(lvl, msg string) string {
	return fmt.Sprint(lvl, ": ", msg)
}

+ 488 - 0
vendor/src/github.com/coreos/etcd/raft/node.go

@@ -0,0 +1,488 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+
+	"golang.org/x/net/context"
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
// SnapshotStatus is the outcome the application reports back (via
// Node.ReportSnapshot) after attempting to deliver a snapshot.
type SnapshotStatus int

const (
	SnapshotFinish  SnapshotStatus = 1
	SnapshotFailure SnapshotStatus = 2
)

var (
	// emptyState is the zero HardState, used to detect "no update".
	emptyState = pb.HardState{}

	// ErrStopped is returned by methods on Nodes that have been stopped.
	ErrStopped = errors.New("raft: stopped")
)
+
// SoftState provides state that is useful for logging and debugging.
// The state is volatile and does not need to be persisted to the WAL.
type SoftState struct {
	Lead      uint64
	RaftState StateType
}

// equal reports whether two SoftStates carry the same leader and role.
func (a *SoftState) equal(b *SoftState) bool {
	return a.Lead == b.Lead && a.RaftState == b.RaftState
}
+
// Ready encapsulates the entries and messages that are ready to read,
// be saved to stable storage, committed or sent to other peers.
// All fields in Ready are read-only.
type Ready struct {
	// The current volatile state of a Node.
	// SoftState will be nil if there is no update.
	// It is not required to consume or store SoftState.
	*SoftState

	// The current state of a Node to be saved to stable storage BEFORE
	// Messages are sent.
	// HardState will be equal to empty state if there is no update.
	pb.HardState

	// Entries specifies entries to be saved to stable storage BEFORE
	// Messages are sent.
	Entries []pb.Entry

	// Snapshot specifies the snapshot to be saved to stable storage.
	Snapshot pb.Snapshot

	// CommittedEntries specifies entries to be committed to a
	// store/state-machine. These have previously been committed to stable
	// store.
	CommittedEntries []pb.Entry

	// Messages specifies outbound messages to be sent AFTER Entries are
	// committed to stable storage.
	// If it contains a MsgSnap message, the application MUST report back to raft
	// when the snapshot has been received or has failed by calling ReportSnapshot.
	Messages []pb.Message
}
+
+func isHardStateEqual(a, b pb.HardState) bool {
+	return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
+}
+
+// IsEmptyHardState returns true if the given HardState is empty.
+func IsEmptyHardState(st pb.HardState) bool {
+	return isHardStateEqual(st, emptyState)
+}
+
+// IsEmptySnap returns true if the given Snapshot is empty.
+func IsEmptySnap(sp pb.Snapshot) bool {
+	return sp.Metadata.Index == 0
+}
+
+func (rd Ready) containsUpdates() bool {
+	return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
+		!IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
+		len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0
+}
+
// Node represents a node in a raft cluster.
// It is the application-facing handle around the internal raft state
// machine; all methods communicate with the background run() goroutine.
type Node interface {
	// Tick increments the internal logical clock for the Node by a single tick. Election
	// timeouts and heartbeat timeouts are in units of ticks.
	Tick()
	// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
	Campaign(ctx context.Context) error
	// Propose proposes that data be appended to the log.
	Propose(ctx context.Context, data []byte) error
	// ProposeConfChange proposes config change.
	// At most one ConfChange can be in the process of going through consensus.
	// Application needs to call ApplyConfChange when applying EntryConfChange type entry.
	ProposeConfChange(ctx context.Context, cc pb.ConfChange) error
	// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
	Step(ctx context.Context, msg pb.Message) error

	// Ready returns a channel that returns the current point-in-time state.
	// Users of the Node must call Advance after retrieving the state returned by Ready.
	//
	// NOTE: No committed entries from the next Ready may be applied until all committed entries
	// and snapshots from the previous one have finished.
	Ready() <-chan Ready

	// Advance notifies the Node that the application has saved progress up to the last Ready.
	// It prepares the node to return the next available Ready.
	//
	// The application should generally call Advance after it applies the entries in last Ready.
	//
	// However, as an optimization, the application may call Advance while it is applying the
	// commands. For example. when the last Ready contains a snapshot, the application might take
	// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
	// progress, it can call Advance before finish applying the last ready. To make this optimization
	// work safely, when the application receives a Ready with softState.RaftState equal to Candidate
	// it MUST apply all pending configuration changes if there is any.
	//
	// Here is a simple solution that waiting for ALL pending entries to get applied.
	// ```
	// ...
	// rd := <-n.Ready()
	// go apply(rd.CommittedEntries) // optimization to apply asynchronously in FIFO order.
	// if rd.SoftState.RaftState == StateCandidate {
	//     waitAllApplied()
	// }
	// n.Advance()
	// ...
	//```
	Advance()
	// ApplyConfChange applies config change to the local node.
	// Returns an opaque ConfState protobuf which must be recorded
	// in snapshots. Will never return nil; it returns a pointer only
	// to match MemoryStorage.Compact.
	ApplyConfChange(cc pb.ConfChange) *pb.ConfState
	// Status returns the current status of the raft state machine.
	Status() Status
	// ReportUnreachable reports the given node is not reachable for the last send.
	ReportUnreachable(id uint64)
	// ReportSnapshot reports the status of the sent snapshot.
	ReportSnapshot(id uint64, status SnapshotStatus)
	// Stop performs any necessary termination of the Node.
	Stop()
}

// Peer identifies a member of the initial cluster passed to StartNode.
// Context is opaque application data carried in the bootstrap ConfChange.
type Peer struct {
	ID      uint64
	Context []byte
}
+
// StartNode returns a new Node given configuration and a list of raft peers.
// It appends a ConfChangeAddNode entry for each given peer to the initial log.
func StartNode(c *Config, peers []Peer) Node {
	r := newRaft(c)
	// become the follower at term 1 and apply initial configuration
	// entries of term 1
	r.becomeFollower(1, None)
	for _, peer := range peers {
		cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
		d, err := cc.Marshal()
		if err != nil {
			panic("unexpected marshal error")
		}
		e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d}
		r.raftLog.append(e)
	}
	// Mark these initial entries as committed.
	// TODO(bdarnell): These entries are still unstable; do we need to preserve
	// the invariant that committed < unstable?
	r.raftLog.committed = r.raftLog.lastIndex()
	// Now apply them, mainly so that the application can call Campaign
	// immediately after StartNode in tests. Note that these nodes will
	// be added to raft twice: here and when the application's Ready
	// loop calls ApplyConfChange. The calls to addNode must come after
	// all calls to raftLog.append so progress.next is set after these
	// bootstrapping entries (it is an error if we try to append these
	// entries since they have already been committed).
	// We do not set raftLog.applied so the application will be able
	// to observe all conf changes via Ready.CommittedEntries.
	for _, peer := range peers {
		r.addNode(peer.ID)
	}

	// Drive the state machine from a background goroutine; the returned
	// handle communicates with it over channels.
	n := newNode()
	go n.run(r)
	return &n
}

// RestartNode is similar to StartNode but does not take a list of peers.
// The current membership of the cluster will be restored from the Storage.
// If the caller has an existing state machine, pass in the last log index that
// has been applied to it; otherwise use zero.
func RestartNode(c *Config) Node {
	r := newRaft(c)

	n := newNode()
	go n.run(r)
	return &n
}
+
// node is the canonical implementation of the Node interface
// All interaction with the raft state machine happens over these channels,
// serviced by the run() goroutine; done is closed when run() exits.
type node struct {
	propc      chan pb.Message
	recvc      chan pb.Message
	confc      chan pb.ConfChange
	confstatec chan pb.ConfState
	readyc     chan Ready
	advancec   chan struct{}
	tickc      chan struct{}
	done       chan struct{}
	stop       chan struct{}
	status     chan chan Status
}

// newNode constructs a node with all channels allocated (all unbuffered,
// so every operation synchronizes with the run() loop).
func newNode() node {
	return node{
		propc:      make(chan pb.Message),
		recvc:      make(chan pb.Message),
		confc:      make(chan pb.ConfChange),
		confstatec: make(chan pb.ConfState),
		readyc:     make(chan Ready),
		advancec:   make(chan struct{}),
		tickc:      make(chan struct{}),
		done:       make(chan struct{}),
		stop:       make(chan struct{}),
		status:     make(chan chan Status),
	}
}
+
+func (n *node) Stop() {
+	select {
+	case n.stop <- struct{}{}:
+		// Not already stopped, so trigger it
+	case <-n.done:
+		// Node has already been stopped - no need to do anything
+		return
+	}
+	// Block until the stop has been acknowledged by run()
+	<-n.done
+}
+
// run is the node's event loop. It owns the raft state machine r: it feeds
// in proposals, incoming messages, config changes and ticks, publishes
// Ready snapshots to the application, and applies the application's
// Advance acknowledgements. It exits (closing n.done) on n.stop.
func (n *node) run(r *raft) {
	var propc chan pb.Message
	var readyc chan Ready
	var advancec chan struct{}
	var prevLastUnstablei, prevLastUnstablet uint64
	var havePrevLastUnstablei bool
	var prevSnapi uint64
	var rd Ready

	lead := None
	prevSoftSt := r.softState()
	prevHardSt := emptyState

	for {
		// Publish a new Ready only after the previous one was Advanced
		// (advancec != nil disables readyc until then), and only when
		// there is actually something to report.
		if advancec != nil {
			readyc = nil
		} else {
			rd = newReady(r, prevSoftSt, prevHardSt)
			if rd.containsUpdates() {
				readyc = n.readyc
			} else {
				readyc = nil
			}
		}

		// Enable the proposal channel only while a leader is known;
		// otherwise proposals would have nowhere to go.
		if lead != r.lead {
			if r.hasLeader() {
				if lead == None {
					r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
				} else {
					r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
				}
				propc = n.propc
			} else {
				r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
				propc = nil
			}
			lead = r.lead
		}

		select {
		// TODO: maybe buffer the config propose if there exists one (the way
		// described in raft dissertation)
		// Currently it is dropped in Step silently.
		case m := <-propc:
			m.From = r.id
			r.Step(m)
		case m := <-n.recvc:
			// filter out response message from unknown From.
			if _, ok := r.prs[m.From]; ok || !IsResponseMsg(m) {
				r.Step(m) // raft never returns an error
			}
		case cc := <-n.confc:
			if cc.NodeID == None {
				// A ConfChange without a node ID only clears the pending
				// conf flag; reply with the current membership.
				r.resetPendingConf()
				select {
				case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
				case <-n.done:
				}
				break
			}
			switch cc.Type {
			case pb.ConfChangeAddNode:
				r.addNode(cc.NodeID)
			case pb.ConfChangeRemoveNode:
				// block incoming proposal when local node is
				// removed
				if cc.NodeID == r.id {
					n.propc = nil
				}
				r.removeNode(cc.NodeID)
			case pb.ConfChangeUpdateNode:
				r.resetPendingConf()
			default:
				panic("unexpected conf type")
			}
			select {
			case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
			case <-n.done:
			}
		case <-n.tickc:
			r.tick()
		case readyc <- rd:
			// The application accepted this Ready; remember what was
			// handed out so Advance can mark it stable/applied later.
			if rd.SoftState != nil {
				prevSoftSt = rd.SoftState
			}
			if len(rd.Entries) > 0 {
				prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index
				prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term
				havePrevLastUnstablei = true
			}
			if !IsEmptyHardState(rd.HardState) {
				prevHardSt = rd.HardState
			}
			if !IsEmptySnap(rd.Snapshot) {
				prevSnapi = rd.Snapshot.Metadata.Index
			}
			r.msgs = nil
			advancec = n.advancec
		case <-advancec:
			// The application persisted/applied the previous Ready:
			// advance applied, mark entries stable, drop the snapshot.
			if prevHardSt.Commit != 0 {
				r.raftLog.appliedTo(prevHardSt.Commit)
			}
			if havePrevLastUnstablei {
				r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet)
				havePrevLastUnstablei = false
			}
			r.raftLog.stableSnapTo(prevSnapi)
			advancec = nil
		case c := <-n.status:
			c <- getStatus(r)
		case <-n.stop:
			close(n.done)
			return
		}
	}
}
+
// Tick increments the internal logical clock for this Node. Election timeouts
// and heartbeat timeouts are in units of ticks.
func (n *node) Tick() {
	select {
	case n.tickc <- struct{}{}:
	case <-n.done:
		// Node stopped: the tick is silently dropped.
	}
}

// Campaign causes this node to start an election (MsgHup).
func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }

// Propose submits data to be appended to the raft log.
func (n *node) Propose(ctx context.Context, data []byte) error {
	return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
}

// Step feeds a message received from the network into the state machine.
func (n *node) Step(ctx context.Context, m pb.Message) error {
	// ignore unexpected local messages receiving over network
	if IsLocalMsg(m) {
		// TODO: return an error?
		return nil
	}
	return n.step(ctx, m)
}

// ProposeConfChange proposes a configuration change, carried as a marshaled
// ConfChange inside an EntryConfChange log entry.
func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error {
	data, err := cc.Marshal()
	if err != nil {
		return err
	}
	return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}})
}

// Step advances the state machine using msgs. The ctx.Err() will be returned,
// if any.
func (n *node) step(ctx context.Context, m pb.Message) error {
	// Proposals go to propc (only open while a leader is known);
	// everything else goes to recvc.
	ch := n.recvc
	if m.Type == pb.MsgProp {
		ch = n.propc
	}

	select {
	case ch <- m:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
}
+
// Ready returns the channel on which run() publishes point-in-time state.
func (n *node) Ready() <-chan Ready { return n.readyc }

// Advance tells run() that the last Ready has been persisted and applied.
func (n *node) Advance() {
	select {
	case n.advancec <- struct{}{}:
	case <-n.done:
	}
}

// ApplyConfChange hands a config change to run() and waits for the
// resulting membership. Returns the zero ConfState if the node stops first.
func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
	var cs pb.ConfState
	select {
	case n.confc <- cc:
	case <-n.done:
	}
	select {
	case cs = <-n.confstatec:
	case <-n.done:
	}
	return &cs
}

// Status fetches a snapshot of the raft state machine's status from run().
// NOTE(review): unlike the other methods this send has no <-n.done guard,
// so it looks like it blocks forever on a stopped node — confirm intended.
func (n *node) Status() Status {
	c := make(chan Status)
	n.status <- c
	return <-c
}

// ReportUnreachable reports that the last send to node id failed.
func (n *node) ReportUnreachable(id uint64) {
	select {
	case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
	case <-n.done:
	}
}

// ReportSnapshot reports the outcome of a snapshot sent to node id.
func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
	rej := status == SnapshotFailure

	select {
	case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
	case <-n.done:
	}
}
+
// newReady assembles the next Ready from the raft state machine, including
// soft/hard state only when they changed relative to the previous values.
func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
	rd := Ready{
		Entries:          r.raftLog.unstableEntries(),
		CommittedEntries: r.raftLog.nextEnts(),
		Messages:         r.msgs,
	}
	if softSt := r.softState(); !softSt.equal(prevSoftSt) {
		rd.SoftState = softSt
	}
	if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
		rd.HardState = hardSt
	}
	if r.raftLog.unstable.snapshot != nil {
		rd.Snapshot = *r.raftLog.unstable.snapshot
	}
	return rd
}

+ 245 - 0
vendor/src/github.com/coreos/etcd/raft/progress.go

@@ -0,0 +1,245 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import "fmt"
+
+const (
+	ProgressStateProbe ProgressStateType = iota
+	ProgressStateReplicate
+	ProgressStateSnapshot
+)
+
+type ProgressStateType uint64
+
+var prstmap = [...]string{
+	"ProgressStateProbe",
+	"ProgressStateReplicate",
+	"ProgressStateSnapshot",
+}
+
+func (st ProgressStateType) String() string { return prstmap[uint64(st)] }
+
+// Progress represents a follower’s progress in the view of the leader. Leader maintains
+// progresses of all followers, and sends entries to the follower based on its progress.
+type Progress struct {
+	Match, Next uint64
+	// State defines how the leader should interact with the follower.
+	//
+	// When in ProgressStateProbe, leader sends at most one replication message
+	// per heartbeat interval. It also probes actual progress of the follower.
+	//
+	// When in ProgressStateReplicate, leader optimistically increases next
+	// to the latest entry sent after sending replication message. This is
+	// an optimized state for fast replicating log entries to the follower.
+	//
+	// When in ProgressStateSnapshot, leader should have sent out snapshot
+	// before and stops sending any replication message.
+	State ProgressStateType
+	// Paused is used in ProgressStateProbe.
+	// When Paused is true, raft should pause sending replication message to this peer.
+	Paused bool
+	// PendingSnapshot is used in ProgressStateSnapshot.
+	// If there is a pending snapshot, the pendingSnapshot will be set to the
+	// index of the snapshot. If pendingSnapshot is set, the replication process of
+	// this Progress will be paused. raft will not resend snapshot until the pending one
+	// is reported to be failed.
+	PendingSnapshot uint64
+
+	// RecentActive is true if the progress is recently active. Receiving any messages
+	// from the corresponding follower indicates the progress is active.
+	// RecentActive can be reset to false after an election timeout.
+	RecentActive bool
+
+	// inflights is a sliding window for the inflight messages.
+	// When inflights is full, no more message should be sent.
+	// When a leader sends out a message, the index of the last
+	// entry should be added to inflights. The index MUST be added
+	// into inflights in order.
+	// When a leader receives a reply, the previous inflights should
+	// be freed by calling inflights.freeTo.
+	ins *inflights
+}
+
+func (pr *Progress) resetState(state ProgressStateType) {
+	pr.Paused = false
+	pr.RecentActive = false
+	pr.PendingSnapshot = 0
+	pr.State = state
+	pr.ins.reset()
+}
+
+func (pr *Progress) becomeProbe() {
+	// If the original state is ProgressStateSnapshot, progress knows that
+	// the pending snapshot has been sent to this peer successfully, then
+	// probes from pendingSnapshot + 1.
+	if pr.State == ProgressStateSnapshot {
+		pendingSnapshot := pr.PendingSnapshot
+		pr.resetState(ProgressStateProbe)
+		pr.Next = max(pr.Match+1, pendingSnapshot+1)
+	} else {
+		pr.resetState(ProgressStateProbe)
+		pr.Next = pr.Match + 1
+	}
+}
+
+func (pr *Progress) becomeReplicate() {
+	pr.resetState(ProgressStateReplicate)
+	pr.Next = pr.Match + 1
+}
+
+func (pr *Progress) becomeSnapshot(snapshoti uint64) {
+	pr.resetState(ProgressStateSnapshot)
+	pr.PendingSnapshot = snapshoti
+}
+
+// maybeUpdate returns false if the given n index comes from an outdated message.
+// Otherwise it updates the progress and returns true.
+func (pr *Progress) maybeUpdate(n uint64) bool {
+	var updated bool
+	if pr.Match < n {
+		pr.Match = n
+		updated = true
+		pr.resume()
+	}
+	if pr.Next < n+1 {
+		pr.Next = n + 1
+	}
+	return updated
+}
+
+func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 }
+
+// maybeDecrTo returns false if the given to index comes from an out of order message.
+// Otherwise it decreases the progress next index to min(rejected, last) and returns true.
+func (pr *Progress) maybeDecrTo(rejected, last uint64) bool {
+	if pr.State == ProgressStateReplicate {
+		// the rejection must be stale if the progress has matched and "rejected"
+		// is smaller than "match".
+		if rejected <= pr.Match {
+			return false
+		}
+		// directly decrease next to match + 1
+		pr.Next = pr.Match + 1
+		return true
+	}
+
+	// the rejection must be stale if "rejected" does not match next - 1
+	if pr.Next-1 != rejected {
+		return false
+	}
+
+	if pr.Next = min(rejected, last+1); pr.Next < 1 {
+		pr.Next = 1
+	}
+	pr.resume()
+	return true
+}
+
+func (pr *Progress) pause()  { pr.Paused = true }
+func (pr *Progress) resume() { pr.Paused = false }
+
+// isPaused returns whether progress stops sending message.
+func (pr *Progress) isPaused() bool {
+	switch pr.State {
+	case ProgressStateProbe:
+		return pr.Paused
+	case ProgressStateReplicate:
+		return pr.ins.full()
+	case ProgressStateSnapshot:
+		return true
+	default:
+		panic("unexpected state")
+	}
+}
+
+func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 }
+
+// maybeSnapshotAbort unsets pendingSnapshot if Match is equal or higher than
+// the pendingSnapshot
+func (pr *Progress) maybeSnapshotAbort() bool {
+	return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot
+}
+
+func (pr *Progress) String() string {
+	return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.isPaused(), pr.PendingSnapshot)
+}
+
+type inflights struct {
+	// the starting index in the buffer
+	start int
+	// number of inflights in the buffer
+	count int
+
+	// the size of the buffer
+	size   int
+	buffer []uint64
+}
+
+func newInflights(size int) *inflights {
+	return &inflights{
+		size:   size,
+		buffer: make([]uint64, size),
+	}
+}
+
+// add adds an inflight into inflights
+func (in *inflights) add(inflight uint64) {
+	if in.full() {
+		panic("cannot add into a full inflights")
+	}
+	next := in.start + in.count
+	if next >= in.size {
+		next -= in.size
+	}
+	in.buffer[next] = inflight
+	in.count++
+}
+
+// freeTo frees the inflights smaller or equal to the given `to` flight.
+func (in *inflights) freeTo(to uint64) {
+	if in.count == 0 || to < in.buffer[in.start] {
+		// out of the left side of the window
+		return
+	}
+
+	i, idx := 0, in.start
+	for i = 0; i < in.count; i++ {
+		if to < in.buffer[idx] { // found the first large inflight
+			break
+		}
+
+		// increase index and maybe rotate
+		if idx++; idx >= in.size {
+			idx -= in.size
+		}
+	}
+	// free i inflights and set new start index
+	in.count -= i
+	in.start = idx
+}
+
+func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) }
+
+// full returns true if the inflights is full.
+func (in *inflights) full() bool {
+	return in.count == in.size
+}
+
+// resets frees all inflights.
+func (in *inflights) reset() {
+	in.count = 0
+	in.start = 0
+}

+ 898 - 0
vendor/src/github.com/coreos/etcd/raft/raft.go

@@ -0,0 +1,898 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/rand"
+	"sort"
+	"strings"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// None is a placeholder node ID used when there is no leader.
+const None uint64 = 0
+const noLimit = math.MaxUint64
+
+var errNoLeader = errors.New("no leader")
+
+var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
+
+// Possible values for StateType.
+const (
+	StateFollower StateType = iota
+	StateCandidate
+	StateLeader
+)
+
+// StateType represents the role of a node in a cluster.
+type StateType uint64
+
+var stmap = [...]string{
+	"StateFollower",
+	"StateCandidate",
+	"StateLeader",
+}
+
+func (st StateType) String() string {
+	return stmap[uint64(st)]
+}
+
+// Config contains the parameters to start a raft.
+type Config struct {
+	// ID is the identity of the local raft. ID cannot be 0.
+	ID uint64
+
+	// peers contains the IDs of all nodes (including self) in the raft cluster. It
+	// should only be set when starting a new raft cluster. Restarting raft from
+	// previous configuration will panic if peers is set. peer is private and only
+	// used for testing right now.
+	peers []uint64
+
+	// ElectionTick is the number of Node.Tick invocations that must pass between
+	// elections. That is, if a follower does not receive any message from the
+	// leader of current term before ElectionTick has elapsed, it will become
+	// candidate and start an election. ElectionTick must be greater than
+	// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
+	// unnecessary leader switching.
+	ElectionTick int
+	// HeartbeatTick is the number of Node.Tick invocations that must pass between
+	// heartbeats. That is, a leader sends heartbeat messages to maintain its
+	// leadership every HeartbeatTick ticks.
+	HeartbeatTick int
+
+	// Storage is the storage for raft. raft generates entries and states to be
+	// stored in storage. raft reads the persisted entries and states out of
+	// Storage when it needs. raft reads out the previous state and configuration
+	// out of storage when restarting.
+	Storage Storage
+	// Applied is the last applied index. It should only be set when restarting
+	// raft. raft will not return entries to the application smaller or equal to
+	// Applied. If Applied is unset when restarting, raft might return previous
+	// applied entries. This is a very application dependent configuration.
+	Applied uint64
+
+	// MaxSizePerMsg limits the max size of each append message. Smaller value
+	// lowers the raft recovery cost(initial probing and message lost during normal
+	// operation). On the other side, it might affect the throughput during normal
+	// replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per
+	// message.
+	MaxSizePerMsg uint64
+	// MaxInflightMsgs limits the max number of in-flight append messages during
+	// optimistic replication phase. The application transportation layer usually
+	// has its own sending buffer over TCP/UDP. Setting MaxInflightMsgs to avoid
+	// overflowing that sending buffer. TODO (xiangli): feedback to application to
+	// limit the proposal rate?
+	MaxInflightMsgs int
+
+	// CheckQuorum specifies if the leader should check quorum activity. Leader
+	// steps down when quorum is not active for an electionTimeout.
+	CheckQuorum bool
+
+	// Logger is the logger used for raft log. For multinode which can host
+	// multiple raft group, each raft group can have its own logger
+	Logger Logger
+}
+
+func (c *Config) validate() error {
+	if c.ID == None {
+		return errors.New("cannot use none as id")
+	}
+
+	if c.HeartbeatTick <= 0 {
+		return errors.New("heartbeat tick must be greater than 0")
+	}
+
+	if c.ElectionTick <= c.HeartbeatTick {
+		return errors.New("election tick must be greater than heartbeat tick")
+	}
+
+	if c.Storage == nil {
+		return errors.New("storage cannot be nil")
+	}
+
+	if c.MaxInflightMsgs <= 0 {
+		return errors.New("max inflight messages must be greater than 0")
+	}
+
+	if c.Logger == nil {
+		c.Logger = raftLogger
+	}
+
+	return nil
+}
+
+type raft struct {
+	id uint64
+
+	Term uint64
+	Vote uint64
+
+	// the log
+	raftLog *raftLog
+
+	maxInflight int
+	maxMsgSize  uint64
+	prs         map[uint64]*Progress
+
+	state StateType
+
+	votes map[uint64]bool
+
+	msgs []pb.Message
+
+	// the leader id
+	lead uint64
+
+	// New configuration is ignored if there exists unapplied configuration.
+	pendingConf bool
+
+	// number of ticks since it reached last electionTimeout when it is leader
+	// or candidate.
+	// number of ticks since it reached last electionTimeout or received a
+	// valid message from current leader when it is a follower.
+	electionElapsed int
+
+	// number of ticks since it reached last heartbeatTimeout.
+	// only leader keeps heartbeatElapsed.
+	heartbeatElapsed int
+
+	checkQuorum bool
+
+	heartbeatTimeout int
+	electionTimeout  int
+	rand             *rand.Rand
+	tick             func()
+	step             stepFunc
+
+	logger Logger
+}
+
+func newRaft(c *Config) *raft {
+	if err := c.validate(); err != nil {
+		panic(err.Error())
+	}
+	raftlog := newLog(c.Storage, c.Logger)
+	hs, cs, err := c.Storage.InitialState()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	peers := c.peers
+	if len(cs.Nodes) > 0 {
+		if len(peers) > 0 {
+			// TODO(bdarnell): the peers argument is always nil except in
+			// tests; the argument should be removed and these tests should be
+			// updated to specify their nodes through a snapshot.
+			panic("cannot specify both newRaft(peers) and ConfState.Nodes)")
+		}
+		peers = cs.Nodes
+	}
+	r := &raft{
+		id:               c.ID,
+		lead:             None,
+		raftLog:          raftlog,
+		maxMsgSize:       c.MaxSizePerMsg,
+		maxInflight:      c.MaxInflightMsgs,
+		prs:              make(map[uint64]*Progress),
+		electionTimeout:  c.ElectionTick,
+		heartbeatTimeout: c.HeartbeatTick,
+		logger:           c.Logger,
+		checkQuorum:      c.CheckQuorum,
+	}
+	r.rand = rand.New(rand.NewSource(int64(c.ID)))
+	for _, p := range peers {
+		r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
+	}
+	if !isHardStateEqual(hs, emptyState) {
+		r.loadState(hs)
+	}
+	if c.Applied > 0 {
+		raftlog.appliedTo(c.Applied)
+	}
+	r.becomeFollower(r.Term, None)
+
+	nodesStrs := make([]string, 0)
+	for _, n := range r.nodes() {
+		nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
+	}
+
+	r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
+		r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm())
+	return r
+}
+
+func (r *raft) hasLeader() bool { return r.lead != None }
+
+func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} }
+
+func (r *raft) hardState() pb.HardState {
+	return pb.HardState{
+		Term:   r.Term,
+		Vote:   r.Vote,
+		Commit: r.raftLog.committed,
+	}
+}
+
+func (r *raft) quorum() int { return len(r.prs)/2 + 1 }
+
+func (r *raft) nodes() []uint64 {
+	nodes := make([]uint64, 0, len(r.prs))
+	for id := range r.prs {
+		nodes = append(nodes, id)
+	}
+	sort.Sort(uint64Slice(nodes))
+	return nodes
+}
+
+// send persists state to stable storage and then sends to its mailbox.
+func (r *raft) send(m pb.Message) {
+	m.From = r.id
+	// do not attach term to MsgProp
+	// proposals are a way to forward to the leader and
+	// should be treated as local message.
+	if m.Type != pb.MsgProp {
+		m.Term = r.Term
+	}
+	r.msgs = append(r.msgs, m)
+}
+
+// sendAppend sends RPC, with entries to the given peer.
+func (r *raft) sendAppend(to uint64) {
+	pr := r.prs[to]
+	if pr.isPaused() {
+		return
+	}
+	m := pb.Message{}
+	m.To = to
+
+	term, errt := r.raftLog.term(pr.Next - 1)
+	ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize)
+
+	if errt != nil || erre != nil { // send snapshot if we failed to get term or entries
+		if !pr.RecentActive {
+			r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
+			return
+		}
+
+		m.Type = pb.MsgSnap
+		snapshot, err := r.raftLog.snapshot()
+		if err != nil {
+			if err == ErrSnapshotTemporarilyUnavailable {
+				r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
+				return
+			}
+			panic(err) // TODO(bdarnell)
+		}
+		if IsEmptySnap(snapshot) {
+			panic("need non-empty snapshot")
+		}
+		m.Snapshot = snapshot
+		sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
+		r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
+			r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
+		pr.becomeSnapshot(sindex)
+		r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
+	} else {
+		m.Type = pb.MsgApp
+		m.Index = pr.Next - 1
+		m.LogTerm = term
+		m.Entries = ents
+		m.Commit = r.raftLog.committed
+		if n := len(m.Entries); n != 0 {
+			switch pr.State {
+			// optimistically increase the next when in ProgressStateReplicate
+			case ProgressStateReplicate:
+				last := m.Entries[n-1].Index
+				pr.optimisticUpdate(last)
+				pr.ins.add(last)
+			case ProgressStateProbe:
+				pr.pause()
+			default:
+				r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State)
+			}
+		}
+	}
+	r.send(m)
+}
+
+// sendHeartbeat sends an empty MsgApp
+func (r *raft) sendHeartbeat(to uint64) {
+	// Attach the commit as min(to.matched, r.committed).
+	// When the leader sends out heartbeat message,
+	// the receiver(follower) might not be matched with the leader
+	// or it might not have all the committed entries.
+	// The leader MUST NOT forward the follower's commit to
+	// an unmatched index.
+	commit := min(r.prs[to].Match, r.raftLog.committed)
+	m := pb.Message{
+		To:     to,
+		Type:   pb.MsgHeartbeat,
+		Commit: commit,
+	}
+	r.send(m)
+}
+
+// bcastAppend sends RPC, with entries to all peers that are not up-to-date
+// according to the progress recorded in r.prs.
+func (r *raft) bcastAppend() {
+	for id := range r.prs {
+		if id == r.id {
+			continue
+		}
+		r.sendAppend(id)
+	}
+}
+
+// bcastHeartbeat sends RPC, without entries to all the peers.
+func (r *raft) bcastHeartbeat() {
+	for id := range r.prs {
+		if id == r.id {
+			continue
+		}
+		r.sendHeartbeat(id)
+		r.prs[id].resume()
+	}
+}
+
+// maybeCommit attempts to advance the commit index. Returns true if
+// the commit index changed (in which case the caller should call
+// r.bcastAppend).
+func (r *raft) maybeCommit() bool {
+	// TODO(bmizerany): optimize.. Currently naive
+	mis := make(uint64Slice, 0, len(r.prs))
+	for id := range r.prs {
+		mis = append(mis, r.prs[id].Match)
+	}
+	sort.Sort(sort.Reverse(mis))
+	mci := mis[r.quorum()-1]
+	return r.raftLog.maybeCommit(mci, r.Term)
+}
+
+func (r *raft) reset(term uint64) {
+	if r.Term != term {
+		r.Term = term
+		r.Vote = None
+	}
+	r.lead = None
+
+	r.electionElapsed = 0
+	r.heartbeatElapsed = 0
+
+	r.votes = make(map[uint64]bool)
+	for id := range r.prs {
+		r.prs[id] = &Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight)}
+		if id == r.id {
+			r.prs[id].Match = r.raftLog.lastIndex()
+		}
+	}
+	r.pendingConf = false
+}
+
+func (r *raft) appendEntry(es ...pb.Entry) {
+	li := r.raftLog.lastIndex()
+	for i := range es {
+		es[i].Term = r.Term
+		es[i].Index = li + 1 + uint64(i)
+	}
+	r.raftLog.append(es...)
+	r.prs[r.id].maybeUpdate(r.raftLog.lastIndex())
+	// Regardless of maybeCommit's return, our caller will call bcastAppend.
+	r.maybeCommit()
+}
+
+// tickElection is run by followers and candidates after r.electionTimeout.
+func (r *raft) tickElection() {
+	if !r.promotable() {
+		r.electionElapsed = 0
+		return
+	}
+	r.electionElapsed++
+	if r.isElectionTimeout() {
+		r.electionElapsed = 0
+		r.Step(pb.Message{From: r.id, Type: pb.MsgHup})
+	}
+}
+
+// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
+func (r *raft) tickHeartbeat() {
+	r.heartbeatElapsed++
+	r.electionElapsed++
+
+	if r.electionElapsed >= r.electionTimeout {
+		r.electionElapsed = 0
+		if r.checkQuorum {
+			r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
+		}
+	}
+
+	if r.state != StateLeader {
+		return
+	}
+
+	if r.heartbeatElapsed >= r.heartbeatTimeout {
+		r.heartbeatElapsed = 0
+		r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
+	}
+}
+
+func (r *raft) becomeFollower(term uint64, lead uint64) {
+	r.step = stepFollower
+	r.reset(term)
+	r.tick = r.tickElection
+	r.lead = lead
+	r.state = StateFollower
+	r.logger.Infof("%x became follower at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomeCandidate() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateLeader {
+		panic("invalid transition [leader -> candidate]")
+	}
+	r.step = stepCandidate
+	r.reset(r.Term + 1)
+	r.tick = r.tickElection
+	r.Vote = r.id
+	r.state = StateCandidate
+	r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomeLeader() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateFollower {
+		panic("invalid transition [follower -> leader]")
+	}
+	r.step = stepLeader
+	r.reset(r.Term)
+	r.tick = r.tickHeartbeat
+	r.lead = r.id
+	r.state = StateLeader
+	ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit)
+	if err != nil {
+		r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err)
+	}
+
+	for _, e := range ents {
+		if e.Type != pb.EntryConfChange {
+			continue
+		}
+		if r.pendingConf {
+			panic("unexpected double uncommitted config entry")
+		}
+		r.pendingConf = true
+	}
+	r.appendEntry(pb.Entry{Data: nil})
+	r.logger.Infof("%x became leader at term %d", r.id, r.Term)
+}
+
+func (r *raft) campaign() {
+	r.becomeCandidate()
+	if r.quorum() == r.poll(r.id, true) {
+		r.becomeLeader()
+		return
+	}
+	for id := range r.prs {
+		if id == r.id {
+			continue
+		}
+		r.logger.Infof("%x [logterm: %d, index: %d] sent vote request to %x at term %d",
+			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), id, r.Term)
+		r.send(pb.Message{To: id, Type: pb.MsgVote, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm()})
+	}
+}
+
+func (r *raft) poll(id uint64, v bool) (granted int) {
+	if v {
+		r.logger.Infof("%x received vote from %x at term %d", r.id, id, r.Term)
+	} else {
+		r.logger.Infof("%x received vote rejection from %x at term %d", r.id, id, r.Term)
+	}
+	if _, ok := r.votes[id]; !ok {
+		r.votes[id] = v
+	}
+	for _, vv := range r.votes {
+		if vv {
+			granted++
+		}
+	}
+	return granted
+}
+
+func (r *raft) Step(m pb.Message) error {
+	if m.Type == pb.MsgHup {
+		if r.state != StateLeader {
+			r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
+			r.campaign()
+		} else {
+			r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
+		}
+		return nil
+	}
+
+	switch {
+	case m.Term == 0:
+		// local message
+	case m.Term > r.Term:
+		lead := m.From
+		if m.Type == pb.MsgVote {
+			lead = None
+		}
+		r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
+			r.id, r.Term, m.Type, m.From, m.Term)
+		r.becomeFollower(m.Term, lead)
+	case m.Term < r.Term:
+		// ignore
+		r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
+			r.id, r.Term, m.Type, m.From, m.Term)
+		return nil
+	}
+	r.step(r, m)
+	return nil
+}
+
+type stepFunc func(r *raft, m pb.Message)
+
+func stepLeader(r *raft, m pb.Message) {
+
+	// These message types do not require any progress for m.From.
+	switch m.Type {
+	case pb.MsgBeat:
+		r.bcastHeartbeat()
+		return
+	case pb.MsgCheckQuorum:
+		if !r.checkQuorumActive() {
+			r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
+			r.becomeFollower(r.Term, None)
+		}
+		return
+	case pb.MsgProp:
+		if len(m.Entries) == 0 {
+			r.logger.Panicf("%x stepped empty MsgProp", r.id)
+		}
+		if _, ok := r.prs[r.id]; !ok {
+			// If we are not currently a member of the range (i.e. this node
+			// was removed from the configuration while serving as leader),
+			// drop any new proposals.
+			return
+		}
+		for i, e := range m.Entries {
+			if e.Type == pb.EntryConfChange {
+				if r.pendingConf {
+					m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
+				}
+				r.pendingConf = true
+			}
+		}
+		r.appendEntry(m.Entries...)
+		r.bcastAppend()
+		return
+	case pb.MsgVote:
+		r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d",
+			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term)
+		r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true})
+		return
+	}
+
+	// All other message types require a progress for m.From (pr).
+	pr, prOk := r.prs[m.From]
+	if !prOk {
+		r.logger.Debugf("no progress available for %x", m.From)
+		return
+	}
+	switch m.Type {
+	case pb.MsgAppResp:
+		pr.RecentActive = true
+
+		if m.Reject {
+			r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d",
+				r.id, m.RejectHint, m.From, m.Index)
+			if pr.maybeDecrTo(m.Index, m.RejectHint) {
+				r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
+				if pr.State == ProgressStateReplicate {
+					pr.becomeProbe()
+				}
+				r.sendAppend(m.From)
+			}
+		} else {
+			oldPaused := pr.isPaused()
+			if pr.maybeUpdate(m.Index) {
+				switch {
+				case pr.State == ProgressStateProbe:
+					pr.becomeReplicate()
+				case pr.State == ProgressStateSnapshot && pr.maybeSnapshotAbort():
+					r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+					pr.becomeProbe()
+				case pr.State == ProgressStateReplicate:
+					pr.ins.freeTo(m.Index)
+				}
+
+				if r.maybeCommit() {
+					r.bcastAppend()
+				} else if oldPaused {
+					// update() reset the wait state on this node. If we had delayed sending
+					// an update before, send it now.
+					r.sendAppend(m.From)
+				}
+			}
+		}
+	case pb.MsgHeartbeatResp:
+		pr.RecentActive = true
+
+		// free one slot for the full inflights window to allow progress.
+		if pr.State == ProgressStateReplicate && pr.ins.full() {
+			pr.ins.freeFirstOne()
+		}
+		if pr.Match < r.raftLog.lastIndex() {
+			r.sendAppend(m.From)
+		}
+	case pb.MsgSnapStatus:
+		if pr.State != ProgressStateSnapshot {
+			return
+		}
+		if !m.Reject {
+			pr.becomeProbe()
+			r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+		} else {
+			pr.snapshotFailure()
+			pr.becomeProbe()
+			r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+		}
+		// If snapshot finish, wait for the msgAppResp from the remote node before sending
+		// out the next msgApp.
+		// If snapshot failure, wait for a heartbeat interval before next try
+		pr.pause()
+	case pb.MsgUnreachable:
+		// During optimistic replication, if the remote becomes unreachable,
+		// there is huge probability that a MsgApp is lost.
+		if pr.State == ProgressStateReplicate {
+			pr.becomeProbe()
+		}
+		r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
+	}
+}
+
+func stepCandidate(r *raft, m pb.Message) {
+	switch m.Type {
+	case pb.MsgProp:
+		r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+		return
+	case pb.MsgApp:
+		r.becomeFollower(r.Term, m.From)
+		r.handleAppendEntries(m)
+	case pb.MsgHeartbeat:
+		r.becomeFollower(r.Term, m.From)
+		r.handleHeartbeat(m)
+	case pb.MsgSnap:
+		r.becomeFollower(m.Term, m.From)
+		r.handleSnapshot(m)
+	case pb.MsgVote:
+		r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d",
+			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term)
+		r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true})
+	case pb.MsgVoteResp:
+		gr := r.poll(m.From, !m.Reject)
+		r.logger.Infof("%x [quorum:%d] has received %d votes and %d vote rejections", r.id, r.quorum(), gr, len(r.votes)-gr)
+		switch r.quorum() {
+		case gr:
+			r.becomeLeader()
+			r.bcastAppend()
+		case len(r.votes) - gr:
+			r.becomeFollower(r.Term, None)
+		}
+	}
+}
+
+func stepFollower(r *raft, m pb.Message) {
+	switch m.Type {
+	case pb.MsgProp:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+			return
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgApp:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleAppendEntries(m)
+	case pb.MsgHeartbeat:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleHeartbeat(m)
+	case pb.MsgSnap:
+		r.electionElapsed = 0
+		r.handleSnapshot(m)
+	case pb.MsgVote:
+		if (r.Vote == None || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
+			r.electionElapsed = 0
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] voted for %x [logterm: %d, index: %d] at term %d",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term)
+			r.Vote = m.From
+			r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp})
+		} else {
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term)
+			r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true})
+		}
+	}
+}
+
+func (r *raft) handleAppendEntries(m pb.Message) {
+	if m.Index < r.raftLog.committed {
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+		return
+	}
+
+	if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
+	} else {
+		r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x",
+			r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()})
+	}
+}
+
+func (r *raft) handleHeartbeat(m pb.Message) {
+	r.raftLog.commitTo(m.Commit)
+	r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp})
+}
+
+func (r *raft) handleSnapshot(m pb.Message) {
+	sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
+	if r.restore(m.Snapshot) {
+		r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, sindex, sterm)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
+	} else {
+		r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, sindex, sterm)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+	}
+}
+
+// restore recovers the state machine from a snapshot. It restores the log and the
+// configuration of state machine.
+func (r *raft) restore(s pb.Snapshot) bool {
+	if s.Metadata.Index <= r.raftLog.committed {
+		return false
+	}
+	if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
+		r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+		r.raftLog.commitTo(s.Metadata.Index)
+		return false
+	}
+
+	r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
+		r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+
+	r.raftLog.restore(s)
+	r.prs = make(map[uint64]*Progress)
+	for _, n := range s.Metadata.ConfState.Nodes {
+		match, next := uint64(0), uint64(r.raftLog.lastIndex())+1
+		if n == r.id {
+			match = next - 1
+		} else {
+			match = 0
+		}
+		r.setProgress(n, match, next)
+		r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.prs[n])
+	}
+	return true
+}
+
+// promotable indicates whether state machine can be promoted to leader,
+// which is true when its own id is in progress list.
+func (r *raft) promotable() bool {
+	_, ok := r.prs[r.id]
+	return ok
+}
+
+func (r *raft) addNode(id uint64) {
+	if _, ok := r.prs[id]; ok {
+		// Ignore any redundant addNode calls (which can happen because the
+		// initial bootstrapping entries are applied twice).
+		return
+	}
+
+	r.setProgress(id, 0, r.raftLog.lastIndex()+1)
+	r.pendingConf = false
+}
+
+func (r *raft) removeNode(id uint64) {
+	r.delProgress(id)
+	r.pendingConf = false
+	// The quorum size is now smaller, so see if any pending entries can
+	// be committed.
+	if r.maybeCommit() {
+		r.bcastAppend()
+	}
+}
+
+func (r *raft) resetPendingConf() { r.pendingConf = false }
+
+func (r *raft) setProgress(id, match, next uint64) {
+	r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
+}
+
+func (r *raft) delProgress(id uint64) {
+	delete(r.prs, id)
+}
+
+func (r *raft) loadState(state pb.HardState) {
+	if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
+		r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
+	}
+	r.raftLog.committed = state.Commit
+	r.Term = state.Term
+	r.Vote = state.Vote
+}
+
+// isElectionTimeout returns true if r.electionElapsed is greater than the
+// randomized election timeout in (electiontimeout, 2 * electiontimeout - 1).
+// Otherwise, it returns false.
+func (r *raft) isElectionTimeout() bool {
+	d := r.electionElapsed - r.electionTimeout
+	if d < 0 {
+		return false
+	}
+	return d > r.rand.Int()%r.electionTimeout
+}
+
+// checkQuorumActive returns true if the quorum is active from
+// the view of the local raft state machine. Otherwise, it returns
+// false.
+// checkQuorumActive also resets all RecentActive to false.
+func (r *raft) checkQuorumActive() bool {
+	var act int
+
+	for id := range r.prs {
+		if id == r.id { // self is always active
+			act++
+			continue
+		}
+
+		if r.prs[id].RecentActive {
+			act++
+		}
+
+		r.prs[id].RecentActive = false
+	}
+
+	return act >= r.quorum()
+}

+ 1768 - 0
vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go

@@ -0,0 +1,1768 @@
+// Code generated by protoc-gen-gogo.
+// source: raft.proto
+// DO NOT EDIT!
+
+/*
+	Package raftpb is a generated protocol buffer package.
+
+	It is generated from these files:
+		raft.proto
+
+	It has these top-level messages:
+		Entry
+		SnapshotMetadata
+		Snapshot
+		Message
+		HardState
+		ConfState
+		ConfChange
+*/
+package raftpb
+
+import (
+	"fmt"
+
+	proto "github.com/gogo/protobuf/proto"
+)
+
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type EntryType int32
+
+const (
+	EntryNormal     EntryType = 0
+	EntryConfChange EntryType = 1
+)
+
+var EntryType_name = map[int32]string{
+	0: "EntryNormal",
+	1: "EntryConfChange",
+}
+var EntryType_value = map[string]int32{
+	"EntryNormal":     0,
+	"EntryConfChange": 1,
+}
+
+func (x EntryType) Enum() *EntryType {
+	p := new(EntryType)
+	*p = x
+	return p
+}
+func (x EntryType) String() string {
+	return proto.EnumName(EntryType_name, int32(x))
+}
+func (x *EntryType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType")
+	if err != nil {
+		return err
+	}
+	*x = EntryType(value)
+	return nil
+}
+
+type MessageType int32
+
+const (
+	MsgHup           MessageType = 0
+	MsgBeat          MessageType = 1
+	MsgProp          MessageType = 2
+	MsgApp           MessageType = 3
+	MsgAppResp       MessageType = 4
+	MsgVote          MessageType = 5
+	MsgVoteResp      MessageType = 6
+	MsgSnap          MessageType = 7
+	MsgHeartbeat     MessageType = 8
+	MsgHeartbeatResp MessageType = 9
+	MsgUnreachable   MessageType = 10
+	MsgSnapStatus    MessageType = 11
+	MsgCheckQuorum   MessageType = 12
+)
+
+var MessageType_name = map[int32]string{
+	0:  "MsgHup",
+	1:  "MsgBeat",
+	2:  "MsgProp",
+	3:  "MsgApp",
+	4:  "MsgAppResp",
+	5:  "MsgVote",
+	6:  "MsgVoteResp",
+	7:  "MsgSnap",
+	8:  "MsgHeartbeat",
+	9:  "MsgHeartbeatResp",
+	10: "MsgUnreachable",
+	11: "MsgSnapStatus",
+	12: "MsgCheckQuorum",
+}
+var MessageType_value = map[string]int32{
+	"MsgHup":           0,
+	"MsgBeat":          1,
+	"MsgProp":          2,
+	"MsgApp":           3,
+	"MsgAppResp":       4,
+	"MsgVote":          5,
+	"MsgVoteResp":      6,
+	"MsgSnap":          7,
+	"MsgHeartbeat":     8,
+	"MsgHeartbeatResp": 9,
+	"MsgUnreachable":   10,
+	"MsgSnapStatus":    11,
+	"MsgCheckQuorum":   12,
+}
+
+func (x MessageType) Enum() *MessageType {
+	p := new(MessageType)
+	*p = x
+	return p
+}
+func (x MessageType) String() string {
+	return proto.EnumName(MessageType_name, int32(x))
+}
+func (x *MessageType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType")
+	if err != nil {
+		return err
+	}
+	*x = MessageType(value)
+	return nil
+}
+
+type ConfChangeType int32
+
+const (
+	ConfChangeAddNode    ConfChangeType = 0
+	ConfChangeRemoveNode ConfChangeType = 1
+	ConfChangeUpdateNode ConfChangeType = 2
+)
+
+var ConfChangeType_name = map[int32]string{
+	0: "ConfChangeAddNode",
+	1: "ConfChangeRemoveNode",
+	2: "ConfChangeUpdateNode",
+}
+var ConfChangeType_value = map[string]int32{
+	"ConfChangeAddNode":    0,
+	"ConfChangeRemoveNode": 1,
+	"ConfChangeUpdateNode": 2,
+}
+
+func (x ConfChangeType) Enum() *ConfChangeType {
+	p := new(ConfChangeType)
+	*p = x
+	return p
+}
+func (x ConfChangeType) String() string {
+	return proto.EnumName(ConfChangeType_name, int32(x))
+}
+func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType")
+	if err != nil {
+		return err
+	}
+	*x = ConfChangeType(value)
+	return nil
+}
+
+type Entry struct {
+	Type             EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
+	Term             uint64    `protobuf:"varint,2,opt,name=Term" json:"Term"`
+	Index            uint64    `protobuf:"varint,3,opt,name=Index" json:"Index"`
+	Data             []byte    `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
+	XXX_unrecognized []byte    `json:"-"`
+}
+
+func (m *Entry) Reset()         { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage()    {}
+
+type SnapshotMetadata struct {
+	ConfState        ConfState `protobuf:"bytes,1,opt,name=conf_state" json:"conf_state"`
+	Index            uint64    `protobuf:"varint,2,opt,name=index" json:"index"`
+	Term             uint64    `protobuf:"varint,3,opt,name=term" json:"term"`
+	XXX_unrecognized []byte    `json:"-"`
+}
+
+func (m *SnapshotMetadata) Reset()         { *m = SnapshotMetadata{} }
+func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) }
+func (*SnapshotMetadata) ProtoMessage()    {}
+
+type Snapshot struct {
+	Data             []byte           `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
+	Metadata         SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"`
+	XXX_unrecognized []byte           `json:"-"`
+}
+
+func (m *Snapshot) Reset()         { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage()    {}
+
+type Message struct {
+	Type             MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"`
+	To               uint64      `protobuf:"varint,2,opt,name=to" json:"to"`
+	From             uint64      `protobuf:"varint,3,opt,name=from" json:"from"`
+	Term             uint64      `protobuf:"varint,4,opt,name=term" json:"term"`
+	LogTerm          uint64      `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"`
+	Index            uint64      `protobuf:"varint,6,opt,name=index" json:"index"`
+	Entries          []Entry     `protobuf:"bytes,7,rep,name=entries" json:"entries"`
+	Commit           uint64      `protobuf:"varint,8,opt,name=commit" json:"commit"`
+	Snapshot         Snapshot    `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"`
+	Reject           bool        `protobuf:"varint,10,opt,name=reject" json:"reject"`
+	RejectHint       uint64      `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"`
+	XXX_unrecognized []byte      `json:"-"`
+}
+
+func (m *Message) Reset()         { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage()    {}
+
+type HardState struct {
+	Term             uint64 `protobuf:"varint,1,opt,name=term" json:"term"`
+	Vote             uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"`
+	Commit           uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *HardState) Reset()         { *m = HardState{} }
+func (m *HardState) String() string { return proto.CompactTextString(m) }
+func (*HardState) ProtoMessage()    {}
+
+type ConfState struct {
+	Nodes            []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *ConfState) Reset()         { *m = ConfState{} }
+func (m *ConfState) String() string { return proto.CompactTextString(m) }
+func (*ConfState) ProtoMessage()    {}
+
+type ConfChange struct {
+	ID               uint64         `protobuf:"varint,1,opt,name=ID" json:"ID"`
+	Type             ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"`
+	NodeID           uint64         `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"`
+	Context          []byte         `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"`
+	XXX_unrecognized []byte         `json:"-"`
+}
+
+func (m *ConfChange) Reset()         { *m = ConfChange{} }
+func (m *ConfChange) String() string { return proto.CompactTextString(m) }
+func (*ConfChange) ProtoMessage()    {}
+
+func init() {
+	proto.RegisterType((*Entry)(nil), "raftpb.Entry")
+	proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata")
+	proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot")
+	proto.RegisterType((*Message)(nil), "raftpb.Message")
+	proto.RegisterType((*HardState)(nil), "raftpb.HardState")
+	proto.RegisterType((*ConfState)(nil), "raftpb.ConfState")
+	proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange")
+	proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
+	proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
+	proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
+}
+func (m *Entry) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *Entry) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	data[i] = 0x8
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Type))
+	data[i] = 0x10
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Term))
+	data[i] = 0x18
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Index))
+	if m.Data != nil {
+		data[i] = 0x22
+		i++
+		i = encodeVarintRaft(data, i, uint64(len(m.Data)))
+		i += copy(data[i:], m.Data)
+	}
+	if m.XXX_unrecognized != nil {
+		i += copy(data[i:], m.XXX_unrecognized)
+	}
+	return i, nil
+}
+
+func (m *SnapshotMetadata) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *SnapshotMetadata) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	data[i] = 0xa
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.ConfState.Size()))
+	n1, err := m.ConfState.MarshalTo(data[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n1
+	data[i] = 0x10
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Index))
+	data[i] = 0x18
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Term))
+	if m.XXX_unrecognized != nil {
+		i += copy(data[i:], m.XXX_unrecognized)
+	}
+	return i, nil
+}
+
+func (m *Snapshot) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Data != nil {
+		data[i] = 0xa
+		i++
+		i = encodeVarintRaft(data, i, uint64(len(m.Data)))
+		i += copy(data[i:], m.Data)
+	}
+	data[i] = 0x12
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Metadata.Size()))
+	n2, err := m.Metadata.MarshalTo(data[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n2
+	if m.XXX_unrecognized != nil {
+		i += copy(data[i:], m.XXX_unrecognized)
+	}
+	return i, nil
+}
+
+func (m *Message) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *Message) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	data[i] = 0x8
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Type))
+	data[i] = 0x10
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.To))
+	data[i] = 0x18
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.From))
+	data[i] = 0x20
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Term))
+	data[i] = 0x28
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.LogTerm))
+	data[i] = 0x30
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Index))
+	if len(m.Entries) > 0 {
+		for _, msg := range m.Entries {
+			data[i] = 0x3a
+			i++
+			i = encodeVarintRaft(data, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(data[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	data[i] = 0x40
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Commit))
+	data[i] = 0x4a
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Snapshot.Size()))
+	n3, err := m.Snapshot.MarshalTo(data[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n3
+	data[i] = 0x50
+	i++
+	if m.Reject {
+		data[i] = 1
+	} else {
+		data[i] = 0
+	}
+	i++
+	data[i] = 0x58
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.RejectHint))
+	if m.XXX_unrecognized != nil {
+		i += copy(data[i:], m.XXX_unrecognized)
+	}
+	return i, nil
+}
+
+func (m *HardState) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *HardState) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	data[i] = 0x8
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Term))
+	data[i] = 0x10
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Vote))
+	data[i] = 0x18
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Commit))
+	if m.XXX_unrecognized != nil {
+		i += copy(data[i:], m.XXX_unrecognized)
+	}
+	return i, nil
+}
+
+func (m *ConfState) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *ConfState) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Nodes) > 0 {
+		for _, num := range m.Nodes {
+			data[i] = 0x8
+			i++
+			i = encodeVarintRaft(data, i, uint64(num))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		i += copy(data[i:], m.XXX_unrecognized)
+	}
+	return i, nil
+}
+
+func (m *ConfChange) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *ConfChange) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	data[i] = 0x8
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.ID))
+	data[i] = 0x10
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.Type))
+	data[i] = 0x18
+	i++
+	i = encodeVarintRaft(data, i, uint64(m.NodeID))
+	if m.Context != nil {
+		data[i] = 0x22
+		i++
+		i = encodeVarintRaft(data, i, uint64(len(m.Context)))
+		i += copy(data[i:], m.Context)
+	}
+	if m.XXX_unrecognized != nil {
+		i += copy(data[i:], m.XXX_unrecognized)
+	}
+	return i, nil
+}
+
+func encodeFixed64Raft(data []byte, offset int, v uint64) int {
+	data[offset] = uint8(v)
+	data[offset+1] = uint8(v >> 8)
+	data[offset+2] = uint8(v >> 16)
+	data[offset+3] = uint8(v >> 24)
+	data[offset+4] = uint8(v >> 32)
+	data[offset+5] = uint8(v >> 40)
+	data[offset+6] = uint8(v >> 48)
+	data[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Raft(data []byte, offset int, v uint32) int {
+	data[offset] = uint8(v)
+	data[offset+1] = uint8(v >> 8)
+	data[offset+2] = uint8(v >> 16)
+	data[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintRaft(data []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		data[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	data[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Entry) Size() (n int) {
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Type))
+	n += 1 + sovRaft(uint64(m.Term))
+	n += 1 + sovRaft(uint64(m.Index))
+	if m.Data != nil {
+		l = len(m.Data)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *SnapshotMetadata) Size() (n int) {
+	var l int
+	_ = l
+	l = m.ConfState.Size()
+	n += 1 + l + sovRaft(uint64(l))
+	n += 1 + sovRaft(uint64(m.Index))
+	n += 1 + sovRaft(uint64(m.Term))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Snapshot) Size() (n int) {
+	var l int
+	_ = l
+	if m.Data != nil {
+		l = len(m.Data)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	l = m.Metadata.Size()
+	n += 1 + l + sovRaft(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Message) Size() (n int) {
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Type))
+	n += 1 + sovRaft(uint64(m.To))
+	n += 1 + sovRaft(uint64(m.From))
+	n += 1 + sovRaft(uint64(m.Term))
+	n += 1 + sovRaft(uint64(m.LogTerm))
+	n += 1 + sovRaft(uint64(m.Index))
+	if len(m.Entries) > 0 {
+		for _, e := range m.Entries {
+			l = e.Size()
+			n += 1 + l + sovRaft(uint64(l))
+		}
+	}
+	n += 1 + sovRaft(uint64(m.Commit))
+	l = m.Snapshot.Size()
+	n += 1 + l + sovRaft(uint64(l))
+	n += 2
+	n += 1 + sovRaft(uint64(m.RejectHint))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HardState) Size() (n int) {
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Term))
+	n += 1 + sovRaft(uint64(m.Vote))
+	n += 1 + sovRaft(uint64(m.Commit))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *ConfState) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Nodes) > 0 {
+		for _, e := range m.Nodes {
+			n += 1 + sovRaft(uint64(e))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *ConfChange) Size() (n int) {
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.ID))
+	n += 1 + sovRaft(uint64(m.Type))
+	n += 1 + sovRaft(uint64(m.NodeID))
+	if m.Context != nil {
+		l = len(m.Context)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovRaft(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozRaft(x uint64) (n int) {
+	return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Entry) Unmarshal(data []byte) error {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Entry: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Type |= (EntryType(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Term |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+			}
+			m.Index = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Index |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(data[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SnapshotMetadata) Unmarshal(data []byte) error {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ConfState.Unmarshal(data[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+			}
+			m.Index = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Index |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Term |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(data[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Snapshot) Unmarshal(data []byte) error {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Metadata.Unmarshal(data[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(data[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Message) Unmarshal(data []byte) error {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Message: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Type |= (MessageType(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+			}
+			m.To = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.To |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			m.From = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.From |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Term |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType)
+			}
+			m.LogTerm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.LogTerm |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+			}
+			m.Index = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Index |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Entries = append(m.Entries, Entry{})
+			if err := m.Entries[len(m.Entries)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+			}
+			m.Commit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Commit |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Snapshot.Unmarshal(data[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Reject = bool(v != 0)
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType)
+			}
+			m.RejectHint = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.RejectHint |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(data[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes m from the protobuf wire format held in data.
+// NOTE(review): generated by protoc-gen-gogo — do not edit by hand.
+func (m *HardState) Unmarshal(data []byte) error {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field tag (field number << 3 | wire type) as a varint.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HardState: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Term |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType)
+			}
+			m.Vote = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Vote |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+			}
+			m.Commit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Commit |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			// Unknown field: skip it and retain the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipRaft(data[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes m from the protobuf wire format held in data.
+// NOTE(review): generated by protoc-gen-gogo — do not edit by hand.
+func (m *ConfState) Unmarshal(data []byte) error {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field tag (field number << 3 | wire type) as a varint.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfState: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
+			}
+			// Repeated uint64: each occurrence appends one decoded varint.
+			var v uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				v |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Nodes = append(m.Nodes, v)
+		default:
+			// Unknown field: skip it and retain the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipRaft(data[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes m from the protobuf wire format held in data.
+// NOTE(review): generated by protoc-gen-gogo — do not edit by hand.
+func (m *ConfChange) Unmarshal(data []byte) error {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		// Read the field tag (field number << 3 | wire type) as a varint.
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfChange: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.ID |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			// Enum field: decoded as a varint directly into ConfChangeType.
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Type |= (ConfChangeType(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+			}
+			m.NodeID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.NodeID |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+			}
+			// Length-delimited bytes field: read the length, then copy the payload.
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Context = append(m.Context[:0], data[iNdEx:postIndex]...)
+			if m.Context == nil {
+				// Preserve presence: an empty-but-present bytes field stays non-nil.
+				m.Context = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: skip it and retain the raw bytes for round-tripping.
+			iNdEx = preIndex
+			skippy, err := skipRaft(data[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// skipRaft returns the number of bytes occupied by the wire-encoded field
+// starting at data[0], so callers can skip fields they do not recognize.
+// NOTE(review): generated by protoc-gen-gogo — do not edit by hand.
+func skipRaft(data []byte) (n int, err error) {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			// Varint: skip bytes until the continuation bit clears.
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if data[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			// 64-bit fixed-width value.
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			// Length-delimited: skip the length varint plus the payload.
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			// NOTE(review): the negative-length check runs after the add; this
+			// ordering matches the upstream gogo code generator's output.
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthRaft
+			}
+			return iNdEx, nil
+		case 3:
+			// Start-group: recursively skip nested fields until end-group (type 4).
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := data[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipRaft(data[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			// End-group tag: zero-length field body.
+			return iNdEx, nil
+		case 5:
+			// 32-bit fixed-width value.
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+// Sentinel errors shared by the generated unmarshaling code above.
+var (
+	ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRaft   = fmt.Errorf("proto: integer overflow")
+)

+ 86 - 0
vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto

@@ -0,0 +1,86 @@
+syntax = "proto2";
+package raftpb;
+
+import "gogoproto/gogo.proto";
+
+// Generate non-nullable, getter-free Go types with hand-written-style
+// marshalers/unmarshalers/sizers (gogoproto).
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+enum EntryType {
+	EntryNormal     = 0;
+	EntryConfChange = 1;
+}
+
+// Entry is a single raft log entry.
+message Entry {
+	optional EntryType  Type  = 1 [(gogoproto.nullable) = false];
+	optional uint64     Term  = 2 [(gogoproto.nullable) = false];
+	optional uint64     Index = 3 [(gogoproto.nullable) = false];
+	optional bytes      Data  = 4;
+}
+
+message SnapshotMetadata {
+	optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
+	optional uint64    index      = 2 [(gogoproto.nullable) = false];
+	optional uint64    term       = 3 [(gogoproto.nullable) = false];
+}
+
+message Snapshot {
+	optional bytes            data     = 1;
+	optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
+}
+
+enum MessageType {
+	MsgHup             = 0;
+	MsgBeat            = 1;
+	MsgProp            = 2;
+	MsgApp             = 3;
+	MsgAppResp         = 4;
+	MsgVote            = 5;
+	MsgVoteResp        = 6;
+	MsgSnap            = 7;
+	MsgHeartbeat       = 8;
+	MsgHeartbeatResp   = 9;
+	MsgUnreachable     = 10;
+	MsgSnapStatus      = 11;
+	MsgCheckQuorum     = 12;
+}
+
+// Message is the unit exchanged between raft state machines.
+message Message {
+	optional MessageType type        = 1  [(gogoproto.nullable) = false];
+	optional uint64      to          = 2  [(gogoproto.nullable) = false];
+	optional uint64      from        = 3  [(gogoproto.nullable) = false];
+	optional uint64      term        = 4  [(gogoproto.nullable) = false];
+	optional uint64      logTerm     = 5  [(gogoproto.nullable) = false];
+	optional uint64      index       = 6  [(gogoproto.nullable) = false];
+	repeated Entry       entries     = 7  [(gogoproto.nullable) = false];
+	optional uint64      commit      = 8  [(gogoproto.nullable) = false];
+	optional Snapshot    snapshot    = 9  [(gogoproto.nullable) = false];
+	optional bool        reject      = 10 [(gogoproto.nullable) = false];
+	optional uint64      rejectHint  = 11 [(gogoproto.nullable) = false];
+}
+
+// HardState is the raft state that must be persisted before messages are sent.
+message HardState {
+	optional uint64 term   = 1 [(gogoproto.nullable) = false];
+	optional uint64 vote   = 2 [(gogoproto.nullable) = false];
+	optional uint64 commit = 3 [(gogoproto.nullable) = false];
+}
+
+message ConfState {
+	repeated uint64 nodes = 1;
+}
+
+enum ConfChangeType {
+	ConfChangeAddNode    = 0;
+	ConfChangeRemoveNode = 1;
+	ConfChangeUpdateNode = 2;
+}
+
+// ConfChange describes a cluster-membership change proposal.
+message ConfChange {
+	optional uint64          ID      = 1 [(gogoproto.nullable) = false];
+	optional ConfChangeType  Type    = 2 [(gogoproto.nullable) = false];
+	optional uint64          NodeID  = 3 [(gogoproto.nullable) = false];
+	optional bytes           Context = 4;
+}

+ 228 - 0
vendor/src/github.com/coreos/etcd/raft/rawnode.go

@@ -0,0 +1,228 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// ErrStepLocalMsg is returned when try to step a local raft message
+var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
+
+// ErrStepPeerNotFound is returned when try to step a response message
+// but there is no peer found in raft.prs for that node.
+var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
+
+// RawNode is a thread-unsafe Node.
+// The methods of this struct correspond to the methods of Node and are described
+// more fully there.
+type RawNode struct {
+	raft       *raft
+	prevSoftSt *SoftState
+	prevHardSt pb.HardState
+}
+
+func (rn *RawNode) newReady() Ready {
+	return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
+}
+
+// commitReady records that the application has persisted/applied the state in
+// rd and advances the raft log's applied/stable markers accordingly.
+func (rn *RawNode) commitReady(rd Ready) {
+	if rd.SoftState != nil {
+		rn.prevSoftSt = rd.SoftState
+	}
+	if !IsEmptyHardState(rd.HardState) {
+		rn.prevHardSt = rd.HardState
+	}
+	if rn.prevHardSt.Commit != 0 {
+		// In most cases, prevHardSt and rd.HardState will be the same
+		// because when there are new entries to apply we just sent a
+		// HardState with an updated Commit value. However, on initial
+		// startup the two are different because we don't send a HardState
+		// until something changes, but we do send any un-applied but
+		// committed entries (and previously-committed entries may be
+		// incorporated into the snapshot, even if rd.CommittedEntries is
+		// empty). Therefore we mark all committed entries as applied
+		// whether they were included in rd.HardState or not.
+		rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit)
+	}
+	if len(rd.Entries) > 0 {
+		// Everything up to the last persisted entry is now stable.
+		e := rd.Entries[len(rd.Entries)-1]
+		rn.raft.raftLog.stableTo(e.Index, e.Term)
+	}
+	if !IsEmptySnap(rd.Snapshot) {
+		rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
+	}
+}
+
+// NewRawNode returns a new RawNode given configuration and a list of raft peers.
+func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
+	if config.ID == 0 {
+		panic("config.ID must not be zero")
+	}
+	r := newRaft(config)
+	rn := &RawNode{
+		raft: r,
+	}
+	lastIndex, err := config.Storage.LastIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	// If the log is empty, this is a new RawNode (like StartNode); otherwise it's
+	// restoring an existing RawNode (like RestartNode).
+	// TODO(bdarnell): rethink RawNode initialization and whether the application needs
+	// to be able to tell us when it expects the RawNode to exist.
+	if lastIndex == 0 {
+		r.becomeFollower(1, None)
+		// Bootstrap the log with one ConfChangeAddNode entry per initial peer.
+		ents := make([]pb.Entry, len(peers))
+		for i, peer := range peers {
+			cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+			data, err := cc.Marshal()
+			if err != nil {
+				panic("unexpected marshal error")
+			}
+
+			ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
+		}
+		r.raftLog.append(ents...)
+		r.raftLog.committed = uint64(len(ents))
+		for _, peer := range peers {
+			r.addNode(peer.ID)
+		}
+	}
+	// Set the initial hard and soft states after performing all initialization.
+	rn.prevSoftSt = r.softState()
+	rn.prevHardSt = r.hardState()
+
+	return rn, nil
+}
+
+// Tick advances the internal logical clock by a single tick.
+func (rn *RawNode) Tick() {
+	rn.raft.tick()
+}
+
+// Campaign causes this RawNode to transition to candidate state.
+func (rn *RawNode) Campaign() error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgHup,
+	})
+}
+
+// Propose proposes data be appended to the raft log.
+func (rn *RawNode) Propose(data []byte) error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgProp,
+		From: rn.raft.id,
+		Entries: []pb.Entry{
+			{Data: data},
+		}})
+}
+
+// ProposeConfChange proposes a config change.
+func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error {
+	data, err := cc.Marshal()
+	if err != nil {
+		return err
+	}
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgProp,
+		Entries: []pb.Entry{
+			{Type: pb.EntryConfChange, Data: data},
+		},
+	})
+}
+
+// ApplyConfChange applies a config change to the local node.
+func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
+	if cc.NodeID == None {
+		// A no-op conf change (NodeID == None) only clears the pending flag.
+		rn.raft.resetPendingConf()
+		return &pb.ConfState{Nodes: rn.raft.nodes()}
+	}
+	switch cc.Type {
+	case pb.ConfChangeAddNode:
+		rn.raft.addNode(cc.NodeID)
+	case pb.ConfChangeRemoveNode:
+		rn.raft.removeNode(cc.NodeID)
+	case pb.ConfChangeUpdateNode:
+		rn.raft.resetPendingConf()
+	default:
+		panic("unexpected conf type")
+	}
+	return &pb.ConfState{Nodes: rn.raft.nodes()}
+}
+
+// Step advances the state machine using the given message.
+func (rn *RawNode) Step(m pb.Message) error {
+	// ignore unexpected local messages receiving over network
+	if IsLocalMsg(m) {
+		return ErrStepLocalMsg
+	}
+	if _, ok := rn.raft.prs[m.From]; ok || !IsResponseMsg(m) {
+		return rn.raft.Step(m)
+	}
+	return ErrStepPeerNotFound
+}
+
+// Ready returns the current point-in-time state of this RawNode.
+func (rn *RawNode) Ready() Ready {
+	rd := rn.newReady()
+	// Outgoing messages are handed to the caller in rd; clear the queue so
+	// they are not returned again by the next Ready.
+	rn.raft.msgs = nil
+	return rd
+}
+
+// HasReady called when RawNode user need to check if any Ready pending.
+// Checking logic in this method should be consistent with Ready.containsUpdates().
+func (rn *RawNode) HasReady() bool {
+	r := rn.raft
+	if !r.softState().equal(rn.prevSoftSt) {
+		return true
+	}
+	if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
+		return true
+	}
+	if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
+		return true
+	}
+	if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
+		return true
+	}
+	return false
+}
+
+// Advance notifies the RawNode that the application has applied and saved progress in the
+// last Ready results.
+func (rn *RawNode) Advance(rd Ready) {
+	rn.commitReady(rd)
+}
+
+// Status returns the current status of the given group.
+func (rn *RawNode) Status() *Status {
+	status := getStatus(rn.raft)
+	return &status
+}
+
+// ReportUnreachable reports the given node is not reachable for the last send.
+func (rn *RawNode) ReportUnreachable(id uint64) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
+}
+
+// ReportSnapshot reports the status of the sent snapshot.
+func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
+	rej := status == SnapshotFailure
+
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
+}

+ 76 - 0
vendor/src/github.com/coreos/etcd/raft/status.go

@@ -0,0 +1,76 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// Status is a point-in-time copy of a raft node's state: its persistent
+// HardState, volatile SoftState, applied index, and (on the leader)
+// per-follower replication Progress.
+type Status struct {
+	ID uint64
+
+	pb.HardState
+	SoftState
+
+	Applied  uint64
+	Progress map[uint64]Progress
+}
+
+// getStatus gets a copy of the current raft status.
+func getStatus(r *raft) Status {
+	s := Status{ID: r.id}
+	s.HardState = r.hardState()
+	s.SoftState = *r.softState()
+
+	s.Applied = r.raftLog.applied
+
+	// Progress is only meaningful (and only populated) on the leader.
+	if s.RaftState == StateLeader {
+		s.Progress = make(map[uint64]Progress)
+		for id, p := range r.prs {
+			s.Progress[id] = *p
+		}
+	}
+
+	return s
+}
+
+// MarshalJSON translates the raft status into JSON.
+// TODO: try to simplify this by introducing ID type into raft
+func (s Status) MarshalJSON() ([]byte, error) {
+	j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"progress":{`,
+		s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState)
+
+	if len(s.Progress) == 0 {
+		j += "}}"
+	} else {
+		for k, v := range s.Progress {
+			subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
+			j += subj
+		}
+		// remove the trailing ","
+		j = j[:len(j)-1] + "}}"
+	}
+	return []byte(j), nil
+}
+
+// String renders the status as its JSON form; it panics on marshal failure.
+func (s Status) String() string {
+	b, err := s.MarshalJSON()
+	if err != nil {
+		raftLogger.Panicf("unexpected error: %v", err)
+	}
+	return string(b)
+}

+ 252 - 0
vendor/src/github.com/coreos/etcd/raft/storage.go

@@ -0,0 +1,252 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+	"sync"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// ErrCompacted is returned by Storage.Entries/Compact when a requested
+// index is unavailable because it predates the last snapshot.
+var ErrCompacted = errors.New("requested index is unavailable due to compaction")
+
+// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested
+// index is older than the existing snapshot.
+var ErrSnapOutOfDate = errors.New("requested entry at index is unavailable")
Wait

+ 116 - 0
vendor/src/github.com/coreos/etcd/raft/util.go

@@ -0,0 +1,116 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"bytes"
+	"fmt"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+func (st StateType) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf("%q", st.String())), nil
+}
+
+// uint64Slice implements sort interface
+type uint64Slice []uint64
+
+func (p uint64Slice) Len() int           { return len(p) }
+func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func min(a, b uint64) uint64 {
+	if a > b {
+		return b
+	}
+	return a
+}
+
+func max(a, b uint64) uint64 {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func IsLocalMsg(m pb.Message) bool {
+	return m.Type == pb.MsgHup || m.Type == pb.MsgBeat || m.Type == pb.MsgUnreachable || m.Type == pb.MsgSnapStatus || m.Type == pb.MsgCheckQuorum
+}
+
+func IsResponseMsg(m pb.Message) bool {
+	return m.Type == pb.MsgAppResp || m.Type == pb.MsgVoteResp || m.Type == pb.MsgHeartbeatResp || m.Type == pb.MsgUnreachable
+}
+
+// EntryFormatter can be implemented by the application to provide human-readable formatting
+// of entry data. Nil is a valid EntryFormatter and will use a default format.
+type EntryFormatter func([]byte) string
+
+// DescribeMessage returns a concise human-readable description of a
+// Message for debugging.
+func DescribeMessage(m pb.Message, f EntryFormatter) string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
+	if m.Reject {
+		fmt.Fprintf(&buf, " Rejected")
+		if m.RejectHint != 0 {
+			fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint)
+		}
+	}
+	if m.Commit != 0 {
+		fmt.Fprintf(&buf, " Commit:%d", m.Commit)
+	}
+	if len(m.Entries) > 0 {
+		fmt.Fprintf(&buf, " Entries:[")
+		for i, e := range m.Entries {
+			if i != 0 {
+				buf.WriteString(", ")
+			}
+			buf.WriteString(DescribeEntry(e, f))
+		}
+		fmt.Fprintf(&buf, "]")
+	}
+	if !IsEmptySnap(m.Snapshot) {
+		fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot)
+	}
+	return buf.String()
+}
+
+// DescribeEntry returns a concise human-readable description of an
+// Entry for debugging.
+func DescribeEntry(e pb.Entry, f EntryFormatter) string {
+	var formatted string
+	if e.Type == pb.EntryNormal && f != nil {
+		formatted = f(e.Data)
+	} else {
+		formatted = fmt.Sprintf("%q", e.Data)
+	}
+	return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted)
+}
+
+func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry {
+	if len(ents) == 0 {
+		return ents
+	}
+	size := ents[0].Size()
+	var limit int
+	for limit = 1; limit < len(ents); limit++ {
+		size += ents[limit].Size()
+		if uint64(size) > maxSize {
+			break
+		}
+	}
+	return ents[:limit]
+}

+ 74 - 0
vendor/src/github.com/coreos/etcd/snap/db.go

@@ -0,0 +1,74 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+)
+
+// SaveDBFrom saves snapshot of the database from the given reader. It
+// guarantees the save operation is atomic.
+func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) error {
+	f, err := ioutil.TempFile(s.dir, "tmp")
+	if err != nil {
+		return err
+	}
+	var n int64
+	n, err = io.Copy(f, r)
+	if err == nil {
+		err = f.Sync()
+	}
+	f.Close()
+	if err != nil {
+		os.Remove(f.Name())
+		return err
+	}
+	fn := path.Join(s.dir, fmt.Sprintf("%016x.snap.db", id))
+	if fileutil.Exist(fn) {
+		os.Remove(f.Name())
+		return nil
+	}
+	err = os.Rename(f.Name(), fn)
+	if err != nil {
+		os.Remove(f.Name())
+		return err
+	}
+
+	plog.Infof("saved database snapshot to disk [total bytes: %d]", n)
+
+	return nil
+}
+
+// DBFilePath returns the file path for the snapshot of the database with
+// the given id. If the snapshot does not exist, it returns an error.
+func (s *Snapshotter) DBFilePath(id uint64) (string, error) {
+	fns, err := fileutil.ReadDir(s.dir)
+	if err != nil {
+		return "", err
+	}
+	wfn := fmt.Sprintf("%016x.snap.db", id)
+	for _, fn := range fns {
+		if fn == wfn {
+			return path.Join(s.dir, fn), nil
+		}
+	}
+	return "", fmt.Errorf("snap: snapshot file doesn't exist")
+}

+ 59 - 0
vendor/src/github.com/coreos/etcd/snap/message.go

@@ -0,0 +1,59 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+	"io"
+
+	"github.com/coreos/etcd/raft/raftpb"
+)
+
+// Message is a struct that contains a raft Message and a ReadCloser. The type
+// of raft message MUST be MsgSnap, which contains the raft meta-data and an
+// additional data []byte field that contains the snapshot of the actual state
+// machine.
+// Message contains the ReadCloser field for handling large snapshots. This avoids
+// copying the entire snapshot into a byte array, which consumes a lot of memory.
+//
+// User of Message should close the Message after sending it.
+type Message struct {
+	raftpb.Message
+	ReadCloser io.ReadCloser
+	closeC     chan bool
+}
+
+func NewMessage(rs raftpb.Message, rc io.ReadCloser) *Message {
+	return &Message{
+		Message:    rs,
+		ReadCloser: rc,
+		closeC:     make(chan bool, 1),
+	}
+}
+
+// CloseNotify returns a channel that receives a single value
+// when sending the message is finished. true indicates the send
+// was successful.
+func (m Message) CloseNotify() <-chan bool {
+	return m.closeC
+}
+
+func (m Message) CloseWithError(err error) {
+	m.ReadCloser.Close()
+	if err == nil {
+		m.closeC <- true
+	} else {
+		m.closeC <- false
+	}
+}

+ 41 - 0
vendor/src/github.com/coreos/etcd/snap/metrics.go

@@ -0,0 +1,41 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+	// TODO: save_fsync latency?
+	saveDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "snapshot",
+		Name:      "save_total_durations_seconds",
+		Help:      "The total latency distributions of save called by snapshot.",
+		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
+	})
+
+	marshallingDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "snapshot",
+		Name:      "save_marshalling_durations_seconds",
+		Help:      "The marshalling cost distributions of save called by snapshot.",
+		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
+	})
+)
+
+func init() {
+	prometheus.MustRegister(saveDurations)
+	prometheus.MustRegister(marshallingDurations)
+}

+ 332 - 0
vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go

@@ -0,0 +1,332 @@
+// Code generated by protoc-gen-gogo.
+// source: snap.proto
+// DO NOT EDIT!
+
+/*
+	Package snappb is a generated protocol buffer package.
+
+	It is generated from these files:
+		snap.proto
+
+	It has these top-level messages:
+		Snapshot
+*/
+package snappb
+
+import (
+	"fmt"
+
+	proto "github.com/gogo/protobuf/proto"
+)
+
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Snapshot struct {
+	Crc              uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"`
+	Data             []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset()         { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage()    {}
+
+func init() {
+	proto.RegisterType((*Snapshot)(nil), "snappb.snapshot")
+}
+func (m *Snapshot) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	data[i] = 0x8
+	i++
+	i = encodeVarintSnap(data, i, uint64(m.Crc))
+	if m.Data != nil {
+		data[i] = 0x12
+		i++
+		i = encodeVarintSnap(data, i, uint64(len(m.Data)))
+		i += copy(data[i:], m.Data)
+	}
+	if m.XXX_unrecognized != nil {
+		i += copy(data[i:], m.XXX_unrecognized)
+	}
+	return i, nil
+}
+
+func encodeFixed64Snap(data []byte, offset int, v uint64) int {
+	data[offset] = uint8(v)
+	data[offset+1] = uint8(v >> 8)
+	data[offset+2] = uint8(v >> 16)
+	data[offset+3] = uint8(v >> 24)
+	data[offset+4] = uint8(v >> 32)
+	data[offset+5] = uint8(v >> 40)
+	data[offset+6] = uint8(v >> 48)
+	data[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Snap(data []byte, offset int, v uint32) int {
+	data[offset] = uint8(v)
+	data[offset+1] = uint8(v >> 8)
+	data[offset+2] = uint8(v >> 16)
+	data[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintSnap(data []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		data[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	data[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Snapshot) Size() (n int) {
+	var l int
+	_ = l
+	n += 1 + sovSnap(uint64(m.Crc))
+	if m.Data != nil {
+		l = len(m.Data)
+		n += 1 + l + sovSnap(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovSnap(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozSnap(x uint64) (n int) {
+	return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Snapshot) Unmarshal(data []byte) error {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnap
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: snapshot: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType)
+			}
+			m.Crc = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnap
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Crc |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnap
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthSnap
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnap(data[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnap
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipSnap(data []byte) (n int, err error) {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowSnap
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowSnap
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if data[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowSnap
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthSnap
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowSnap
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := data[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipSnap(data[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowSnap   = fmt.Errorf("proto: integer overflow")
+)

+ 14 - 0
vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto

@@ -0,0 +1,14 @@
+syntax = "proto2";
+package snappb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message snapshot {
+	optional uint32 crc  = 1 [(gogoproto.nullable) = false];
+	optional bytes data  = 2;
+}

+ 189 - 0
vendor/src/github.com/coreos/etcd/snap/snapshotter.go

@@ -0,0 +1,189 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package snap stores raft nodes' states with snapshots.
+package snap
+
+import (
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"io/ioutil"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/coreos/etcd/pkg/pbutil"
+	"github.com/coreos/etcd/raft"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/snap/snappb"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+const (
+	snapSuffix = ".snap"
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "snap")
+
+	ErrNoSnapshot    = errors.New("snap: no available snapshot")
+	ErrEmptySnapshot = errors.New("snap: empty snapshot")
+	ErrCRCMismatch   = errors.New("snap: crc mismatch")
+	crcTable         = crc32.MakeTable(crc32.Castagnoli)
+)
+
+type Snapshotter struct {
+	dir string
+}
+
+func New(dir string) *Snapshotter {
+	return &Snapshotter{
+		dir: dir,
+	}
+}
+
+func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error {
+	if raft.IsEmptySnap(snapshot) {
+		return nil
+	}
+	return s.save(&snapshot)
+}
+
+func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error {
+	start := time.Now()
+
+	fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix)
+	b := pbutil.MustMarshal(snapshot)
+	crc := crc32.Update(0, crcTable, b)
+	snap := snappb.Snapshot{Crc: crc, Data: b}
+	d, err := snap.Marshal()
+	if err != nil {
+		return err
+	} else {
+		marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second))
+	}
+
+	err = ioutil.WriteFile(path.Join(s.dir, fname), d, 0666)
+	if err == nil {
+		saveDurations.Observe(float64(time.Since(start)) / float64(time.Second))
+	}
+	return err
+}
+
+func (s *Snapshotter) Load() (*raftpb.Snapshot, error) {
+	names, err := s.snapNames()
+	if err != nil {
+		return nil, err
+	}
+	var snap *raftpb.Snapshot
+	for _, name := range names {
+		if snap, err = loadSnap(s.dir, name); err == nil {
+			break
+		}
+	}
+	if err != nil {
+		return nil, ErrNoSnapshot
+	}
+	return snap, nil
+}
+
+func loadSnap(dir, name string) (*raftpb.Snapshot, error) {
+	fpath := path.Join(dir, name)
+	snap, err := Read(fpath)
+	if err != nil {
+		renameBroken(fpath)
+	}
+	return snap, err
+}
+
+// Read reads the snapshot named by snapname and returns the snapshot.
+func Read(snapname string) (*raftpb.Snapshot, error) {
+	b, err := ioutil.ReadFile(snapname)
+	if err != nil {
+		plog.Errorf("cannot read file %v: %v", snapname, err)
+		return nil, err
+	}
+
+	if len(b) == 0 {
+		plog.Errorf("unexpected empty snapshot")
+		return nil, ErrEmptySnapshot
+	}
+
+	var serializedSnap snappb.Snapshot
+	if err = serializedSnap.Unmarshal(b); err != nil {
+		plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
+		return nil, err
+	}
+
+	if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 {
+		plog.Errorf("unexpected empty snapshot")
+		return nil, ErrEmptySnapshot
+	}
+
+	crc := crc32.Update(0, crcTable, serializedSnap.Data)
+	if crc != serializedSnap.Crc {
+		plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname)
+		return nil, ErrCRCMismatch
+	}
+
+	var snap raftpb.Snapshot
+	if err = snap.Unmarshal(serializedSnap.Data); err != nil {
+		plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
+		return nil, err
+	}
+	return &snap, nil
+}
+
+// snapNames returns the filenames of the snapshots in logical time order (from newest to oldest).
+// If there are no available snapshots, an ErrNoSnapshot will be returned.
+func (s *Snapshotter) snapNames() ([]string, error) {
+	dir, err := os.Open(s.dir)
+	if err != nil {
+		return nil, err
+	}
+	defer dir.Close()
+	names, err := dir.Readdirnames(-1)
+	if err != nil {
+		return nil, err
+	}
+	snaps := checkSuffix(names)
+	if len(snaps) == 0 {
+		return nil, ErrNoSnapshot
+	}
+	sort.Sort(sort.Reverse(sort.StringSlice(snaps)))
+	return snaps, nil
+}
+
+func checkSuffix(names []string) []string {
+	snaps := []string{}
+	for i := range names {
+		if strings.HasSuffix(names[i], snapSuffix) {
+			snaps = append(snaps, names[i])
+		} else {
+			plog.Warningf("skipped unexpected non snapshot file %v", names[i])
+		}
+	}
+	return snaps
+}
+
+func renameBroken(path string) {
+	brokenPath := path + ".broken"
+	if err := os.Rename(path, brokenPath); err != nil {
+		plog.Warningf("cannot rename broken snapshot file %v to %v: %v", path, brokenPath, err)
+	}
+}

+ 103 - 0
vendor/src/github.com/coreos/etcd/wal/decoder.go

@@ -0,0 +1,103 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"bufio"
+	"encoding/binary"
+	"hash"
+	"io"
+	"sync"
+
+	"github.com/coreos/etcd/pkg/crc"
+	"github.com/coreos/etcd/pkg/pbutil"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/wal/walpb"
+)
+
+type decoder struct {
+	mu sync.Mutex
+	br *bufio.Reader
+
+	c   io.Closer
+	crc hash.Hash32
+}
+
+func newDecoder(rc io.ReadCloser) *decoder {
+	return &decoder{
+		br:  bufio.NewReader(rc),
+		c:   rc,
+		crc: crc.New(0, crcTable),
+	}
+}
+
+func (d *decoder) decode(rec *walpb.Record) error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	rec.Reset()
+	l, err := readInt64(d.br)
+	if err != nil {
+		return err
+	}
+	data := make([]byte, l)
+	if _, err = io.ReadFull(d.br, data); err != nil {
+		// ReadFull returns io.EOF only if no bytes were read
+		// the decoder should treat this as an ErrUnexpectedEOF instead.
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return err
+	}
+	if err := rec.Unmarshal(data); err != nil {
+		return err
+	}
+	// skip crc checking if the record type is crcType
+	if rec.Type == crcType {
+		return nil
+	}
+	d.crc.Write(rec.Data)
+	return rec.Validate(d.crc.Sum32())
+}
+
+func (d *decoder) updateCRC(prevCrc uint32) {
+	d.crc = crc.New(prevCrc, crcTable)
+}
+
+func (d *decoder) lastCRC() uint32 {
+	return d.crc.Sum32()
+}
+
+func (d *decoder) close() error {
+	return d.c.Close()
+}
+
+func mustUnmarshalEntry(d []byte) raftpb.Entry {
+	var e raftpb.Entry
+	pbutil.MustUnmarshal(&e, d)
+	return e
+}
+
+func mustUnmarshalState(d []byte) raftpb.HardState {
+	var s raftpb.HardState
+	pbutil.MustUnmarshal(&s, d)
+	return s
+}
+
+func readInt64(r io.Reader) (int64, error) {
+	var n int64
+	err := binary.Read(r, binary.LittleEndian, &n)
+	return n, err
+}

+ 68 - 0
vendor/src/github.com/coreos/etcd/wal/doc.go

@@ -0,0 +1,68 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package wal provides an implementation of a write ahead log that is used by
+etcd.
+
+A WAL is created at a particular directory and is made up of a number of
+segmented WAL files. Inside of each file the raft state and entries are appended
+to it with the Save method:
+
+	metadata := []byte{}
+	w, err := wal.Create("/var/lib/etcd", metadata)
+	...
+	err := w.Save(s, ents)
+
+After saving a raft snapshot to disk, the SaveSnapshot method should be called to
+record it, so the WAL can be matched with the saved snapshot when restarting.
+
+	err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2})
+
+When a user has finished using a WAL it must be closed:
+
+	w.Close()
+
+WAL files are placed inside of the directory in the following format:
+$seq-$index.wal
+
+The first WAL file to be created will be 0000000000000000-0000000000000000.wal
+indicating an initial sequence of 0 and an initial raft index of 0. The first
+entry written to WAL MUST have raft index 0.
+
+WAL will cut its current wal file if its size exceeds 8MB. This will increment an internal
+sequence number and cause a new file to be created. If the last raft index saved
+was 0x20 and this is the first time cut has been called on this WAL then the sequence will
+increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal.
+If a second cut issues 0x10 entries with incremental index later then the file will be called:
+0000000000000002-0000000000000031.wal.
+
+At a later time a WAL can be opened at a particular snapshot. If there is no
+snapshot, an empty snapshot should be passed in.
+
+	w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2})
+	...
+
+The snapshot must have been written to the WAL.
+
+Additional items cannot be Saved to this WAL until all of the items from the given
+snapshot to the end of the WAL are read first:
+
+	metadata, state, ents, err := w.ReadAll()
+
+This will give you the metadata, the last raft.State and the slice of
+raft.Entry items in the log.
+
+*/
+package wal

+ 89 - 0
vendor/src/github.com/coreos/etcd/wal/encoder.go

@@ -0,0 +1,89 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"bufio"
+	"encoding/binary"
+	"hash"
+	"io"
+	"sync"
+
+	"github.com/coreos/etcd/pkg/crc"
+	"github.com/coreos/etcd/wal/walpb"
+)
+
+type encoder struct {
+	mu sync.Mutex
+	bw *bufio.Writer
+
+	crc       hash.Hash32
+	buf       []byte
+	uint64buf []byte
+}
+
+func newEncoder(w io.Writer, prevCrc uint32) *encoder {
+	return &encoder{
+		bw:  bufio.NewWriter(w),
+		crc: crc.New(prevCrc, crcTable),
+		// 1MB buffer
+		buf:       make([]byte, 1024*1024),
+		uint64buf: make([]byte, 8),
+	}
+}
+
+func (e *encoder) encode(rec *walpb.Record) error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+
+	e.crc.Write(rec.Data)
+	rec.Crc = e.crc.Sum32()
+	var (
+		data []byte
+		err  error
+		n    int
+	)
+
+	if rec.Size() > len(e.buf) {
+		data, err = rec.Marshal()
+		if err != nil {
+			return err
+		}
+	} else {
+		n, err = rec.MarshalTo(e.buf)
+		if err != nil {
+			return err
+		}
+		data = e.buf[:n]
+	}
+	if err = writeInt64(e.bw, int64(len(data)), e.uint64buf); err != nil {
+		return err
+	}
+	_, err = e.bw.Write(data)
+	return err
+}
+
+func (e *encoder) flush() error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	return e.bw.Flush()
+}
+
+func writeInt64(w io.Writer, n int64, buf []byte) error {
+	// http://golang.org/src/encoding/binary/binary.go
+	binary.LittleEndian.PutUint64(buf, uint64(n))
+	_, err := w.Write(buf)
+	return err
+}

+ 38 - 0
vendor/src/github.com/coreos/etcd/wal/metrics.go

@@ -0,0 +1,38 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+	syncDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
+		Namespace: "etcd",
+		Subsystem: "wal",
+		Name:      "fsync_durations_seconds",
+		Help:      "The latency distributions of fsync called by wal.",
+		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
+	})
+	lastIndexSaved = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "wal",
+		Name:      "last_index_saved",
+		Help:      "The index of the last entry saved by wal.",
+	})
+)
+
+func init() {
+	prometheus.MustRegister(syncDurations)
+	prometheus.MustRegister(lastIndexSaved)
+}

+ 45 - 0
vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go

@@ -0,0 +1,45 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import "io"
+
+type multiReadCloser struct {
+	closers []io.Closer
+	reader  io.Reader
+}
+
+func (mc *multiReadCloser) Close() error {
+	var err error
+	for i := range mc.closers {
+		err = mc.closers[i].Close()
+	}
+	return err
+}
+
+func (mc *multiReadCloser) Read(p []byte) (int, error) {
+	return mc.reader.Read(p)
+}
+
+func MultiReadCloser(readClosers ...io.ReadCloser) io.ReadCloser {
+	cs := make([]io.Closer, len(readClosers))
+	rs := make([]io.Reader, len(readClosers))
+	for i := range readClosers {
+		cs[i] = readClosers[i]
+		rs[i] = readClosers[i]
+	}
+	r := io.MultiReader(rs...)
+	return &multiReadCloser{cs, r}
+}

+ 106 - 0
vendor/src/github.com/coreos/etcd/wal/repair.go

@@ -0,0 +1,106 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"io"
+	"os"
+	"path"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+	"github.com/coreos/etcd/wal/walpb"
+)
+
+// Repair tries to repair ErrUnexpectedEOF in the
+// last wal file by truncating.
+func Repair(dirpath string) bool {
+	f, err := openLast(dirpath)
+	if err != nil {
+		return false
+	}
+	defer f.Close()
+
+	n := 0
+	rec := &walpb.Record{}
+
+	decoder := newDecoder(f)
+	defer decoder.close()
+	for {
+		err := decoder.decode(rec)
+		switch err {
+		case nil:
+			n += 8 + rec.Size()
+			// update crc of the decoder when necessary
+			switch rec.Type {
+			case crcType:
+				crc := decoder.crc.Sum32()
+				// current crc of decoder must match the crc of the record.
+				// no need to match 0 crc, since the decoder is a new one in this case.
+				if crc != 0 && rec.Validate(crc) != nil {
+					return false
+				}
+				decoder.updateCRC(rec.Crc)
+			}
+			continue
+		case io.EOF:
+			return true
+		case io.ErrUnexpectedEOF:
+			plog.Noticef("repairing %v", f.Name())
+			bf, bferr := os.Create(f.Name() + ".broken")
+			if bferr != nil {
+				plog.Errorf("could not repair %v, failed to create backup file", f.Name())
+				return false
+			}
+			defer bf.Close()
+
+			if _, err = f.Seek(0, os.SEEK_SET); err != nil {
+				plog.Errorf("could not repair %v, failed to read file", f.Name())
+				return false
+			}
+
+			if _, err = io.Copy(bf, f); err != nil {
+				plog.Errorf("could not repair %v, failed to copy file", f.Name())
+				return false
+			}
+
+			if err = f.Truncate(int64(n)); err != nil {
+				plog.Errorf("could not repair %v, failed to truncate file", f.Name())
+				return false
+			}
+			if err = f.Sync(); err != nil {
+				plog.Errorf("could not repair %v, failed to sync file", f.Name())
+				return false
+			}
+			return true
+		default:
+			plog.Errorf("could not repair error (%v)", err)
+			return false
+		}
+	}
+}
+
+// openLast opens the last wal file for read and write.
+func openLast(dirpath string) (*os.File, error) {
+	names, err := fileutil.ReadDir(dirpath)
+	if err != nil {
+		return nil, err
+	}
+	names = checkWalNames(names)
+	if len(names) == 0 {
+		return nil, ErrFileNotFound
+	}
+	last := path.Join(dirpath, names[len(names)-1])
+	return os.OpenFile(last, os.O_RDWR, 0)
+}

+ 93 - 0
vendor/src/github.com/coreos/etcd/wal/util.go

@@ -0,0 +1,93 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+)
+
+var (
+	// badWalName is returned by parseWalName for files that do not
+	// match the "%016x-%016x.wal" naming scheme.
+	badWalName = errors.New("bad wal name")
+)
+
+// Exist reports whether the given directory exists and contains any files.
+// Note that it does not check whether those files are valid wal segments.
+func Exist(dirpath string) bool {
+	names, err := fileutil.ReadDir(dirpath)
+	if err != nil {
+		return false
+	}
+	return len(names) != 0
+}
+
+// searchIndex returns the last array index of names whose raft index section is
+// equal to or smaller than the given index.
+// The given names MUST be sorted.
+func searchIndex(names []string, index uint64) (int, bool) {
+	// scan from the newest segment backwards; the first segment whose
+	// starting raft index is <= index is the one containing it
+	for i := len(names) - 1; i >= 0; i-- {
+		name := names[i]
+		_, curIndex, err := parseWalName(name)
+		if err != nil {
+			plog.Panicf("parse correct name should never fail: %v", err)
+		}
+		if index >= curIndex {
+			return i, true
+		}
+	}
+	return -1, false
+}
+
+// names should have been sorted based on sequence number.
+// isValidSeq checks whether seq increases continuously.
+func isValidSeq(names []string) bool {
+	var lastSeq uint64
+	for _, name := range names {
+		curSeq, _, err := parseWalName(name)
+		if err != nil {
+			plog.Panicf("parse correct name should never fail: %v", err)
+		}
+		// a gap in the sequence numbers means a segment is missing
+		if lastSeq != 0 && lastSeq != curSeq-1 {
+			return false
+		}
+		lastSeq = curSeq
+	}
+	return true
+}
+
+// checkWalNames filters names down to those that parse as wal segment
+// file names, logging a warning for each ignored entry.
+func checkWalNames(names []string) []string {
+	wnames := make([]string, 0)
+	for _, name := range names {
+		if _, _, err := parseWalName(name); err != nil {
+			plog.Warningf("ignored file %v in wal", name)
+			continue
+		}
+		wnames = append(wnames, name)
+	}
+	return wnames
+}
+
+// parseWalName extracts the sequence number and starting raft index from
+// a wal segment file name of the form "%016x-%016x.wal".
+func parseWalName(str string) (seq, index uint64, err error) {
+	if !strings.HasSuffix(str, ".wal") {
+		return 0, 0, badWalName
+	}
+	_, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index)
+	return seq, index, err
+}
+
+// walName builds the segment file name for the given sequence number and
+// starting raft index; the inverse of parseWalName.
+func walName(seq, index uint64) string {
+	return fmt.Sprintf("%016x-%016x.wal", seq, index)
+}

+ 562 - 0
vendor/src/github.com/coreos/etcd/wal/wal.go

@@ -0,0 +1,562 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"os"
+	"path"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/pkg/fileutil"
+	"github.com/coreos/etcd/pkg/pbutil"
+	"github.com/coreos/etcd/raft"
+	"github.com/coreos/etcd/raft/raftpb"
+	"github.com/coreos/etcd/wal/walpb"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+// Record type tags stored in walpb.Record.Type; readers dispatch on these
+// (see ReadAll).
+const (
+	metadataType int64 = iota + 1
+	entryType
+	stateType
+	crcType
+	snapshotType
+
+	// the owner can make/remove files inside the directory
+	privateDirMode = 0700
+
+	// the expected size of each wal segment file.
+	// the actual size might be bigger than it.
+	segmentSizeBytes = 64 * 1000 * 1000 // 64MB
+)
+
+var (
+	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "wal")
+
+	ErrMetadataConflict = errors.New("wal: conflicting metadata found")
+	ErrFileNotFound     = errors.New("wal: file not found")
+	ErrCRCMismatch      = errors.New("wal: crc mismatch")
+	ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
+	ErrSnapshotNotFound = errors.New("wal: snapshot not found")
+	// crcTable is the CRC-32 (Castagnoli) table shared by all records.
+	crcTable            = crc32.MakeTable(crc32.Castagnoli)
+)
+
+// WAL is a logical representation of the stable storage.
+// WAL is either in read mode or append mode but not both.
+// A newly created WAL is in append mode, and ready for appending records.
+// A just opened WAL is in read mode, and ready for reading records.
+// The WAL will be ready for appending after reading out all the previous records.
+type WAL struct {
+	dir      string           // the living directory of the underlay files
+	metadata []byte           // metadata recorded at the head of each WAL
+	state    raftpb.HardState // hardstate recorded at the head of WAL
+
+	start   walpb.Snapshot // snapshot to start reading
+	decoder *decoder       // decoder to decode records
+
+	mu      sync.Mutex
+	f       *os.File // underlay file opened for appending, sync
+	seq     uint64   // sequence of the wal file currently used for writes
+	enti    uint64   // index of the last entry saved to the wal
+	encoder *encoder // encoder to encode records
+
+	locks []fileutil.Lock // the file locks the WAL is holding (the name is increasing)
+}
+
+// Create creates a WAL ready for appending records. The given metadata is
+// recorded at the head of each WAL file, and can be retrieved with ReadAll.
+// The first segment is named with seq 0 and index 0, locked, and seeded
+// with a zero-crc record, the metadata record, and an empty snapshot record.
+func Create(dirpath string, metadata []byte) (*WAL, error) {
+	if Exist(dirpath) {
+		return nil, os.ErrExist
+	}
+
+	if err := os.MkdirAll(dirpath, privateDirMode); err != nil {
+		return nil, err
+	}
+
+	p := path.Join(dirpath, walName(0, 0))
+	f, err := os.OpenFile(p, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
+	if err != nil {
+		return nil, err
+	}
+	// NOTE(review): on the error returns below, f is not closed and the
+	// acquired lock is not released — presumably acceptable for a fatal
+	// startup path, but worth confirming upstream.
+	l, err := fileutil.NewLock(f.Name())
+	if err != nil {
+		return nil, err
+	}
+	if err = l.Lock(); err != nil {
+		return nil, err
+	}
+
+	w := &WAL{
+		dir:      dirpath,
+		metadata: metadata,
+		seq:      0,
+		f:        f,
+		encoder:  newEncoder(f, 0),
+	}
+	w.locks = append(w.locks, l)
+	if err := w.saveCrc(0); err != nil {
+		return nil, err
+	}
+	if err := w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
+		return nil, err
+	}
+	if err := w.SaveSnapshot(walpb.Snapshot{}); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// Open opens the WAL at the given snap.
+// The snap SHOULD have been previously saved to the WAL, or the following
+// ReadAll will fail.
+// The returned WAL is ready to read and the first record will be the one after
+// the given snap. The WAL cannot be appended to before reading out all of its
+// previous records.
+func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) {
+	return openAtIndex(dirpath, snap, true)
+}
+
+// OpenForRead only opens the wal files for read.
+// Write on a read only wal panics.
+func OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error) {
+	return openAtIndex(dirpath, snap, false)
+}
+
+// openAtIndex is the shared implementation of Open and OpenForRead.
+// write selects append mode: the segment files are locked exclusively and
+// the last one is opened for appending.
+func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) {
+	names, err := fileutil.ReadDir(dirpath)
+	if err != nil {
+		return nil, err
+	}
+	names = checkWalNames(names)
+	if len(names) == 0 {
+		return nil, ErrFileNotFound
+	}
+
+	// find the segment containing snap.Index and require a gap-free
+	// sequence from there to the newest segment
+	nameIndex, ok := searchIndex(names, snap.Index)
+	if !ok || !isValidSeq(names[nameIndex:]) {
+		return nil, ErrFileNotFound
+	}
+
+	// open the wal files for reading
+	rcs := make([]io.ReadCloser, 0)
+	ls := make([]fileutil.Lock, 0)
+	for _, name := range names[nameIndex:] {
+		f, err := os.Open(path.Join(dirpath, name))
+		if err != nil {
+			return nil, err
+		}
+		l, err := fileutil.NewLock(f.Name())
+		if err != nil {
+			return nil, err
+		}
+		err = l.TryLock()
+		if err != nil {
+			// a failed TryLock is fatal only in write mode; readers
+			// tolerate files that are locked by a concurrent writer
+			if write {
+				return nil, err
+			}
+		}
+		rcs = append(rcs, f)
+		ls = append(ls, l)
+	}
+	// chain all segment readers into one logical stream
+	rc := MultiReadCloser(rcs...)
+
+	// create a WAL ready for reading
+	w := &WAL{
+		dir:     dirpath,
+		start:   snap,
+		decoder: newDecoder(rc),
+		locks:   ls,
+	}
+
+	if write {
+		// open the last wal file for appending
+		seq, _, err := parseWalName(names[len(names)-1])
+		if err != nil {
+			rc.Close()
+			return nil, err
+		}
+		last := path.Join(dirpath, names[len(names)-1])
+
+		f, err := os.OpenFile(last, os.O_WRONLY|os.O_APPEND, 0)
+		if err != nil {
+			rc.Close()
+			return nil, err
+		}
+		// reserve disk space up front so appends do not fail mid-record
+		err = fileutil.Preallocate(f, segmentSizeBytes)
+		if err != nil {
+			rc.Close()
+			plog.Errorf("failed to allocate space when creating new wal file (%v)", err)
+			return nil, err
+		}
+
+		w.f = f
+		w.seq = seq
+	}
+
+	return w, nil
+}
+
+// ReadAll reads out records of the current WAL.
+// If opened in write mode, it must read out all records until EOF. Or an error
+// will be returned.
+// If opened in read mode, it will try to read all records if possible.
+// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
+// If loaded snap doesn't match with the expected one, it will return
+// all the records and error ErrSnapshotMismatch.
+// TODO: detect not-last-snap error.
+// TODO: maybe loose the checking of match.
+// After ReadAll, the WAL will be ready for appending new records.
+func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	rec := &walpb.Record{}
+	decoder := w.decoder
+
+	var match bool
+	for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
+		switch rec.Type {
+		case entryType:
+			e := mustUnmarshalEntry(rec.Data)
+			if e.Index > w.start.Index {
+				// truncate any previously read entries at or after
+				// e.Index before appending: a later record for the same
+				// index supersedes (overwrites) earlier conflicting ones
+				ents = append(ents[:e.Index-w.start.Index-1], e)
+			}
+			w.enti = e.Index
+		case stateType:
+			state = mustUnmarshalState(rec.Data)
+		case metadataType:
+			// every segment repeats the metadata header; they must all agree
+			if metadata != nil && !reflect.DeepEqual(metadata, rec.Data) {
+				state.Reset()
+				return nil, state, nil, ErrMetadataConflict
+			}
+			metadata = rec.Data
+		case crcType:
+			crc := decoder.crc.Sum32()
+			// current crc of decoder must match the crc of the record.
+			// no need to match a 0 crc, since the decoder is a new one in this case.
+			if crc != 0 && rec.Validate(crc) != nil {
+				state.Reset()
+				return nil, state, nil, ErrCRCMismatch
+			}
+			decoder.updateCRC(rec.Crc)
+		case snapshotType:
+			var snap walpb.Snapshot
+			pbutil.MustUnmarshal(&snap, rec.Data)
+			if snap.Index == w.start.Index {
+				if snap.Term != w.start.Term {
+					state.Reset()
+					return nil, state, nil, ErrSnapshotMismatch
+				}
+				match = true
+			}
+		default:
+			state.Reset()
+			return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
+		}
+	}
+
+	// w.f is nil when the WAL was opened read-only (OpenForRead)
+	switch w.f {
+	case nil:
+		// We do not have to read out all entries in read mode.
+		// The last record may be a partially written one, so
+		// io.ErrUnexpectedEOF might be returned.
+		if err != io.EOF && err != io.ErrUnexpectedEOF {
+			state.Reset()
+			return nil, state, nil, err
+		}
+	default:
+		// We must read all of the entries if WAL is opened in write mode.
+		if err != io.EOF {
+			state.Reset()
+			return nil, state, nil, err
+		}
+	}
+
+	err = nil
+	if !match {
+		err = ErrSnapshotNotFound
+	}
+
+	// close decoder, disable reading
+	w.decoder.close()
+	w.start = walpb.Snapshot{}
+
+	w.metadata = metadata
+
+	if w.f != nil {
+		// create encoder (chain crc with the decoder), enable appending
+		w.encoder = newEncoder(w.f, w.decoder.lastCRC())
+		w.decoder = nil
+		lastIndexSaved.Set(float64(w.enti))
+	}
+
+	return metadata, state, ents, err
+}
+
+// cut closes current file written and creates a new one ready to append.
+// cut first creates a temp wal file and writes necessary headers into it.
+// Then cut atomically renames the temp wal file to a wal file.
+func (w *WAL) cut() error {
+	// close old wal file after flushing and syncing it
+	if err := w.sync(); err != nil {
+		return err
+	}
+	if err := w.f.Close(); err != nil {
+		return err
+	}
+
+	// next segment: sequence + 1, starting at the next raft index
+	fpath := path.Join(w.dir, walName(w.seq+1, w.enti+1))
+	ftpath := fpath + ".tmp"
+
+	// create a temp wal file with name sequence + 1, or truncate the existing one
+	ft, err := os.OpenFile(ftpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_TRUNC, 0600)
+	if err != nil {
+		return err
+	}
+
+	// update writer and save the previous crc
+	w.f = ft
+	prevCrc := w.encoder.crc.Sum32()
+	w.encoder = newEncoder(w.f, prevCrc)
+	if err = w.saveCrc(prevCrc); err != nil {
+		return err
+	}
+	// replay the segment header: metadata record plus the latest hard state
+	if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
+		return err
+	}
+	if err = w.saveState(&w.state); err != nil {
+		return err
+	}
+	// close temp wal file
+	if err = w.sync(); err != nil {
+		return err
+	}
+	if err = w.f.Close(); err != nil {
+		return err
+	}
+
+	// atomically move temp wal file to wal file
+	if err = os.Rename(ftpath, fpath); err != nil {
+		return err
+	}
+
+	// open the wal file and update writer again
+	f, err := os.OpenFile(fpath, os.O_WRONLY|os.O_APPEND, 0600)
+	if err != nil {
+		return err
+	}
+	if err = fileutil.Preallocate(f, segmentSizeBytes); err != nil {
+		plog.Errorf("failed to allocate space when creating new wal file (%v)", err)
+		return err
+	}
+
+	// recreate the encoder so it writes to the reopened file, chaining the crc
+	w.f = f
+	prevCrc = w.encoder.crc.Sum32()
+	w.encoder = newEncoder(w.f, prevCrc)
+
+	// lock the new wal file
+	l, err := fileutil.NewLock(f.Name())
+	if err != nil {
+		return err
+	}
+
+	if err := l.Lock(); err != nil {
+		return err
+	}
+	w.locks = append(w.locks, l)
+
+	// increase the wal seq
+	w.seq++
+
+	plog.Infof("segmented wal file %v is created", fpath)
+	return nil
+}
+
+// sync flushes any buffered records in the encoder and fdatasyncs the
+// current segment file, recording the sync latency in the syncDurations
+// metric. Callers must hold w.mu.
+func (w *WAL) sync() error {
+	if w.encoder != nil {
+		if err := w.encoder.flush(); err != nil {
+			return err
+		}
+	}
+	start := time.Now()
+	err := fileutil.Fdatasync(w.f)
+	syncDurations.Observe(float64(time.Since(start)) / float64(time.Second))
+	return err
+}
+
+// ReleaseLockTo releases the locks, which has smaller index than the given index
+// except the largest one among them.
+// For example, if WAL is holding lock 1,2,3,4,5,6, ReleaseLockTo(4) will release
+// lock 1,2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4.
+func (w *WAL) ReleaseLockTo(index uint64) error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	// smaller is the count of locks to release (all locks before it)
+	var smaller int
+	found := false
+
+	for i, l := range w.locks {
+		_, lockIndex, err := parseWalName(path.Base(l.Name()))
+		if err != nil {
+			return err
+		}
+		if lockIndex >= index {
+			// keep the previous segment too: it may still contain index
+			smaller = i - 1
+			found = true
+			break
+		}
+	}
+
+	// if no lock index is greater than the release index, we can
+	// release lock up to the last one (excluding).
+	if !found && len(w.locks) != 0 {
+		smaller = len(w.locks) - 1
+	}
+
+	if smaller <= 0 {
+		return nil
+	}
+
+	for i := 0; i < smaller; i++ {
+		w.locks[i].Unlock()
+		w.locks[i].Destroy()
+	}
+	w.locks = w.locks[smaller:]
+
+	return nil
+}
+
+// Close syncs and closes the segment file currently open for appending
+// (if any), then releases and destroys every file lock the WAL holds.
+// Lock cleanup failures are logged rather than returned so that all
+// locks get a release attempt.
+func (w *WAL) Close() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	if w.f != nil {
+		if err := w.sync(); err != nil {
+			return err
+		}
+		if err := w.f.Close(); err != nil {
+			return err
+		}
+	}
+	for _, l := range w.locks {
+		err := l.Unlock()
+		if err != nil {
+			plog.Errorf("failed to unlock during closing wal: %s", err)
+		}
+		err = l.Destroy()
+		if err != nil {
+			plog.Errorf("failed to destroy lock during closing wal: %s", err)
+		}
+	}
+	return nil
+}
+
+// saveEntry appends a single raft entry record and advances w.enti.
+// It does not sync; callers batch entries and sync once (see Save).
+func (w *WAL) saveEntry(e *raftpb.Entry) error {
+	// TODO: add MustMarshalTo to reduce one allocation.
+	b := pbutil.MustMarshal(e)
+	rec := &walpb.Record{Type: entryType, Data: b}
+	if err := w.encoder.encode(rec); err != nil {
+		return err
+	}
+	w.enti = e.Index
+	lastIndexSaved.Set(float64(w.enti))
+	return nil
+}
+
+// saveState appends a hard-state record and remembers it in w.state so
+// cut can replay it into the next segment. Empty hard states are skipped.
+func (w *WAL) saveState(s *raftpb.HardState) error {
+	if raft.IsEmptyHardState(*s) {
+		return nil
+	}
+	w.state = *s
+	b := pbutil.MustMarshal(s)
+	rec := &walpb.Record{Type: stateType, Data: b}
+	return w.encoder.encode(rec)
+}
+
+// Save persists the given hard state and entries to the WAL, syncing to
+// disk when the raft persistence rules require it (see mustSync), and
+// cuts a new segment once the current one reaches segmentSizeBytes.
+func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	// short cut, do not call sync
+	if raft.IsEmptyHardState(st) && len(ents) == 0 {
+		return nil
+	}
+
+	// NOTE(review): the local mustSync shadows the package-level
+	// mustSync function from here on; intentional but easy to misread.
+	mustSync := mustSync(st, w.state, len(ents))
+
+	// TODO(xiangli): no more reference operator
+	for i := range ents {
+		if err := w.saveEntry(&ents[i]); err != nil {
+			return err
+		}
+	}
+	if err := w.saveState(&st); err != nil {
+		return err
+	}
+
+	fstat, err := w.f.Stat()
+	if err != nil {
+		return err
+	}
+	if fstat.Size() < segmentSizeBytes {
+		if mustSync {
+			return w.sync()
+		}
+		return nil
+	}
+	// TODO: add a test for this code path when refactoring the tests
+	return w.cut()
+}
+
+// SaveSnapshot appends a snapshot marker record (index/term only, not the
+// snapshot data itself) and syncs the WAL.
+func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	b := pbutil.MustMarshal(&e)
+	rec := &walpb.Record{Type: snapshotType, Data: b}
+	if err := w.encoder.encode(rec); err != nil {
+		return err
+	}
+	// update enti only when snapshot is ahead of last index
+	if w.enti < e.Index {
+		w.enti = e.Index
+	}
+	lastIndexSaved.Set(float64(w.enti))
+	return w.sync()
+}
+
+// saveCrc records the crc accumulated so far so a reader can verify the
+// stream up to this point (written at segment start and on cut).
+func (w *WAL) saveCrc(prevCrc uint32) error {
+	return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
+}
+
+// mustSync reports whether the raft persistence rules require an fsync:
+// any appended entries, or a change of vote or term, must hit stable
+// storage before responding to RPCs.
+func mustSync(st, prevst raftpb.HardState, entsnum int) bool {
+	// Persistent state on all servers:
+	// (Updated on stable storage before responding to RPCs)
+	// currentTerm
+	// votedFor
+	// log entries[]
+	if entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term {
+		return true
+	}
+	return false
+}

+ 29 - 0
vendor/src/github.com/coreos/etcd/wal/walpb/record.go

@@ -0,0 +1,29 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package walpb
+
+import "errors"
+
+var (
+	// ErrCRCMismatch is returned by Validate when a record's stored crc
+	// disagrees with the crc computed by the reader.
+	ErrCRCMismatch = errors.New("walpb: crc mismatch")
+)
+
+// Validate compares the record's stored Crc with the given crc. On
+// mismatch the record is reset (its fields are zeroed) and
+// ErrCRCMismatch is returned.
+func (rec *Record) Validate(crc uint32) error {
+	if rec.Crc == crc {
+		return nil
+	}
+	rec.Reset()
+	return ErrCRCMismatch
+}

+ 495 - 0
vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go

@@ -0,0 +1,495 @@
+// Code generated by protoc-gen-gogo.
+// source: record.proto
+// DO NOT EDIT!
+
+/*
+	Package walpb is a generated protocol buffer package.
+
+	It is generated from these files:
+		record.proto
+
+	It has these top-level messages:
+		Record
+		Snapshot
+*/
+package walpb
+
+import (
+	"fmt"
+
+	proto "github.com/gogo/protobuf/proto"
+)
+
+import math "math"
+
+import io "io"
+
+// NOTE(review): everything below is generated by protoc-gen-gogo from
+// record.proto ("DO NOT EDIT"); comments here are review notes only.
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Record is the unit of data stored in the WAL: a type tag, a crc, and
+// an opaque payload (see record.proto).
+type Record struct {
+	Type             int64  `protobuf:"varint,1,opt,name=type" json:"type"`
+	Crc              uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"`
+	Data             []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Record) Reset()         { *m = Record{} }
+func (m *Record) String() string { return proto.CompactTextString(m) }
+func (*Record) ProtoMessage()    {}
+
+// Snapshot marks a raft snapshot position (index and term) in the WAL.
+type Snapshot struct {
+	Index            uint64 `protobuf:"varint,1,opt,name=index" json:"index"`
+	Term             uint64 `protobuf:"varint,2,opt,name=term" json:"term"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset()         { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage()    {}
+
+func init() {
+	proto.RegisterType((*Record)(nil), "walpb.Record")
+	proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot")
+}
+// Marshal/MarshalTo below are generated protobuf wire-format
+// serializers (protoc-gen-gogo); do not hand-edit.
+func (m *Record) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *Record) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	data[i] = 0x8
+	i++
+	i = encodeVarintRecord(data, i, uint64(m.Type))
+	data[i] = 0x10
+	i++
+	i = encodeVarintRecord(data, i, uint64(m.Crc))
+	if m.Data != nil {
+		data[i] = 0x1a
+		i++
+		i = encodeVarintRecord(data, i, uint64(len(m.Data)))
+		i += copy(data[i:], m.Data)
+	}
+	if m.XXX_unrecognized != nil {
+		i += copy(data[i:], m.XXX_unrecognized)
+	}
+	return i, nil
+}
+
+func (m *Snapshot) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	data[i] = 0x8
+	i++
+	i = encodeVarintRecord(data, i, uint64(m.Index))
+	data[i] = 0x10
+	i++
+	i = encodeVarintRecord(data, i, uint64(m.Term))
+	if m.XXX_unrecognized != nil {
+		i += copy(data[i:], m.XXX_unrecognized)
+	}
+	return i, nil
+}
+
+// Generated encoding helpers and Size methods (protoc-gen-gogo).
+// The fixed-width helpers are emitted unconditionally even though no
+// fixed64/fixed32 fields exist in record.proto.
+func encodeFixed64Record(data []byte, offset int, v uint64) int {
+	data[offset] = uint8(v)
+	data[offset+1] = uint8(v >> 8)
+	data[offset+2] = uint8(v >> 16)
+	data[offset+3] = uint8(v >> 24)
+	data[offset+4] = uint8(v >> 32)
+	data[offset+5] = uint8(v >> 40)
+	data[offset+6] = uint8(v >> 48)
+	data[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Record(data []byte, offset int, v uint32) int {
+	data[offset] = uint8(v)
+	data[offset+1] = uint8(v >> 8)
+	data[offset+2] = uint8(v >> 16)
+	data[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintRecord(data []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		data[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	data[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Record) Size() (n int) {
+	var l int
+	_ = l
+	n += 1 + sovRecord(uint64(m.Type))
+	n += 1 + sovRecord(uint64(m.Crc))
+	if m.Data != nil {
+		l = len(m.Data)
+		n += 1 + l + sovRecord(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Snapshot) Size() (n int) {
+	var l int
+	_ = l
+	n += 1 + sovRecord(uint64(m.Index))
+	n += 1 + sovRecord(uint64(m.Term))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+// sovRecord returns the number of bytes needed to varint-encode x.
+func sovRecord(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+
+// sozRecord is the zigzag-encoded size helper (unused for these messages).
+func sozRecord(x uint64) (n int) {
+	return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+// Unmarshal is the generated wire-format decoder for Record
+// (protoc-gen-gogo); unknown fields are preserved in XXX_unrecognized.
+func (m *Record) Unmarshal(data []byte) error {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRecord
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Record: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRecord
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Type |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType)
+			}
+			m.Crc = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRecord
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Crc |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRecord
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRecord
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRecord(data[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRecord
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal is the generated wire-format decoder for Snapshot
+// (protoc-gen-gogo); unknown fields are preserved in XXX_unrecognized.
+func (m *Snapshot) Unmarshal(data []byte) error {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRecord
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+			}
+			m.Index = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRecord
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Index |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRecord
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.Term |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRecord(data[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRecord
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// skipRecord is the generated helper that returns the encoded length of
+// the next field in data so unknown fields can be skipped or copied
+// verbatim into XXX_unrecognized (protoc-gen-gogo).
+func skipRecord(data []byte) (n int, err error) {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRecord
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRecord
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if data[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRecord
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthRecord
+			}
+			return iNdEx, nil
+		case 3:
+			// group: recursively skip until the matching end-group tag
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowRecord
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := data[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipRecord(data[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}

+ 20 - 0
vendor/src/github.com/coreos/etcd/wal/walpb/record.proto

@@ -0,0 +1,20 @@
+syntax = "proto2";
+package walpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+// Record is the unit of data stored in the WAL: a type tag, a crc, and
+// an opaque payload whose interpretation depends on the type.
+message Record {
+	optional int64 type  = 1 [(gogoproto.nullable) = false];
+	optional uint32 crc  = 2 [(gogoproto.nullable) = false];
+	optional bytes data  = 3;
+}
+
+// Snapshot marks a raft snapshot position (index and term) in the WAL.
+message Snapshot {
+	optional uint64 index = 1 [(gogoproto.nullable) = false];
+	optional uint64 term  = 2 [(gogoproto.nullable) = false];
+}

+ 202 - 0
vendor/src/github.com/coreos/pkg/LICENSE

@@ -0,0 +1,202 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+

+ 39 - 0
vendor/src/github.com/coreos/pkg/capnslog/README.md

@@ -0,0 +1,39 @@
+# capnslog, the CoreOS logging package
+
+There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks) or are just a pain to use (lack of `Fatalln()`?).
+capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
+
+### Design Principles
+
+##### `package main` is the place where logging gets turned on and routed
+
+A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
+
+##### All log options are runtime-configurable. 
+
+Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly. 
+
+##### There is one log object per package. It is registered under its repository and package name.
+
+`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
+
+##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
+
+Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
+
+Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
+
+##### Log objects are an interface
+
+An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
+
+##### Log levels have specific meanings:
+
+  * Critical: Unrecoverable. Must fail.
+  * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
+  * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
+  * Notice: Normal, but important (uncommon) log information.
+  * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
+  * Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices.
+  * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
+

+ 106 - 0
vendor/src/github.com/coreos/pkg/capnslog/formatters.go

@@ -0,0 +1,106 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"runtime"
+	"strings"
+	"time"
+)
+
+type Formatter interface {
+	Format(pkg string, level LogLevel, depth int, entries ...interface{})
+	Flush()
+}
+
+func NewStringFormatter(w io.Writer) *StringFormatter {
+	return &StringFormatter{
+		w: bufio.NewWriter(w),
+	}
+}
+
+type StringFormatter struct {
+	w *bufio.Writer
+}
+
+func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
+	now := time.Now().UTC()
+	s.w.WriteString(now.Format(time.RFC3339))
+	s.w.WriteByte(' ')
+	writeEntries(s.w, pkg, l, i, entries...)
+	s.Flush()
+}
+
+func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
+	if pkg != "" {
+		w.WriteString(pkg + ": ")
+	}
+	str := fmt.Sprint(entries...)
+	endsInNL := strings.HasSuffix(str, "\n")
+	w.WriteString(str)
+	if !endsInNL {
+		w.WriteString("\n")
+	}
+}
+
+func (s *StringFormatter) Flush() {
+	s.w.Flush()
+}
+
+func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
+	return &PrettyFormatter{
+		w:     bufio.NewWriter(w),
+		debug: debug,
+	}
+}
+
+type PrettyFormatter struct {
+	w     *bufio.Writer
+	debug bool
+}
+
+func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
+	now := time.Now()
+	ts := now.Format("2006-01-02 15:04:05")
+	c.w.WriteString(ts)
+	ms := now.Nanosecond() / 1000
+	c.w.WriteString(fmt.Sprintf(".%06d", ms))
+	if c.debug {
+		_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+		if !ok {
+			file = "???"
+			line = 1
+		} else {
+			slash := strings.LastIndex(file, "/")
+			if slash >= 0 {
+				file = file[slash+1:]
+			}
+		}
+		if line < 0 {
+			line = 0 // not a real line number
+		}
+		c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
+	}
+	c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
+	writeEntries(c.w, pkg, l, depth, entries...)
+	c.Flush()
+}
+
+func (c *PrettyFormatter) Flush() {
+	c.w.Flush()
+}

+ 96 - 0
vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go

@@ -0,0 +1,96 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var pid = os.Getpid()
+
+type GlogFormatter struct {
+	StringFormatter
+}
+
+func NewGlogFormatter(w io.Writer) *GlogFormatter {
+	g := &GlogFormatter{}
+	g.w = bufio.NewWriter(w)
+	return g
+}
+
+func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
+	g.w.Write(GlogHeader(level, depth+1))
+	g.StringFormatter.Format(pkg, level, depth+1, entries...)
+}
+
+func GlogHeader(level LogLevel, depth int) []byte {
+	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+	now := time.Now().UTC()
+	_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+	if !ok {
+		file = "???"
+		line = 1
+	} else {
+		slash := strings.LastIndex(file, "/")
+		if slash >= 0 {
+			file = file[slash+1:]
+		}
+	}
+	if line < 0 {
+		line = 0 // not a real line number
+	}
+	buf := &bytes.Buffer{}
+	buf.Grow(30)
+	_, month, day := now.Date()
+	hour, minute, second := now.Clock()
+	buf.WriteString(level.Char())
+	twoDigits(buf, int(month))
+	twoDigits(buf, day)
+	buf.WriteByte(' ')
+	twoDigits(buf, hour)
+	buf.WriteByte(':')
+	twoDigits(buf, minute)
+	buf.WriteByte(':')
+	twoDigits(buf, second)
+	buf.WriteByte('.')
+	buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
+	buf.WriteByte('Z')
+	buf.WriteByte(' ')
+	buf.WriteString(strconv.Itoa(pid))
+	buf.WriteByte(' ')
+	buf.WriteString(file)
+	buf.WriteByte(':')
+	buf.WriteString(strconv.Itoa(line))
+	buf.WriteByte(']')
+	buf.WriteByte(' ')
+	return buf.Bytes()
+}
+
+const digits = "0123456789"
+
+func twoDigits(b *bytes.Buffer, d int) {
+	c2 := digits[d%10]
+	d /= 10
+	c1 := digits[d%10]
+	b.WriteByte(c1)
+	b.WriteByte(c2)
+}

+ 49 - 0
vendor/src/github.com/coreos/pkg/capnslog/init.go

@@ -0,0 +1,49 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+	"io"
+	"os"
+	"syscall"
+)
+
+// Here's where the opinionation comes in. We need some sensible defaults,
+// especially after taking over the log package. Your project (whatever it may
+// be) may see things differently. That's okay; there should be no defaults in
+// the main package that cannot be controlled or overridden programmatically,
+// otherwise it's a bug. Doing so is creating your own init_log.go file much
+// like this one.
+
+func init() {
+	initHijack()
+
+	// Go `log` package uses os.Stderr.
+	SetFormatter(NewDefaultFormatter(os.Stderr))
+	SetGlobalLogLevel(INFO)
+}
+
+func NewDefaultFormatter(out io.Writer) Formatter {
+	if syscall.Getppid() == 1 {
+		// We're running under init, which may be systemd.
+		f, err := NewJournaldFormatter()
+		if err == nil {
+			return f
+		}
+	}
+	return NewPrettyFormatter(out, false)
+}

+ 25 - 0
vendor/src/github.com/coreos/pkg/capnslog/init_windows.go

@@ -0,0 +1,25 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import "os"
+
+func init() {
+	initHijack()
+
+	// Go `log` package uses os.Stderr.
+	SetFormatter(NewPrettyFormatter(os.Stderr, false))
+	SetGlobalLogLevel(INFO)
+}

+ 68 - 0
vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go

@@ -0,0 +1,68 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/coreos/go-systemd/journal"
+)
+
+func NewJournaldFormatter() (Formatter, error) {
+	if !journal.Enabled() {
+		return nil, errors.New("No systemd detected")
+	}
+	return &journaldFormatter{}, nil
+}
+
+type journaldFormatter struct{}
+
+func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
+	var pri journal.Priority
+	switch l {
+	case CRITICAL:
+		pri = journal.PriCrit
+	case ERROR:
+		pri = journal.PriErr
+	case WARNING:
+		pri = journal.PriWarning
+	case NOTICE:
+		pri = journal.PriNotice
+	case INFO:
+		pri = journal.PriInfo
+	case DEBUG:
+		pri = journal.PriDebug
+	case TRACE:
+		pri = journal.PriDebug
+	default:
+		panic("Unhandled loglevel")
+	}
+	msg := fmt.Sprint(entries...)
+	tags := map[string]string{
+		"PACKAGE":           pkg,
+		"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
+	}
+	err := journal.Send(msg, pri, tags)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+	}
+}
+
+func (j *journaldFormatter) Flush() {}

+ 39 - 0
vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go

@@ -0,0 +1,39 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"log"
+)
+
+func initHijack() {
+	pkg := NewPackageLogger("log", "")
+	w := packageWriter{pkg}
+	log.SetFlags(0)
+	log.SetPrefix("")
+	log.SetOutput(w)
+}
+
+type packageWriter struct {
+	pl *PackageLogger
+}
+
+func (p packageWriter) Write(b []byte) (int, error) {
+	if p.pl.level < INFO {
+		return 0, nil
+	}
+	p.pl.internalLog(calldepth+2, INFO, string(b))
+	return len(b), nil
+}

+ 240 - 0
vendor/src/github.com/coreos/pkg/capnslog/logmap.go

@@ -0,0 +1,240 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"errors"
+	"strings"
+	"sync"
+)
+
+// LogLevel is the set of all log levels.
+type LogLevel int8
+
+const (
+	// CRITICAL is the lowest log level; only errors which will end the program will be propagated.
+	CRITICAL LogLevel = iota - 1
+	// ERROR is for errors that are not fatal but lead to troubling behavior.
+	ERROR
+	// WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations.
+	WARNING
+	// NOTICE is for normal but significant conditions.
+	NOTICE
+	// INFO is a log level for common, everyday log updates.
+	INFO
+	// DEBUG is the default hidden level for more verbose updates about internal processes.
+	DEBUG
+	// TRACE is for (potentially) call by call tracing of programs.
+	TRACE
+)
+
+// Char returns a single-character representation of the log level.
+func (l LogLevel) Char() string {
+	switch l {
+	case CRITICAL:
+		return "C"
+	case ERROR:
+		return "E"
+	case WARNING:
+		return "W"
+	case NOTICE:
+		return "N"
+	case INFO:
+		return "I"
+	case DEBUG:
+		return "D"
+	case TRACE:
+		return "T"
+	default:
+		panic("Unhandled loglevel")
+	}
+}
+
+// String returns a multi-character representation of the log level.
+func (l LogLevel) String() string {
+	switch l {
+	case CRITICAL:
+		return "CRITICAL"
+	case ERROR:
+		return "ERROR"
+	case WARNING:
+		return "WARNING"
+	case NOTICE:
+		return "NOTICE"
+	case INFO:
+		return "INFO"
+	case DEBUG:
+		return "DEBUG"
+	case TRACE:
+		return "TRACE"
+	default:
+		panic("Unhandled loglevel")
+	}
+}
+
+// Update using the given string value. Fulfills the flag.Value interface.
+func (l *LogLevel) Set(s string) error {
+	value, err := ParseLevel(s)
+	if err != nil {
+		return err
+	}
+
+	*l = value
+	return nil
+}
+
+// ParseLevel translates some potential loglevel strings into their corresponding levels.
+func ParseLevel(s string) (LogLevel, error) {
+	switch s {
+	case "CRITICAL", "C":
+		return CRITICAL, nil
+	case "ERROR", "0", "E":
+		return ERROR, nil
+	case "WARNING", "1", "W":
+		return WARNING, nil
+	case "NOTICE", "2", "N":
+		return NOTICE, nil
+	case "INFO", "3", "I":
+		return INFO, nil
+	case "DEBUG", "4", "D":
+		return DEBUG, nil
+	case "TRACE", "5", "T":
+		return TRACE, nil
+	}
+	return CRITICAL, errors.New("couldn't parse log level " + s)
+}
+
+type RepoLogger map[string]*PackageLogger
+
+type loggerStruct struct {
+	sync.Mutex
+	repoMap   map[string]RepoLogger
+	formatter Formatter
+}
+
+// logger is the global logger
+var logger = new(loggerStruct)
+
+// SetGlobalLogLevel sets the log level for all packages in all repositories
+// registered with capnslog.
+func SetGlobalLogLevel(l LogLevel) {
+	logger.Lock()
+	defer logger.Unlock()
+	for _, r := range logger.repoMap {
+		r.setRepoLogLevelInternal(l)
+	}
+}
+
+// GetRepoLogger may return the handle to the repository's set of packages' loggers.
+func GetRepoLogger(repo string) (RepoLogger, error) {
+	logger.Lock()
+	defer logger.Unlock()
+	r, ok := logger.repoMap[repo]
+	if !ok {
+		return nil, errors.New("no packages registered for repo " + repo)
+	}
+	return r, nil
+}
+
+// MustRepoLogger returns the handle to the repository's packages' loggers.
+func MustRepoLogger(repo string) RepoLogger {
+	r, err := GetRepoLogger(repo)
+	if err != nil {
+		panic(err)
+	}
+	return r
+}
+
+// SetRepoLogLevel sets the log level for all packages in the repository.
+func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
+	logger.Lock()
+	defer logger.Unlock()
+	r.setRepoLogLevelInternal(l)
+}
+
+func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
+	for _, v := range r {
+		v.level = l
+	}
+}
+
+// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
+// order, and returns a map of the results, for use in SetLogLevel.
+func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
+	setlist := strings.Split(conf, ",")
+	out := make(map[string]LogLevel)
+	for _, setstring := range setlist {
+		setting := strings.Split(setstring, "=")
+		if len(setting) != 2 {
+			return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
+		}
+		l, err := ParseLevel(setting[1])
+		if err != nil {
+			return nil, err
+		}
+		out[setting[0]] = l
+	}
+	return out, nil
+}
+
+// SetLogLevel takes a map of package names within a repository to their desired
+// loglevel, and sets the levels appropriately. Unknown packages are ignored.
+// "*" is a special package name that corresponds to all packages, and will be
+// processed first.
+func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
+	logger.Lock()
+	defer logger.Unlock()
+	if l, ok := m["*"]; ok {
+		r.setRepoLogLevelInternal(l)
+	}
+	for k, v := range m {
+		l, ok := r[k]
+		if !ok {
+			continue
+		}
+		l.level = v
+	}
+}
+
+// SetFormatter sets the formatting function for all logs.
+func SetFormatter(f Formatter) {
+	logger.Lock()
+	defer logger.Unlock()
+	logger.formatter = f
+}
+
+// NewPackageLogger creates a package logger object.
+// This should be defined as a global var in your package, referencing your repo.
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
+	logger.Lock()
+	defer logger.Unlock()
+	if logger.repoMap == nil {
+		logger.repoMap = make(map[string]RepoLogger)
+	}
+	r, rok := logger.repoMap[repo]
+	if !rok {
+		logger.repoMap[repo] = make(RepoLogger)
+		r = logger.repoMap[repo]
+	}
+	p, pok := r[pkg]
+	if !pok {
+		r[pkg] = &PackageLogger{
+			pkg:   pkg,
+			level: INFO,
+		}
+		p = r[pkg]
+	}
+	return
+}

+ 158 - 0
vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go

@@ -0,0 +1,158 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"fmt"
+	"os"
+)
+
+// PackageLogger emits log entries on behalf of a single package; level gates
+// which entries are forwarded to the global formatter.
+type PackageLogger struct {
+	pkg   string
+	level LogLevel
+}
+
+// calldepth is the number of stack frames between a PackageLogger method's
+// caller and the formatter; internalLog passes it (plus one) to Format.
+const calldepth = 2
+
+// internalLog forwards entries to the global formatter when the entry's level
+// is enabled for this package. CRITICAL entries are always emitted regardless
+// of the configured level. A nil formatter drops the entry silently.
+func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
+	if inLevel != CRITICAL && p.level < inLevel {
+		return
+	}
+	logger.Lock()
+	defer logger.Unlock()
+	if logger.formatter != nil {
+		logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
+	}
+}
+
+// LevelAt reports whether this package logger emits entries at level l.
+func (p *PackageLogger) LevelAt(l LogLevel) bool {
+	return p.level >= l
+}
+
+// Logf logs a formatted string at any level between ERROR and TRACE.
+func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
+	p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
+}
+
+// Log logs a message at any level between ERROR and TRACE.
+func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
+	p.internalLog(calldepth, l, fmt.Sprint(args...))
+}
+
+// log stdlib compatibility
+
+// Println logs its arguments at INFO level in the manner of log.Println.
+func (p *PackageLogger) Println(args ...interface{}) {
+	p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
+}
+
+// Printf logs a formatted message at INFO level in the manner of log.Printf.
+func (p *PackageLogger) Printf(format string, args ...interface{}) {
+	p.internalLog(calldepth, INFO, fmt.Sprintf(format, args...))
+}
+
+// Print logs its arguments at INFO level in the manner of log.Print.
+func (p *PackageLogger) Print(args ...interface{}) {
+	p.internalLog(calldepth, INFO, fmt.Sprint(args...))
+}
+
+// Panic and fatal
+
+// Panicf logs a formatted message at CRITICAL level, then panics with it.
+func (p *PackageLogger) Panicf(format string, args ...interface{}) {
+	s := fmt.Sprintf(format, args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	panic(s)
+}
+
+// Panic logs its arguments at CRITICAL level, then panics with the message.
+func (p *PackageLogger) Panic(args ...interface{}) {
+	s := fmt.Sprint(args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	panic(s)
+}
+
+// Fatalf logs a formatted message at CRITICAL level, then terminates the
+// process via os.Exit(1); deferred functions do not run.
+func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
+	s := fmt.Sprintf(format, args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	os.Exit(1)
+}
+
+// Fatal logs its arguments at CRITICAL level, then terminates the process
+// via os.Exit(1); deferred functions do not run.
+func (p *PackageLogger) Fatal(args ...interface{}) {
+	s := fmt.Sprint(args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	os.Exit(1)
+}
+
+// Error Functions
+
+// Errorf logs a formatted message at ERROR level.
+func (p *PackageLogger) Errorf(format string, args ...interface{}) {
+	p.internalLog(calldepth, ERROR, fmt.Sprintf(format, args...))
+}
+
+// Error logs its entries at ERROR level.
+func (p *PackageLogger) Error(entries ...interface{}) {
+	p.internalLog(calldepth, ERROR, entries...)
+}
+
+// Warning Functions
+
+// Warningf logs a formatted message at WARNING level.
+func (p *PackageLogger) Warningf(format string, args ...interface{}) {
+	p.internalLog(calldepth, WARNING, fmt.Sprintf(format, args...))
+}
+
+// Warning logs its entries at WARNING level.
+func (p *PackageLogger) Warning(entries ...interface{}) {
+	p.internalLog(calldepth, WARNING, entries...)
+}
+
+// Notice Functions
+
+// Noticef logs a formatted message at NOTICE level.
+func (p *PackageLogger) Noticef(format string, args ...interface{}) {
+	p.internalLog(calldepth, NOTICE, fmt.Sprintf(format, args...))
+}
+
+// Notice logs its entries at NOTICE level.
+func (p *PackageLogger) Notice(entries ...interface{}) {
+	p.internalLog(calldepth, NOTICE, entries...)
+}
+
+// Info Functions
+
+// Infof logs a formatted message at INFO level.
+func (p *PackageLogger) Infof(format string, args ...interface{}) {
+	p.internalLog(calldepth, INFO, fmt.Sprintf(format, args...))
+}
+
+// Info logs its entries at INFO level.
+func (p *PackageLogger) Info(entries ...interface{}) {
+	p.internalLog(calldepth, INFO, entries...)
+}
+
+// Debug Functions
+
+// Debugf logs a formatted message at DEBUG level.
+func (p *PackageLogger) Debugf(format string, args ...interface{}) {
+	p.internalLog(calldepth, DEBUG, fmt.Sprintf(format, args...))
+}
+
+// Debug logs its entries at DEBUG level.
+func (p *PackageLogger) Debug(entries ...interface{}) {
+	p.internalLog(calldepth, DEBUG, entries...)
+}
+
+// Trace Functions
+
+// Tracef logs a formatted message at TRACE level.
+func (p *PackageLogger) Tracef(format string, args ...interface{}) {
+	p.internalLog(calldepth, TRACE, fmt.Sprintf(format, args...))
+}
+
+// Trace logs its entries at TRACE level.
+func (p *PackageLogger) Trace(entries ...interface{}) {
+	p.internalLog(calldepth, TRACE, entries...)
+}
+
+// Flush flushes any buffered output in the global formatter, if one is set.
+func (p *PackageLogger) Flush() {
+	logger.Lock()
+	defer logger.Unlock()
+	// Guard against a nil formatter, mirroring internalLog: calling Flush
+	// before SetFormatter must not panic on a nil interface value.
+	if logger.formatter != nil {
+		logger.formatter.Flush()
+	}
+}

+ 65 - 0
vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go

@@ -0,0 +1,65 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+	"fmt"
+	"log/syslog"
+)
+
+// NewSyslogFormatter returns a Formatter that writes each entry to w at the
+// syslog severity corresponding to its LogLevel.
+func NewSyslogFormatter(w *syslog.Writer) Formatter {
+	return &syslogFormatter{w}
+}
+
+// NewDefaultSyslogFormatter opens a connection to the system log daemon with
+// the given tag (at LOG_DEBUG so all levels pass) and wraps it in a Formatter.
+func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
+	w, err := syslog.New(syslog.LOG_DEBUG, tag)
+	if err != nil {
+		return nil, err
+	}
+	return NewSyslogFormatter(w), nil
+}
+
+// syslogFormatter maps capnslog levels onto syslog severities.
+type syslogFormatter struct {
+	w *syslog.Writer
+}
+
+// Format writes each entry to syslog at the severity matching l. TRACE is
+// folded into Debug because syslog has no finer severity. Errors returned by
+// the syslog writer are ignored; an unknown level panics.
+func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
+	for _, entry := range entries {
+		str := fmt.Sprint(entry)
+		switch l {
+		case CRITICAL:
+			s.w.Crit(str)
+		case ERROR:
+			s.w.Err(str)
+		case WARNING:
+			s.w.Warning(str)
+		case NOTICE:
+			s.w.Notice(str)
+		case INFO:
+			s.w.Info(str)
+		case DEBUG:
+			s.w.Debug(str)
+		case TRACE:
+			s.w.Debug(str)
+		default:
+			panic("Unhandled loglevel")
+		}
+	}
+}
+
+// Flush is a no-op: entries are written to the syslog writer as they arrive.
+func (s *syslogFormatter) Flush() {
+}

+ 14 - 2
vendor/src/github.com/docker/engine-api/client/client.go

@@ -12,6 +12,9 @@ import (
 	"github.com/docker/go-connections/tlsconfig"
 )
 
+// DefaultVersion is the version of the current stable API.
+const DefaultVersion string = "1.23"
+
 // Client is the API client that performs all operations
 // against a docker server.
 type Client struct {
@@ -59,13 +62,22 @@ func NewEnvClient() (*Client, error) {
 	if host == "" {
 		host = DefaultDockerHost
 	}
-	return NewClient(host, os.Getenv("DOCKER_API_VERSION"), client, nil)
+
+	version := os.Getenv("DOCKER_API_VERSION")
+	if version == "" {
+		version = DefaultVersion
+	}
+
+	return NewClient(host, version, client, nil)
 }
 
 // NewClient initializes a new API client for the given host and API version.
-// It won't send any version information if the version number is empty.
 // It uses the given http client as transport.
 // It also initializes the custom http headers to add to each request.
+//
+// It won't send any version information if the version number is empty. It is
+// highly recommended that you set a version or your client may break if the
+// server is upgraded.
 func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
 	proto, addr, basePath, err := ParseHost(host)
 	if err != nil {

+ 51 - 0
vendor/src/github.com/docker/engine-api/client/errors.go

@@ -120,3 +120,54 @@ func IsErrUnauthorized(err error) bool {
 	_, ok := err.(unauthorizedError)
 	return ok
 }
+
+// nodeNotFoundError implements an error returned when a node is not found.
+// It is a value type; callers detect it via IsErrNodeNotFound.
+type nodeNotFoundError struct {
+	nodeID string
+}
+
+// Error returns a string representation of a nodeNotFoundError.
+func (e nodeNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such node: %s", e.nodeID)
+}
+
+// IsErrNodeNotFound returns true if the error is caused
+// when a node is not found.
+func IsErrNodeNotFound(err error) bool {
+	_, ok := err.(nodeNotFoundError)
+	return ok
+}
+
+// serviceNotFoundError implements an error returned when a service is not found.
+// It is a value type; callers detect it via IsErrServiceNotFound.
+type serviceNotFoundError struct {
+	serviceID string
+}
+
+// Error returns a string representation of a serviceNotFoundError.
+func (e serviceNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such service: %s", e.serviceID)
+}
+
+// IsErrServiceNotFound returns true if the error is caused
+// when a service is not found.
+func IsErrServiceNotFound(err error) bool {
+	_, ok := err.(serviceNotFoundError)
+	return ok
+}
+
+// taskNotFoundError implements an error returned when a task is not found.
+// It is a value type; callers detect it via IsErrTaskNotFound.
+type taskNotFoundError struct {
+	taskID string
+}
+
+// Error returns a string representation of a taskNotFoundError.
+func (e taskNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such task: %s", e.taskID)
+}
+
+// IsErrTaskNotFound returns true if the error is caused
+// when a task is not found.
+func IsErrTaskNotFound(err error) bool {
+	_, ok := err.(taskNotFoundError)
+	return ok
+}

+ 17 - 0
vendor/src/github.com/docker/engine-api/client/interface.go

@@ -11,6 +11,7 @@ import (
 	"github.com/docker/engine-api/types/filters"
 	"github.com/docker/engine-api/types/network"
 	"github.com/docker/engine-api/types/registry"
+	"github.com/docker/engine-api/types/swarm"
 )
 
 // APIClient is an interface that clients that talk with a docker server must implement.
@@ -19,6 +20,22 @@ type APIClient interface {
 	CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error
 	CheckpointDelete(ctx context.Context, container string, checkpointID string) error
 	CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error)
+	SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
+	SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
+	SwarmLeave(ctx context.Context, force bool) error
+	SwarmInspect(ctx context.Context) (swarm.Swarm, error)
+	SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec) error
+	NodeInspect(ctx context.Context, nodeID string) (swarm.Node, error)
+	NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
+	NodeRemove(ctx context.Context, nodeID string) error
+	NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
+	ServiceCreate(ctx context.Context, service swarm.ServiceSpec) (types.ServiceCreateResponse, error)
+	ServiceInspect(ctx context.Context, serviceID string) (swarm.Service, error)
+	ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
+	ServiceRemove(ctx context.Context, serviceID string) error
+	ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec) error
+	TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
+	TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
 	ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
 	ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error)
 	ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error)

+ 25 - 0
vendor/src/github.com/docker/engine-api/client/node_inspect.go

@@ -0,0 +1,25 @@
+package client
+
+import (
+	"encoding/json"
+	"net/http"
+
+	"github.com/docker/engine-api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// NodeInspect returns the node information.
+// A 404 from the daemon is converted into a typed nodeNotFoundError so callers
+// can detect it with IsErrNodeNotFound.
+func (cli *Client) NodeInspect(ctx context.Context, nodeID string) (swarm.Node, error) {
+	serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
+	if err != nil {
+		if serverResp.statusCode == http.StatusNotFound {
+			return swarm.Node{}, nodeNotFoundError{nodeID}
+		}
+		return swarm.Node{}, err
+	}
+
+	var response swarm.Node
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}

+ 36 - 0
vendor/src/github.com/docker/engine-api/client/node_list.go

@@ -0,0 +1,36 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/filters"
+	"github.com/docker/engine-api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// NodeList returns the list of nodes.
+// Any filters in options are JSON-encoded into the "filters" query parameter.
+func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
+	query := url.Values{}
+
+	if options.Filter.Len() > 0 {
+		filterJSON, err := filters.ToParam(options.Filter)
+
+		if err != nil {
+			return nil, err
+		}
+
+		query.Set("filters", filterJSON)
+	}
+
+	resp, err := cli.get(ctx, "/nodes", query, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var nodes []swarm.Node
+	err = json.NewDecoder(resp.body).Decode(&nodes)
+	ensureReaderClosed(resp)
+	return nodes, err
+}

+ 10 - 0
vendor/src/github.com/docker/engine-api/client/node_remove.go

@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// NodeRemove removes a Node.
+// The response body is drained and closed even on error.
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string) error {
+	resp, err := cli.delete(ctx, "/nodes/"+nodeID, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}

+ 18 - 0
vendor/src/github.com/docker/engine-api/client/node_update.go

@@ -0,0 +1,18 @@
+package client
+
+import (
+	"net/url"
+	"strconv"
+
+	"github.com/docker/engine-api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// NodeUpdate updates a Node.
+// The caller-supplied object version (version.Index) is sent as the "version"
+// query parameter alongside the new spec.
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
+	query := url.Values{}
+	query.Set("version", strconv.FormatUint(version.Index, 10))
+	resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil)
+	ensureReaderClosed(resp)
+	return err
+}

+ 3 - 1
vendor/src/github.com/docker/engine-api/client/request.go

@@ -12,6 +12,7 @@ import (
 
 	"github.com/docker/engine-api/client/transport/cancellable"
 	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/versions"
 	"golang.org/x/net/context"
 )
 
@@ -133,7 +134,8 @@ func (cli *Client) sendClientRequest(ctx context.Context, method, path string, q
 		}
 
 		var errorMessage string
-		if resp.Header.Get("Content-Type") == "application/json" {
+		if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) &&
+			resp.Header.Get("Content-Type") == "application/json" {
 			var errorResponse types.ErrorResponse
 			if err := json.Unmarshal(body, &errorResponse); err != nil {
 				return serverResp, fmt.Errorf("Error reading JSON: %v", err)

+ 22 - 0
vendor/src/github.com/docker/engine-api/client/service_create.go

@@ -0,0 +1,22 @@
+package client
+
+import (
+	"encoding/json"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// ServiceCreate creates a new Service.
+// The spec is POSTed as JSON and the daemon's response is decoded into a
+// types.ServiceCreateResponse.
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec) (types.ServiceCreateResponse, error) {
+	var response types.ServiceCreateResponse
+	resp, err := cli.post(ctx, "/services/create", nil, service, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}

+ 25 - 0
vendor/src/github.com/docker/engine-api/client/service_inspect.go

@@ -0,0 +1,25 @@
+package client
+
+import (
+	"encoding/json"
+	"net/http"
+
+	"github.com/docker/engine-api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// ServiceInspect returns the service information.
+// A 404 from the daemon is converted into a typed serviceNotFoundError so
+// callers can detect it with IsErrServiceNotFound.
+func (cli *Client) ServiceInspect(ctx context.Context, serviceID string) (swarm.Service, error) {
+	serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil)
+	if err != nil {
+		if serverResp.statusCode == http.StatusNotFound {
+			return swarm.Service{}, serviceNotFoundError{serviceID}
+		}
+		return swarm.Service{}, err
+	}
+
+	var response swarm.Service
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}

+ 35 - 0
vendor/src/github.com/docker/engine-api/client/service_list.go

@@ -0,0 +1,35 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/filters"
+	"github.com/docker/engine-api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// ServiceList returns the list of services.
+// Any filters in options are JSON-encoded into the "filters" query parameter.
+func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
+	query := url.Values{}
+
+	if options.Filter.Len() > 0 {
+		filterJSON, err := filters.ToParam(options.Filter)
+		if err != nil {
+			return nil, err
+		}
+
+		query.Set("filters", filterJSON)
+	}
+
+	resp, err := cli.get(ctx, "/services", query, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var services []swarm.Service
+	err = json.NewDecoder(resp.body).Decode(&services)
+	ensureReaderClosed(resp)
+	return services, err
+}

+ 10 - 0
vendor/src/github.com/docker/engine-api/client/service_remove.go

@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ServiceRemove kills and removes a service.
+// The response body is drained and closed even on error.
+func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
+	resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}

+ 18 - 0
vendor/src/github.com/docker/engine-api/client/service_update.go

@@ -0,0 +1,18 @@
+package client
+
+import (
+	"net/url"
+	"strconv"
+
+	"github.com/docker/engine-api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// ServiceUpdate updates a Service.
+// The caller-supplied object version (version.Index) is sent as the "version"
+// query parameter alongside the new spec.
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec) error {
+	query := url.Values{}
+	query.Set("version", strconv.FormatUint(version.Index, 10))
+	resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, nil)
+	ensureReaderClosed(resp)
+	return err
+}

+ 21 - 0
vendor/src/github.com/docker/engine-api/client/swarm_init.go

@@ -0,0 +1,21 @@
+package client
+
+import (
+	"encoding/json"
+
+	"github.com/docker/engine-api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// SwarmInit initializes the Swarm.
+// The daemon's response body is a single JSON-encoded string, which is
+// returned verbatim.
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) {
+	serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil)
+	if err != nil {
+		return "", err
+	}
+
+	var response string
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}

+ 21 - 0
vendor/src/github.com/docker/engine-api/client/swarm_inspect.go

@@ -0,0 +1,21 @@
+package client
+
+import (
+	"encoding/json"
+
+	"github.com/docker/engine-api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// SwarmInspect inspects the Swarm.
+// It decodes the daemon's JSON response into a swarm.Swarm value.
+func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) {
+	serverResp, err := cli.get(ctx, "/swarm", nil, nil)
+	if err != nil {
+		return swarm.Swarm{}, err
+	}
+
+	var response swarm.Swarm
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}

+ 13 - 0
vendor/src/github.com/docker/engine-api/client/swarm_join.go

@@ -0,0 +1,13 @@
+package client
+
+import (
+	"github.com/docker/engine-api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// SwarmJoin joins the Swarm.
+// The response body is drained and closed even on error.
+func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
+	resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
+	ensureReaderClosed(resp)
+	return err
+}

+ 18 - 0
vendor/src/github.com/docker/engine-api/client/swarm_leave.go

@@ -0,0 +1,18 @@
+package client
+
+import (
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// SwarmLeave leaves the Swarm.
+// When force is true, "force=1" is appended to the query string.
+func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
+	query := url.Values{}
+	if force {
+		query.Set("force", "1")
+	}
+	resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}

+ 18 - 0
vendor/src/github.com/docker/engine-api/client/swarm_update.go

@@ -0,0 +1,18 @@
+package client
+
+import (
+	"net/url"
+	"strconv"
+
+	"github.com/docker/engine-api/types/swarm"
+	"golang.org/x/net/context"
+)
+
+// SwarmUpdate updates the Swarm.
+// The caller-supplied object version (version.Index) is sent as the "version"
+// query parameter alongside the new spec. Note the swarm parameter shadows
+// the swarm package inside the body.
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec) error {
+	query := url.Values{}
+	query.Set("version", strconv.FormatUint(version.Index, 10))
+	resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
+	ensureReaderClosed(resp)
+	return err
+}

+ 34 - 0
vendor/src/github.com/docker/engine-api/client/task_inspect.go

@@ -0,0 +1,34 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/docker/engine-api/types/swarm"
+
+	"golang.org/x/net/context"
+)
+
+// TaskInspectWithRaw returns the task information and its raw representation.
+// A 404 from the daemon is converted into a typed taskNotFoundError so callers
+// can detect it with IsErrTaskNotFound.
+func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
+	serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
+	if err != nil {
+		if serverResp.statusCode == http.StatusNotFound {
+			return swarm.Task{}, nil, taskNotFoundError{taskID}
+		}
+		return swarm.Task{}, nil, err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	// Read the whole body once so the raw bytes can be returned verbatim
+	// alongside the decoded struct.
+	body, err := ioutil.ReadAll(serverResp.body)
+	if err != nil {
+		return swarm.Task{}, nil, err
+	}
+
+	var response swarm.Task
+	rdr := bytes.NewReader(body)
+	err = json.NewDecoder(rdr).Decode(&response)
+	return response, body, err
+}

Some files were not shown because too many files changed in this diff