Merge pull request #42247 from thaJeztah/remove_discovery
Remove deprecated host-discovery and overlay networks with external k/v
Commit 0f1d65b2e0
194 changed files with 46 additions and 26296 deletions
@@ -80,6 +80,9 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
    if cli.Config, err = loadDaemonCliConfig(opts); err != nil {
        return err
    }
    if err := checkDeprecatedOptions(cli.Config); err != nil {
        return err
    }

    if opts.Validate {
        // If config wasn't OK we wouldn't have made it this far.

@@ -89,8 +92,6 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {

    configureProxyEnv(cli.Config)

    warnOnDeprecatedConfigOptions(cli.Config)

    if err := configureDaemonLogs(cli.Config); err != nil {
        return err
    }

@@ -465,16 +466,12 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
    return conf, nil
}

func warnOnDeprecatedConfigOptions(config *config.Config) {
    if config.ClusterAdvertise != "" {
        logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`)
    }
    if config.ClusterStore != "" {
        logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`)
    }
    if len(config.ClusterOpts) > 0 {
        logrus.Warn(`The "cluster-store-opt" option is deprecated. To be removed soon.`)
func checkDeprecatedOptions(config *config.Config) error {
    // Overlay networks with external k/v stores have been deprecated
    if config.ClusterAdvertise != "" || len(config.ClusterOpts) > 0 || config.ClusterStore != "" {
        return errors.New("Host-discovery and overlay networks with external k/v stores are deprecated. The 'cluster-advertise', 'cluster-store', and 'cluster-store-opt' options have been removed")
    }
    return nil
}

func initRouter(opts routerOptions) {
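The added checkDeprecatedOptions guard turns what previously only logged a warning into a hard startup failure. A minimal, self-contained sketch of the same check (Config here is a stand-in struct, not the daemon's real config.Config type):

package main

import (
    "errors"
    "fmt"
)

// Config is a stand-in for the three daemon config fields involved in the check.
type Config struct {
    ClusterStore     string
    ClusterAdvertise string
    ClusterOpts      map[string]string
}

// checkDeprecatedOptions mirrors the guard added above: any of the removed
// cluster options now aborts daemon startup instead of producing a warning.
func checkDeprecatedOptions(c *Config) error {
    if c.ClusterAdvertise != "" || len(c.ClusterOpts) > 0 || c.ClusterStore != "" {
        return errors.New("Host-discovery and overlay networks with external k/v stores are deprecated. The 'cluster-advertise', 'cluster-store', and 'cluster-store-opt' options have been removed")
    }
    return nil
}

func main() {
    err := checkDeprecatedOptions(&Config{ClusterStore: "consul://localhost:8500"})
    fmt.Println(err) // non-nil: the daemon would refuse to start
}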
@@ -48,10 +48,7 @@ func TestLoadDaemonConfigWithNetwork(t *testing.T) {
}

func TestLoadDaemonConfigWithMapOptions(t *testing.T) {
    content := `{
        "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"},
        "log-opts": {"tag": "test"}
    }`
    content := `{"log-opts": {"tag": "test"}}`
    tempFile := fs.NewFile(t, "config", fs.WithContent(content))
    defer tempFile.Remove()

@@ -59,10 +56,6 @@ func TestLoadDaemonConfigWithMapOptions(t *testing.T) {
    loadedConfig, err := loadDaemonCliConfig(opts)
    assert.NilError(t, err)
    assert.Assert(t, loadedConfig != nil)
    assert.Check(t, loadedConfig.ClusterOpts != nil)

    expectedPath := "/var/lib/docker/discovery_certs/ca.pem"
    assert.Check(t, is.Equal(expectedPath, loadedConfig.ClusterOpts["kv.cacertfile"]))
    assert.Check(t, loadedConfig.LogConfig.Config != nil)
    assert.Check(t, is.Equal("test", loadedConfig.LogConfig.Config["tag"]))
}
@@ -7,14 +7,11 @@ import (
    "net"
    "net/url"
    "os"
    "reflect"
    "strings"
    "sync"

    daemondiscovery "github.com/docker/docker/daemon/discovery"
    "github.com/docker/docker/opts"
    "github.com/docker/docker/pkg/authorization"
    "github.com/docker/docker/pkg/discovery"
    "github.com/docker/docker/registry"
    "github.com/imdario/mergo"
    "github.com/pkg/errors"

@@ -301,27 +298,10 @@ func New() *Config {
            LogConfig: LogConfig{
                Config: make(map[string]string),
            },
            ClusterOpts: make(map[string]string),
        },
    }
}

// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
    if clusterAdvertise == "" {
        return "", daemondiscovery.ErrDiscoveryDisabled
    }
    if clusterStore == "" {
        return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
    }

    advertise, err := discovery.ParseAdvertise(clusterAdvertise)
    if err != nil {
        return "", errors.Wrap(err, "discovery advertise parsing failed")
    }
    return advertise, nil
}

// GetConflictFreeLabels validates Labels for conflict
// In swarm the duplicates for labels are removed
// so we only take same values here, no conflict values

@@ -636,21 +616,6 @@ func ValidateMaxDownloadAttempts(config *Config) error {
    return nil
}

// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
    if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
        return true
    }

    if (config.ClusterOpts == nil && clusterOpts == nil) ||
        (config.ClusterOpts == nil && len(clusterOpts) == 0) ||
        (len(config.ClusterOpts) == 0 && clusterOpts == nil) {
        return false
    }

    return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}

// GetDefaultRuntimeName returns the current default runtime
func (conf *Config) GetDefaultRuntimeName() string {
    conf.Lock()
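The removed ParseClusterAdvertiseSettings enforced the pairing rule between --cluster-store and --cluster-advertise. A self-contained sketch of just that rule (the real function additionally resolved interface names and ports via discovery.ParseAdvertise; errDiscoveryDisabled and parseClusterAdvertiseSettings here are illustrative stand-ins):

package main

import (
    "errors"
    "fmt"
)

var errDiscoveryDisabled = errors.New("discovery is disabled")

// parseClusterAdvertiseSettings sketches the removed pairing rule: an empty
// advertise address means discovery is simply off, while an advertise address
// without a cluster store is a configuration error.
func parseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
    if clusterAdvertise == "" {
        return "", errDiscoveryDisabled
    }
    if clusterStore == "" {
        return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
    }
    return clusterAdvertise, nil
}

func main() {
    _, err := parseClusterAdvertiseSettings("etcd://127.0.0.1:2379", "192.168.0.1:2375")
    fmt.Println(err) // <nil>
    _, err = parseClusterAdvertiseSettings("", "192.168.0.1:2375")
    fmt.Println(err) // pairing error
}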
@@ -120,9 +120,6 @@ func (conf *Config) GetResolvConf() string {

// IsSwarmCompatible defines if swarm mode can be enabled in this config
func (conf *Config) IsSwarmCompatible() error {
    if conf.ClusterStore != "" || conf.ClusterAdvertise != "" {
        return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode")
    }
    if conf.LiveRestoreEnabled {
        return fmt.Errorf("--live-restore daemon configuration is incompatible with swarm mode")
    }
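A small sketch of the compatibility rule shown in IsSwarmCompatible above, with the config fields passed as plain parameters rather than the real Config type (swarmCompatible is an illustrative name):

package main

import (
    "errors"
    "fmt"
)

// swarmCompatible mirrors the removed check: a daemon configured for the
// external k/v store could not also run swarm mode.
func swarmCompatible(clusterStore, clusterAdvertise string, liveRestore bool) error {
    if clusterStore != "" || clusterAdvertise != "" {
        return errors.New("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode")
    }
    if liveRestore {
        return errors.New("--live-restore daemon configuration is incompatible with swarm mode")
    }
    return nil
}

func main() {
    fmt.Println(swarmCompatible("consul://localhost:8500", "", false)) // error
    fmt.Println(swarmCompatible("", "", false))                        // <nil>
}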
@@ -5,7 +5,6 @@ import (
    "strings"
    "testing"

    "github.com/docker/docker/daemon/discovery"
    "github.com/docker/docker/libnetwork/ipamutils"
    "github.com/docker/docker/opts"
    "github.com/spf13/pflag"

@@ -38,23 +37,6 @@ func TestDaemonBrokenConfiguration(t *testing.T) {
    }
}

func TestParseClusterAdvertiseSettings(t *testing.T) {
    _, err := ParseClusterAdvertiseSettings("something", "")
    if err != discovery.ErrDiscoveryDisabled {
        t.Fatalf("expected discovery disabled error, got %v\n", err)
    }

    _, err = ParseClusterAdvertiseSettings("", "something")
    if err == nil {
        t.Fatalf("expected discovery store error, got %v\n", err)
    }

    _, err = ParseClusterAdvertiseSettings("etcd", "127.0.0.1:8080")
    if err != nil {
        t.Fatal(err)
    }
}

func TestFindConfigurationConflicts(t *testing.T) {
    config := map[string]interface{}{"authorization-plugins": "foobar"}
    flags := pflag.NewFlagSet("test", pflag.ContinueOnError)

@@ -447,67 +429,6 @@ func TestValidateConfiguration(t *testing.T) {
    }
}

func TestModifiedDiscoverySettings(t *testing.T) {
    cases := []struct {
        current  *Config
        modified *Config
        expected bool
    }{
        {
            current:  discoveryConfig("foo", "bar", map[string]string{}),
            modified: discoveryConfig("foo", "bar", map[string]string{}),
            expected: false,
        },
        {
            current:  discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}),
            modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}),
            expected: false,
        },
        {
            current:  discoveryConfig("foo", "bar", map[string]string{}),
            modified: discoveryConfig("foo", "bar", nil),
            expected: false,
        },
        {
            current:  discoveryConfig("foo", "bar", nil),
            modified: discoveryConfig("foo", "bar", map[string]string{}),
            expected: false,
        },
        {
            current:  discoveryConfig("foo", "bar", nil),
            modified: discoveryConfig("baz", "bar", nil),
            expected: true,
        },
        {
            current:  discoveryConfig("foo", "bar", nil),
            modified: discoveryConfig("foo", "baz", nil),
            expected: true,
        },
        {
            current:  discoveryConfig("foo", "bar", nil),
            modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}),
            expected: true,
        },
    }

    for _, c := range cases {
        got := ModifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts)
        if c.expected != got {
            t.Fatalf("expected %v, got %v: current config %v, new config %v", c.expected, got, c.current, c.modified)
        }
    }
}

func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config {
    return &Config{
        CommonConfig: CommonConfig{
            ClusterStore:     backendAddr,
            ClusterAdvertise: advertiseAddr,
            ClusterOpts:      opts,
        },
    }
}

// TestReloadSetConfigFileNotExist tests that when `--config-file` is set
// and it doesn't exist the `Reload` function returns an error.
func TestReloadSetConfigFileNotExist(t *testing.T) {
@@ -14,7 +14,6 @@ import (
    "path"
    "path/filepath"
    "runtime"
    "strings"
    "sync"
    "time"

@@ -29,7 +28,6 @@ import (
    "github.com/docker/docker/builder"
    "github.com/docker/docker/container"
    "github.com/docker/docker/daemon/config"
    "github.com/docker/docker/daemon/discovery"
    "github.com/docker/docker/daemon/events"
    "github.com/docker/docker/daemon/exec"
    _ "github.com/docker/docker/daemon/graphdriver/register" // register graph drivers

@@ -93,7 +91,6 @@ type Daemon struct {
    EventsService    *events.Events
    netController    libnetwork.NetworkController
    volumes          *volumesservice.VolumesService
    discoveryWatcher discovery.Reloader
    root             string
    seccompEnabled   bool
    apparmorEnabled  bool

@@ -523,8 +520,6 @@ func (daemon *Daemon) restore() error {
        }
    }

    // Make sure networks are available before starting
    daemon.waitForNetworks(c)
    if err := daemon.containerStart(c, "", "", true); err != nil {
        log.WithError(err).Error("failed to start container")
    }

@@ -629,40 +624,6 @@ func (daemon *Daemon) RestartSwarmContainers() {
    group.Wait()
}

// waitForNetworks is used during daemon initialization when starting up containers
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that require discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
    if daemon.discoveryWatcher == nil {
        return
    }

    // Make sure if the container has a network that requires discovery that the discovery service is available before starting
    for netName := range c.NetworkSettings.Networks {
        // If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
        // Most likely this is because the K/V store used for discovery is in a container and needs to be started
        if _, err := daemon.netController.NetworkByName(netName); err != nil {
            if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
                continue
            }

            // use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
            // FIXME: why is this slow???
            dur := 60 * time.Second
            timer := time.NewTimer(dur)

            logrus.WithField("container", c.ID).Debugf("Container %s waiting for network to be ready", c.Name)
            select {
            case <-daemon.discoveryWatcher.ReadyCh():
            case <-timer.C:
            }
            timer.Stop()

            return
        }
    }
}

func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
    return daemon.linkIndex.children(c)
}

@@ -1050,12 +1011,6 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
        return nil, err
    }

    // Discovery is only enabled when the daemon is launched with an address to advertise. When
    // initialized, the daemon is registered and we can store the discovery backend as it's read-only
    if err := d.initDiscovery(config); err != nil {
        return nil, err
    }

    sysInfo := d.RawSysInfo()
    for _, w := range sysInfo.Warnings {
        logrus.Warn(w)

@@ -1390,26 +1345,6 @@ func (daemon *Daemon) IsShuttingDown() bool {
    return daemon.shutdown
}

// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(conf *config.Config) error {
    advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise)
    if err != nil {
        if err == discovery.ErrDiscoveryDisabled {
            return nil
        }
        return err
    }

    conf.ClusterAdvertise = advertise
    discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts)
    if err != nil {
        return fmt.Errorf("discovery initialization failed (%v)", err)
    }

    daemon.discoveryWatcher = discoveryWatcher
    return nil
}

func isBridgeNetworkDisabled(conf *config.Config) bool {
    return conf.BridgeConfig.Iface == config.DisableNetworkBridge
}

@@ -1428,27 +1363,6 @@ func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.Plu
    dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
    options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
    options = append(options, nwconfig.OptionDefaultNetwork(dn))

    if strings.TrimSpace(dconfig.ClusterStore) != "" {
        kv := strings.Split(dconfig.ClusterStore, "://")
        if len(kv) != 2 {
            return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
        }
        options = append(options, nwconfig.OptionKVProvider(kv[0]))
        options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
    }
    if len(dconfig.ClusterOpts) > 0 {
        options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
    }

    if daemon.discoveryWatcher != nil {
        options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
    }

    if dconfig.ClusterAdvertise != "" {
        options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
    }

    options = append(options, nwconfig.OptionLabels(dconfig.Labels))
    options = append(options, driverOptions(dconfig))
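In the removed networkOptions block, the --cluster-store value was split into a libnetwork KV provider and URL. A self-contained sketch of that parsing step (splitClusterStore is an illustrative name, not a function in the codebase):

package main

import (
    "errors"
    "fmt"
    "strings"
)

// splitClusterStore derives the KV provider and URL from a cluster-store
// value, e.g. "consul://localhost:8500" -> provider "consul", URL "localhost:8500".
// Malformed values such as "consul://localhost:8500://test://bbb" are rejected,
// matching the TestNetworkOptions case removed below.
func splitClusterStore(clusterStore string) (provider, url string, err error) {
    kv := strings.Split(clusterStore, "://")
    if len(kv) != 2 {
        return "", "", errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
    }
    return kv[0], kv[1], nil
}

func main() {
    p, u, err := splitClusterStore("consul://localhost:8500")
    fmt.Println(p, u, err) // consul localhost:8500 <nil>
}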
@@ -10,7 +10,6 @@ import (
    "github.com/docker/docker/container"
    "github.com/docker/docker/errdefs"
    "github.com/docker/docker/libnetwork"
    _ "github.com/docker/docker/pkg/discovery/memory"
    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/truncindex"
    volumesservice "github.com/docker/docker/volume/service"

@@ -250,30 +250,6 @@ func TestParseNNPSecurityOptions(t *testing.T) {
    }
}

func TestNetworkOptions(t *testing.T) {
    daemon := &Daemon{}
    dconfigCorrect := &config.Config{
        CommonConfig: config.CommonConfig{
            ClusterStore:     "consul://localhost:8500",
            ClusterAdvertise: "192.168.0.1:8000",
        },
    }

    if _, err := daemon.networkOptions(dconfigCorrect, nil, nil); err != nil {
        t.Fatalf("Expect networkOptions success, got error: %v", err)
    }

    dconfigWrong := &config.Config{
        CommonConfig: config.CommonConfig{
            ClusterStore: "consul://localhost:8500://test://bbb",
        },
    }

    if _, err := daemon.networkOptions(dconfigWrong, nil, nil); err == nil {
        t.Fatal("Expected networkOptions error, got nil")
    }
}

func TestVerifyPlatformContainerResources(t *testing.T) {
    t.Parallel()
    var (
@@ -1,204 +0,0 @@
package discovery // import "github.com/docker/docker/daemon/discovery"

import (
    "errors"
    "fmt"
    "strconv"
    "time"

    "github.com/docker/docker/pkg/discovery"
    "github.com/sirupsen/logrus"

    // Register the libkv backends for discovery.
    _ "github.com/docker/docker/pkg/discovery/kv"
)

const (
    // defaultDiscoveryHeartbeat is the default value for discovery heartbeat interval.
    defaultDiscoveryHeartbeat = 20 * time.Second
    // defaultDiscoveryTTLFactor is the default TTL factor for discovery
    defaultDiscoveryTTLFactor = 3
)

// ErrDiscoveryDisabled is an error returned if the discovery is disabled
var ErrDiscoveryDisabled = errors.New("discovery is disabled")

// Reloader is the discovery reloader of the daemon
type Reloader interface {
    discovery.Watcher
    Stop()
    Reload(backend, address string, clusterOpts map[string]string) error
    ReadyCh() <-chan struct{}
}

type daemonDiscoveryReloader struct {
    backend discovery.Backend
    ticker  *time.Ticker
    term    chan bool
    readyCh chan struct{}
}

func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
    return d.backend.Watch(stopCh)
}

func (d *daemonDiscoveryReloader) ReadyCh() <-chan struct{} {
    return d.readyCh
}

func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) {
    var (
        heartbeat = defaultDiscoveryHeartbeat
        ttl       = defaultDiscoveryTTLFactor * defaultDiscoveryHeartbeat
    )

    if hb, ok := clusterOpts["discovery.heartbeat"]; ok {
        h, err := strconv.Atoi(hb)
        if err != nil {
            return time.Duration(0), time.Duration(0), err
        }

        if h <= 0 {
            return time.Duration(0), time.Duration(0),
                fmt.Errorf("discovery.heartbeat must be positive")
        }

        heartbeat = time.Duration(h) * time.Second
        ttl = defaultDiscoveryTTLFactor * heartbeat
    }

    if tstr, ok := clusterOpts["discovery.ttl"]; ok {
        t, err := strconv.Atoi(tstr)
        if err != nil {
            return time.Duration(0), time.Duration(0), err
        }

        if t <= 0 {
            return time.Duration(0), time.Duration(0),
                fmt.Errorf("discovery.ttl must be positive")
        }

        ttl = time.Duration(t) * time.Second

        if _, ok := clusterOpts["discovery.heartbeat"]; !ok {
            heartbeat = time.Duration(t) * time.Second / time.Duration(defaultDiscoveryTTLFactor)
        }

        if ttl <= heartbeat {
            return time.Duration(0), time.Duration(0),
                fmt.Errorf("discovery.ttl timer must be greater than discovery.heartbeat")
        }
    }

    return heartbeat, ttl, nil
}

// Init initializes the nodes discovery subsystem by connecting to the specified backend
// and starts a registration loop to advertise the current node under the specified address.
func Init(backendAddress, advertiseAddress string, clusterOpts map[string]string) (Reloader, error) {
    heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts)
    if err != nil {
        return nil, err
    }

    reloader := &daemonDiscoveryReloader{
        backend: backend,
        ticker:  time.NewTicker(heartbeat),
        term:    make(chan bool),
        readyCh: make(chan struct{}),
    }
    // We call Register() on the discovery backend in a loop for the whole lifetime of the daemon,
    // but we never actually Watch() for nodes appearing and disappearing for the moment.
    go reloader.advertiseHeartbeat(advertiseAddress)
    return reloader, nil
}

// advertiseHeartbeat registers the current node against the discovery backend using the specified
// address. The function never returns, as registration against the backend comes with a TTL and
// requires regular heartbeats.
func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) {
    var ready bool
    if err := d.initHeartbeat(address); err == nil {
        ready = true
        close(d.readyCh)
    } else {
        logrus.WithError(err).Debug("First discovery heartbeat failed")
    }

    for {
        select {
        case <-d.ticker.C:
            if err := d.backend.Register(address); err != nil {
                logrus.Warnf("Registering as %q in discovery failed: %v", address, err)
            } else {
                if !ready {
                    close(d.readyCh)
                    ready = true
                }
            }
        case <-d.term:
            return
        }
    }
}

// initHeartbeat is used to do the first heartbeat. It uses a tight loop until
// either the timeout period is reached or the heartbeat is successful and returns.
func (d *daemonDiscoveryReloader) initHeartbeat(address string) error {
    // Setup a short ticker until the first heartbeat has succeeded
    t := time.NewTicker(500 * time.Millisecond)
    defer t.Stop()

    // timeout makes sure that after a period of time we stop being so aggressive trying to reach the discovery service
    timeout := time.NewTimer(60 * time.Second)
    defer timeout.Stop()

    for {
        select {
        case <-timeout.C:
            return errors.New("timeout waiting for initial discovery")
        case <-d.term:
            return errors.New("terminated")
        case <-t.C:
            if err := d.backend.Register(address); err == nil {
                return nil
            }
        }
    }
}

// Reload makes the watcher to stop advertising and reconfigures it to advertise in a new address.
func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error {
    d.Stop()

    heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts)
    if err != nil {
        return err
    }

    d.backend = backend
    d.ticker = time.NewTicker(heartbeat)
    d.readyCh = make(chan struct{})

    go d.advertiseHeartbeat(advertiseAddress)
    return nil
}

// Stop terminates the discovery advertising.
func (d *daemonDiscoveryReloader) Stop() {
    d.ticker.Stop()
    d.term <- true
}

func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) {
    heartbeat, ttl, err := discoveryOpts(clusterOpts)
    if err != nil {
        return 0, nil, err
    }

    backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts)
    if err != nil {
        return 0, nil, err
    }
    return heartbeat, backend, nil
}
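The deleted discoveryOpts derived the heartbeat interval and registration TTL from the cluster-store-opts map. A runnable sketch of just its defaulting rules (validation of non-positive values and of ttl > heartbeat is omitted; ttlFromOpts is an illustrative helper, not part of the original code):

package main

import (
    "fmt"
    "time"
)

const (
    defaultHeartbeat = 20 * time.Second
    defaultTTLFactor = 3
)

// ttlFromOpts sketches the defaulting behaviour: discovery.ttl defaults to
// three times the heartbeat, and when only discovery.ttl is given the
// heartbeat is derived as ttl/3.
func ttlFromOpts(heartbeatSec, ttlSec int) (time.Duration, time.Duration) {
    heartbeat := defaultHeartbeat
    if heartbeatSec > 0 {
        heartbeat = time.Duration(heartbeatSec) * time.Second
    }
    ttl := defaultTTLFactor * heartbeat
    if ttlSec > 0 {
        ttl = time.Duration(ttlSec) * time.Second
        if heartbeatSec == 0 {
            heartbeat = ttl / defaultTTLFactor
        }
    }
    return heartbeat, ttl
}

func main() {
    hb, ttl := ttlFromOpts(0, 30) // only discovery.ttl=30 set
    fmt.Println(hb, ttl)          // 10s 30s, as exercised by the deleted TestDiscoveryOpts
}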
@@ -1,96 +0,0 @@
package discovery // import "github.com/docker/docker/daemon/discovery"

import (
    "fmt"
    "testing"
    "time"

    "gotest.tools/v3/assert"
    is "gotest.tools/v3/assert/cmp"
)

func TestDiscoveryOptsErrors(t *testing.T) {
    var testcases = []struct {
        doc  string
        opts map[string]string
    }{
        {
            doc:  "discovery.ttl < discovery.heartbeat",
            opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"},
        },
        {
            doc:  "discovery.ttl == discovery.heartbeat",
            opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"},
        },
        {
            doc:  "negative discovery.heartbeat",
            opts: map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"},
        },
        {
            doc:  "negative discovery.ttl",
            opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"},
        },
        {
            doc:  "invalid discovery.heartbeat",
            opts: map[string]string{"discovery.heartbeat": "invalid"},
        },
        {
            doc:  "invalid discovery.ttl",
            opts: map[string]string{"discovery.ttl": "invalid"},
        },
    }

    for _, testcase := range testcases {
        _, _, err := discoveryOpts(testcase.opts)
        assert.Check(t, is.ErrorContains(err, ""), testcase.doc)
    }
}

func TestDiscoveryOpts(t *testing.T) {
    clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"}
    heartbeat, ttl, err := discoveryOpts(clusterOpts)
    assert.NilError(t, err)
    assert.Check(t, is.Equal(10*time.Second, heartbeat))
    assert.Check(t, is.Equal(20*time.Second, ttl))

    clusterOpts = map[string]string{"discovery.heartbeat": "10"}
    heartbeat, ttl, err = discoveryOpts(clusterOpts)
    assert.NilError(t, err)
    assert.Check(t, is.Equal(10*time.Second, heartbeat))
    assert.Check(t, is.Equal(10*defaultDiscoveryTTLFactor*time.Second, ttl))

    clusterOpts = map[string]string{"discovery.ttl": "30"}
    heartbeat, ttl, err = discoveryOpts(clusterOpts)
    assert.NilError(t, err)

    if ttl != 30*time.Second {
        t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl)
    }

    expected := 30 * time.Second / defaultDiscoveryTTLFactor
    if heartbeat != expected {
        t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat)
    }

    discoveryTTL := fmt.Sprintf("%d", defaultDiscoveryTTLFactor-1)
    clusterOpts = map[string]string{"discovery.ttl": discoveryTTL}
    heartbeat, _, err = discoveryOpts(clusterOpts)
    if err == nil && heartbeat == 0 {
        t.Fatal("discovery.heartbeat must be positive")
    }

    clusterOpts = map[string]string{}
    heartbeat, ttl, err = discoveryOpts(clusterOpts)
    if err != nil {
        t.Fatal(err)
    }

    if heartbeat != defaultDiscoveryHeartbeat {
        t.Fatalf("Heartbeat - Expected : %v, Actual : %v", defaultDiscoveryHeartbeat, heartbeat)
    }

    expected = defaultDiscoveryHeartbeat * defaultDiscoveryTTLFactor
    if ttl != expected {
        t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl)
    }
}
@@ -70,7 +70,6 @@ func (daemon *Daemon) SystemInfo() *types.Info {
        Isolation: daemon.defaultIsolation,
    }

    daemon.fillClusterInfo(v)
    daemon.fillAPIInfo(v)
    // Retrieve platform specific info
    daemon.fillPlatformInfo(v, sysInfo)

@@ -131,16 +130,6 @@ func (daemon *Daemon) SystemVersion() types.Version {
    return v
}

func (daemon *Daemon) fillClusterInfo(v *types.Info) {
    v.ClusterAdvertise = daemon.configStore.ClusterAdvertise
    v.ClusterStore = daemon.configStore.ClusterStore

    if v.ClusterAdvertise != "" || v.ClusterStore != "" {
        v.Warnings = append(v.Warnings, `WARNING: node discovery and overlay networks with an external k/v store (cluster-advertise,
cluster-store, cluster-store-opt) are deprecated and will be removed in a future release.`)
    }
}

func (daemon *Daemon) fillDriverInfo(v *types.Info) {
    switch daemon.graphDriver {
    case "aufs", "devicemapper", "overlay":
@@ -5,7 +5,6 @@ import (
    "fmt"

    "github.com/docker/docker/daemon/config"
    "github.com/docker/docker/daemon/discovery"
    "github.com/sirupsen/logrus"
)

@@ -63,9 +62,6 @@ func (daemon *Daemon) Reload(conf *config.Config) (err error) {
    daemon.reloadShutdownTimeout(conf, attributes)
    daemon.reloadFeatures(conf, attributes)

    if err := daemon.reloadClusterDiscovery(conf, attributes); err != nil {
        return err
    }
    if err := daemon.reloadLabels(conf, attributes); err != nil {
        return err
    }

@@ -160,81 +156,6 @@ func (daemon *Daemon) reloadShutdownTimeout(conf *config.Config, attributes map[
    attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout)
}

// reloadClusterDiscovery updates configuration with cluster discovery options
// and updates the passed attributes
func (daemon *Daemon) reloadClusterDiscovery(conf *config.Config, attributes map[string]string) (err error) {
    defer func() {
        // prepare reload event attributes with updatable configurations
        attributes["cluster-store"] = conf.ClusterStore
        attributes["cluster-advertise"] = conf.ClusterAdvertise

        attributes["cluster-store-opts"] = "{}"
        if daemon.configStore.ClusterOpts != nil {
            opts, err2 := json.Marshal(conf.ClusterOpts)
            if err != nil {
                err = err2
            }
            attributes["cluster-store-opts"] = string(opts)
        }
    }()

    newAdvertise := conf.ClusterAdvertise
    newClusterStore := daemon.configStore.ClusterStore
    if conf.IsValueSet("cluster-advertise") {
        if conf.IsValueSet("cluster-store") {
            newClusterStore = conf.ClusterStore
        }
        newAdvertise, err = config.ParseClusterAdvertiseSettings(newClusterStore, conf.ClusterAdvertise)
        if err != nil && err != discovery.ErrDiscoveryDisabled {
            return err
        }
    }

    if daemon.clusterProvider != nil {
        if err := conf.IsSwarmCompatible(); err != nil {
            return err
        }
    }

    // check discovery modifications
    if !config.ModifiedDiscoverySettings(daemon.configStore, newClusterStore, newAdvertise, conf.ClusterOpts) {
        return nil
    }

    // enable discovery for the first time if it was not previously enabled
    if daemon.discoveryWatcher == nil {
        discoveryWatcher, err := discovery.Init(newClusterStore, newAdvertise, conf.ClusterOpts)
        if err != nil {
            return fmt.Errorf("failed to initialize discovery: %v", err)
        }
        daemon.discoveryWatcher = discoveryWatcher
    } else if err == discovery.ErrDiscoveryDisabled {
        // disable discovery if it was previously enabled and it's disabled now
        daemon.discoveryWatcher.Stop()
    } else if err = daemon.discoveryWatcher.Reload(conf.ClusterStore, newAdvertise, conf.ClusterOpts); err != nil {
        // reload discovery
        return err
    }

    daemon.configStore.ClusterStore = newClusterStore
    daemon.configStore.ClusterOpts = conf.ClusterOpts
    daemon.configStore.ClusterAdvertise = newAdvertise

    if daemon.netController == nil {
        return nil
    }
    netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil)
    if err != nil {
        logrus.WithError(err).Warn("failed to get options with network controller")
        return nil
    }
    err = daemon.netController.ReloadConfiguration(netOptions...)
    if err != nil {
        logrus.Warnf("Failed to reload configuration with network controller: %v", err)
    }
    return nil
}

// reloadLabels updates configuration with engine labels
// and updates the passed attributes
func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]string) error {
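The removed reloadClusterDiscovery also populated the daemon-reload event attributes, JSON-encoding the cluster options. A rough sketch of that attribute encoding (clusterStoreOptsAttr is an illustrative helper; the original marshalled conf.ClusterOpts inside a deferred function):

package main

import (
    "encoding/json"
    "fmt"
)

// clusterStoreOptsAttr returns the value the reload event used for the
// "cluster-store-opts" attribute: the option map as JSON, or "{}" when unset.
func clusterStoreOptsAttr(opts map[string]string) string {
    if len(opts) == 0 {
        return "{}"
    }
    b, err := json.Marshal(opts)
    if err != nil {
        return "{}"
    }
    return string(b)
}

func main() {
    fmt.Println(clusterStoreOptsAttr(nil))
    fmt.Println(clusterStoreOptsAttr(map[string]string{"kv.cacertfile": "/certs/ca.pem"}))
}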
@@ -2,16 +2,12 @@ package daemon // import "github.com/docker/docker/daemon"

import (
    "os"
    "reflect"
    "sort"
    "testing"
    "time"

    "github.com/docker/docker/daemon/config"
    "github.com/docker/docker/daemon/images"
    "github.com/docker/docker/libnetwork"
    "github.com/docker/docker/pkg/discovery"
    _ "github.com/docker/docker/pkg/discovery/memory"
    "github.com/docker/docker/registry"
    "github.com/sirupsen/logrus"
    "gotest.tools/v3/assert"

@@ -341,180 +337,6 @@ func TestDaemonReloadNotAffectOthers(t *testing.T) {
    }
}

func TestDaemonDiscoveryReload(t *testing.T) {
    daemon := &Daemon{
        imageService: images.NewImageService(images.ImageServiceConfig{}),
    }
    muteLogs()
    daemon.configStore = &config.Config{
        CommonConfig: config.CommonConfig{
            ClusterStore:     "memory://127.0.0.1",
            ClusterAdvertise: "127.0.0.1:3333",
        },
    }

    if err := daemon.initDiscovery(daemon.configStore); err != nil {
        t.Fatal(err)
    }

    expected := discovery.Entries{
        &discovery.Entry{Host: "127.0.0.1", Port: "3333"},
    }

    select {
    case <-time.After(10 * time.Second):
        t.Fatal("timeout waiting for discovery")
    case <-daemon.discoveryWatcher.ReadyCh():
    }

    stopCh := make(chan struct{})
    defer close(stopCh)
    ch, errCh := daemon.discoveryWatcher.Watch(stopCh)

    select {
    case <-time.After(1 * time.Second):
        t.Fatal("failed to get discovery advertisements in time")
    case e := <-ch:
        if !reflect.DeepEqual(e, expected) {
            t.Fatalf("expected %v, got %v\n", expected, e)
        }
    case e := <-errCh:
        t.Fatal(e)
    }

    valuesSets := make(map[string]interface{})
    valuesSets["cluster-store"] = "memory://127.0.0.1:2222"
    valuesSets["cluster-advertise"] = "127.0.0.1:5555"
    newConfig := &config.Config{
        CommonConfig: config.CommonConfig{
            ClusterStore:     "memory://127.0.0.1:2222",
            ClusterAdvertise: "127.0.0.1:5555",
            ValuesSet:        valuesSets,
        },
    }

    expected = discovery.Entries{
        &discovery.Entry{Host: "127.0.0.1", Port: "5555"},
    }

    if err := daemon.Reload(newConfig); err != nil {
        t.Fatal(err)
    }

    select {
    case <-time.After(10 * time.Second):
        t.Fatal("timeout waiting for discovery")
    case <-daemon.discoveryWatcher.ReadyCh():
    }

    ch, errCh = daemon.discoveryWatcher.Watch(stopCh)

    select {
    case <-time.After(1 * time.Second):
        t.Fatal("failed to get discovery advertisements in time")
    case e := <-ch:
        if !reflect.DeepEqual(e, expected) {
            t.Fatalf("expected %v, got %v\n", expected, e)
        }
    case e := <-errCh:
        t.Fatal(e)
    }
}

func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) {
    daemon := &Daemon{
        imageService: images.NewImageService(images.ImageServiceConfig{}),
    }
    daemon.configStore = &config.Config{}
    muteLogs()

    valuesSet := make(map[string]interface{})
    valuesSet["cluster-store"] = "memory://127.0.0.1:2222"
    valuesSet["cluster-advertise"] = "127.0.0.1:5555"
    newConfig := &config.Config{
        CommonConfig: config.CommonConfig{
            ClusterStore:     "memory://127.0.0.1:2222",
            ClusterAdvertise: "127.0.0.1:5555",
            ValuesSet:        valuesSet,
        },
    }

    expected := discovery.Entries{
        &discovery.Entry{Host: "127.0.0.1", Port: "5555"},
    }

    if err := daemon.Reload(newConfig); err != nil {
        t.Fatal(err)
    }

    select {
    case <-time.After(10 * time.Second):
        t.Fatal("timeout waiting for discovery")
    case <-daemon.discoveryWatcher.ReadyCh():
    }

    stopCh := make(chan struct{})
    defer close(stopCh)
    ch, errCh := daemon.discoveryWatcher.Watch(stopCh)

    select {
    case <-time.After(1 * time.Second):
        t.Fatal("failed to get discovery advertisements in time")
    case e := <-ch:
        if !reflect.DeepEqual(e, expected) {
            t.Fatalf("expected %v, got %v\n", expected, e)
        }
    case e := <-errCh:
        t.Fatal(e)
    }
}

func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) {
    daemon := &Daemon{
        imageService: images.NewImageService(images.ImageServiceConfig{}),
    }
    daemon.configStore = &config.Config{
        CommonConfig: config.CommonConfig{
            ClusterStore: "memory://127.0.0.1",
        },
    }
    valuesSets := make(map[string]interface{})
    valuesSets["cluster-advertise"] = "127.0.0.1:5555"
    newConfig := &config.Config{
        CommonConfig: config.CommonConfig{
            ClusterAdvertise: "127.0.0.1:5555",
            ValuesSet:        valuesSets,
        },
    }
    expected := discovery.Entries{
        &discovery.Entry{Host: "127.0.0.1", Port: "5555"},
    }

    if err := daemon.Reload(newConfig); err != nil {
        t.Fatal(err)
    }

    select {
    case <-daemon.discoveryWatcher.ReadyCh():
    case <-time.After(10 * time.Second):
        t.Fatal("Timeout waiting for discovery")
    }
    stopCh := make(chan struct{})
    defer close(stopCh)
    ch, errCh := daemon.discoveryWatcher.Watch(stopCh)

    select {
    case <-time.After(1 * time.Second):
        t.Fatal("failed to get discovery advertisements in time")
    case e := <-ch:
        if !reflect.DeepEqual(e, expected) {
            t.Fatalf("expected %v, got %v\n", expected, e)
        }
    case e := <-errCh:
        t.Fatal(e)
    }
}

func TestDaemonReloadNetworkDiagnosticPort(t *testing.T) {
    if os.Getuid() != 0 {
        t.Skip("root required")
@@ -601,7 +601,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *testing.T) {
    // which may happen if it was created with the same IP range.
    deleteInterface(c, "docker0")

    bridgeName := "external-bridge"
    bridgeName := "ext-bridge1"
    bridgeIP := "192.169.1.1/24"
    _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP)

@@ -720,7 +720,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *testing.T) {
    // which may happen if it was created with the same IP range.
    deleteInterface(c, "docker0")

    bridgeName := "external-bridge"
    bridgeName := "ext-bridge2"
    bridgeIP := "192.169.1.1/24"

    createInterface(c, "bridge", bridgeName, bridgeIP)

@@ -746,7 +746,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr2(c *testing.T) {
    // which may happen if it was created with the same IP range.
    deleteInterface(c, "docker0")

    bridgeName := "external-bridge"
    bridgeName := "ext-bridge3"
    bridgeIP := "10.2.2.1/16"

    createInterface(c, "bridge", bridgeName, bridgeIP)

@@ -775,7 +775,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeFixedCIDREqualBridgeNetwork(c *testi
    // which may happen if it was created with the same IP range.
    deleteInterface(c, "docker0")

    bridgeName := "external-bridge"
    bridgeName := "ext-bridge4"
    bridgeIP := "172.27.42.1/16"

    createInterface(c, "bridge", bridgeName, bridgeIP)

@@ -840,22 +840,6 @@ func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainer
    s.d.Restart(c)
}

func (s *DockerDaemonSuite) TestDaemonDefaultNetworkInvalidClusterConfig(c *testing.T) {

    // Start daemon without docker0 bridge
    defaultNetworkBridge := "docker0"
    deleteInterface(c, defaultNetworkBridge)

    discoveryBackend := "consul://consuladdr:consulport/some/path"
    s.d.Start(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend))

    // Start daemon with docker0 bridge
    result := icmd.RunCommand("ifconfig", defaultNetworkBridge)
    result.Assert(c, icmd.Success)

    s.d.Restart(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend))
}

func (s *DockerDaemonSuite) TestDaemonIP(c *testing.T) {
    d := s.d

@@ -895,7 +879,7 @@ func (s *DockerDaemonSuite) TestDaemonICCPing(c *testing.T) {
    // which may happen if it was created with the same IP range.
    deleteInterface(c, "docker0")

    bridgeName := "external-bridge"
    bridgeName := "ext-bridge5"
    bridgeIP := "192.169.1.1/24"

    createInterface(c, "bridge", bridgeName, bridgeIP)

@@ -933,7 +917,7 @@ func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *testing.T) {
    // which may happen if it was created with the same IP range.
    deleteInterface(c, "docker0")

    bridgeName := "external-bridge"
    bridgeName := "ext-bridge6"
    bridgeIP := "192.169.1.1/24"

    createInterface(c, "bridge", bridgeName, bridgeIP)

@@ -959,7 +943,7 @@ func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *tes
    // which may happen if it was created with the same IP range.
    deleteInterface(c, "docker0")

    bridgeName := "external-bridge"
    bridgeName := "ext-bridge7"
    bridgeIP := "192.169.1.1/24"

    createInterface(c, "bridge", bridgeName, bridgeIP)

@@ -2229,51 +2213,6 @@ func (s *DockerDaemonSuite) TestDaemonDebugLog(c *testing.T) {
    assert.Assert(c, strings.Contains(b.String(), debugLog))
}

func (s *DockerDaemonSuite) TestDaemonDiscoveryBackendConfigReload(c *testing.T) {
    testRequires(c, testEnv.IsLocalDaemon, DaemonIsLinux)

    // daemon config file
    daemonConfig := `{ "debug" : false }`
    configFile, err := os.CreateTemp("", "test-daemon-discovery-backend-config-reload-config")
    assert.Assert(c, err == nil, "could not create temp file for config reload")
    configFilePath := configFile.Name()
    defer func() {
        configFile.Close()
        os.RemoveAll(configFile.Name())
    }()

    _, err = configFile.Write([]byte(daemonConfig))
    assert.NilError(c, err)

    // --log-level needs to be set so that d.Start() doesn't add --debug causing
    // a conflict with the config
    s.d.Start(c, "--config-file", configFilePath, "--log-level=info")

    // daemon config file
    daemonConfig = `{
        "cluster-store": "consul://consuladdr:consulport/some/path",
        "cluster-advertise": "192.168.56.100:0",
        "debug" : false
    }`

    err = configFile.Truncate(0)
    assert.NilError(c, err)
    _, err = configFile.Seek(0, io.SeekStart)
    assert.NilError(c, err)

    _, err = configFile.Write([]byte(daemonConfig))
    assert.NilError(c, err)

    err = s.d.ReloadConfig()
    assert.Assert(c, err == nil, "error reloading daemon config")

    out, err := s.d.Cmd("info")
    assert.NilError(c, err)

    assert.Assert(c, strings.Contains(out, "Cluster Store: consul://consuladdr:consulport/some/path"))
    assert.Assert(c, strings.Contains(out, "Cluster Advertise: 192.168.56.100:0"))
}

// Test for #21956
func (s *DockerDaemonSuite) TestDaemonLogOptions(c *testing.T) {
    s.d.StartWithBusybox(c, "--log-driver=syslog", "--log-opt=syslog-address=udp://127.0.0.1:514")
@@ -415,9 +415,6 @@ func (s *DockerDaemonSuite) TestDaemonEvents(c *testing.T) {
    expectedSubstrings := []string{
        " daemon reload " + info.ID + " ",
        "(allow-nondistributable-artifacts=[",
        " cluster-advertise=, ",
        " cluster-store=, ",
        " cluster-store-opts=",
        " debug=true, ",
        " default-ipc-mode=",
        " default-runtime=",
@@ -152,17 +152,11 @@ func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *testing.T) {
    assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
    d.Stop(c)

    // start a daemon with --cluster-store and --cluster-advertise
    err := d.StartWithError("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375")
    // start a daemon with --live-restore
    err := d.StartWithError("--live-restore")
    assert.ErrorContains(c, err, "")
    content, err := d.ReadLogFile()
    assert.NilError(c, err)
    assert.Assert(c, strings.Contains(string(content), "--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode"))
    // start a daemon with --live-restore
    err = d.StartWithError("--live-restore")
    assert.ErrorContains(c, err, "")
    content, err = d.ReadLogFile()
    assert.NilError(c, err)
    assert.Assert(c, strings.Contains(string(content), "--live-restore daemon configuration is incompatible with swarm mode"))
    // restart for teardown
    d.StartNode(c)
@@ -5,16 +5,12 @@ package system // import "github.com/docker/docker/integration/system"

import (
    "context"
    "fmt"
    "net"
    "net/http"
    "testing"

    "github.com/docker/docker/testutil/daemon"
    req "github.com/docker/docker/testutil/request"
    "gotest.tools/v3/assert"
    is "gotest.tools/v3/assert/cmp"
    "gotest.tools/v3/skip"
)

func TestInfoBinaryCommits(t *testing.T) {

@@ -48,72 +44,3 @@ func TestInfoAPIVersioned(t *testing.T) {
    assert.Check(t, is.Contains(out, "ExecutionDriver"))
    assert.Check(t, is.Contains(out, "not supported"))
}

// TestInfoDiscoveryBackend verifies that a daemon run with `--cluster-advertise` and
// `--cluster-store` properly returns the backend's endpoint in info output.
func TestInfoDiscoveryBackend(t *testing.T) {
    skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon")

    const (
        discoveryBackend  = "consul://consuladdr:consulport/some/path"
        discoveryAdvertise = "1.1.1.1:2375"
    )

    d := daemon.New(t)
    d.Start(t, "--cluster-store="+discoveryBackend, "--cluster-advertise="+discoveryAdvertise)
    defer d.Stop(t)

    info := d.Info(t)
    assert.Equal(t, info.ClusterStore, discoveryBackend)
    assert.Equal(t, info.ClusterAdvertise, discoveryAdvertise)
}

// TestInfoDiscoveryInvalidAdvertise verifies that a daemon run with
// an invalid `--cluster-advertise` configuration
func TestInfoDiscoveryInvalidAdvertise(t *testing.T) {
    skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon")
    d := daemon.New(t)

    // --cluster-advertise with an invalid string is an error
    err := d.StartWithError("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=invalid")
    if err == nil {
        d.Stop(t)
    }
    assert.ErrorContains(t, err, "", "expected error when starting daemon")

    // --cluster-advertise without --cluster-store is also an error
    err = d.StartWithError("--cluster-advertise=1.1.1.1:2375")
    if err == nil {
        d.Stop(t)
    }
    assert.ErrorContains(t, err, "", "expected error when starting daemon")
}

// TestInfoDiscoveryAdvertiseInterfaceName verifies that a daemon run with `--cluster-advertise`
// configured with interface name properly show the advertise ip-address in info output.
func TestInfoDiscoveryAdvertiseInterfaceName(t *testing.T) {
    skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon")
    skip.If(t, testEnv.IsRootless, "rootless mode has different view of network")
    // TODO should we check for networking availability (integration-cli suite checks for networking through `Network()`)

    d := daemon.New(t)
    const (
        discoveryStore     = "consul://consuladdr:consulport/some/path"
        discoveryInterface = "eth0"
    )

    d.Start(t, "--cluster-store="+discoveryStore, fmt.Sprintf("--cluster-advertise=%s:2375", discoveryInterface))
    defer d.Stop(t)

    iface, err := net.InterfaceByName(discoveryInterface)
    assert.NilError(t, err)
    addrs, err := iface.Addrs()
    assert.NilError(t, err)
    assert.Assert(t, len(addrs) > 0)
    ip, _, err := net.ParseCIDR(addrs[0].String())
    assert.NilError(t, err)

    info := d.Info(t)
    assert.Equal(t, info.ClusterStore, discoveryStore)
    assert.Equal(t, info.ClusterAdvertise, ip.String()+":2375")
}
@@ -11,9 +11,7 @@ import (
    "github.com/docker/docker/libnetwork/netlabel"
    "github.com/docker/docker/libnetwork/osl"
    "github.com/docker/docker/libnetwork/portallocator"
    "github.com/docker/docker/pkg/discovery"
    "github.com/docker/docker/pkg/plugingetter"
    "github.com/docker/go-connections/tlsconfig"
    "github.com/docker/libkv/store"
    "github.com/pelletier/go-toml"
    "github.com/sirupsen/logrus"

@@ -27,7 +25,6 @@ const (
// Config encapsulates configurations of various Libnetwork components
type Config struct {
    Daemon          DaemonCfg
    Cluster         ClusterCfg
    Scopes          map[string]*datastore.ScopeCfg
    ActiveSandboxes map[string]interface{}
    PluginGetter    plugingetter.PluginGetter

@@ -48,14 +45,6 @@ type DaemonCfg struct {
    DefaultAddressPool []*ipamutils.NetworkToSplit
}

// ClusterCfg represents cluster configuration
type ClusterCfg struct {
    Watcher   discovery.Watcher
    Address   string
    Discovery string
    Heartbeat uint64
}

// LoadDefaultScopes loads default scope configs for scopes which
// doesn't have explicit user specified configs.
func (c *Config) LoadDefaultScopes(dataDir string) {

@@ -144,76 +133,6 @@ func OptionLabels(labels []string) Option {
    }
}

// OptionKVProvider function returns an option setter for kvstore provider
func OptionKVProvider(provider string) Option {
    return func(c *Config) {
        logrus.Debugf("Option OptionKVProvider: %s", provider)
        if _, ok := c.Scopes[datastore.GlobalScope]; !ok {
            c.Scopes[datastore.GlobalScope] = &datastore.ScopeCfg{}
        }
        c.Scopes[datastore.GlobalScope].Client.Provider = strings.TrimSpace(provider)
    }
}

// OptionKVProviderURL function returns an option setter for kvstore url
func OptionKVProviderURL(url string) Option {
    return func(c *Config) {
        logrus.Debugf("Option OptionKVProviderURL: %s", url)
        if _, ok := c.Scopes[datastore.GlobalScope]; !ok {
            c.Scopes[datastore.GlobalScope] = &datastore.ScopeCfg{}
        }
        c.Scopes[datastore.GlobalScope].Client.Address = strings.TrimSpace(url)
    }
}

// OptionKVOpts function returns an option setter for kvstore options
func OptionKVOpts(opts map[string]string) Option {
    return func(c *Config) {
        if opts["kv.cacertfile"] != "" && opts["kv.certfile"] != "" && opts["kv.keyfile"] != "" {
            logrus.Info("Option Initializing KV with TLS")
            tlsConfig, err := tlsconfig.Client(tlsconfig.Options{
                CAFile:   opts["kv.cacertfile"],
                CertFile: opts["kv.certfile"],
                KeyFile:  opts["kv.keyfile"],
            })
            if err != nil {
                logrus.Errorf("Unable to set up TLS: %s", err)
                return
            }
            if _, ok := c.Scopes[datastore.GlobalScope]; !ok {
                c.Scopes[datastore.GlobalScope] = &datastore.ScopeCfg{}
            }
            if c.Scopes[datastore.GlobalScope].Client.Config == nil {
                c.Scopes[datastore.GlobalScope].Client.Config = &store.Config{TLS: tlsConfig}
            } else {
                c.Scopes[datastore.GlobalScope].Client.Config.TLS = tlsConfig
            }
            // Workaround libkv/etcd bug for https
            c.Scopes[datastore.GlobalScope].Client.Config.ClientTLS = &store.ClientTLSConfig{
                CACertFile: opts["kv.cacertfile"],
                CertFile:   opts["kv.certfile"],
                KeyFile:    opts["kv.keyfile"],
            }
        } else {
            logrus.Info("Option Initializing KV without TLS")
        }
    }
}

// OptionDiscoveryWatcher function returns an option setter for discovery watcher
func OptionDiscoveryWatcher(watcher discovery.Watcher) Option {
    return func(c *Config) {
        c.Cluster.Watcher = watcher
    }
}

// OptionDiscoveryAddress function returns an option setter for self discovery address
func OptionDiscoveryAddress(address string) Option {
    return func(c *Config) {
        c.Cluster.Address = address
    }
}

// OptionDataDir function returns an option setter for data folder
func OptionDataDir(dataDir string) Option {
    return func(c *Config) {
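The removed OptionKVProvider/OptionKVProviderURL setters follow libnetwork's functional-option pattern. A stripped-down, runnable sketch of that pattern with a stand-in Config (the real options write into Scopes[datastore.GlobalScope].Client rather than flat fields):

package main

import "fmt"

// Config and Option sketch the functional-option style used by the removed setters.
type Config struct {
    Provider string
    Address  string
}

type Option func(*Config)

func OptionKVProvider(p string) Option    { return func(c *Config) { c.Provider = p } }
func OptionKVProviderURL(u string) Option { return func(c *Config) { c.Address = u } }

// New applies each option in order to a fresh Config.
func New(opts ...Option) *Config {
    c := &Config{}
    for _, o := range opts {
        o(c)
    }
    return c
}

func main() {
    c := New(OptionKVProvider("consul"), OptionKVProviderURL("localhost:8500"))
    fmt.Printf("%+v\n", c)
}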
@@ -1,11 +1,9 @@
package config

import (
    "os"
    "strings"
    "testing"

    "github.com/docker/docker/libnetwork/datastore"
    "github.com/docker/docker/libnetwork/netlabel"
)

@@ -54,90 +52,3 @@ func TestValidName(t *testing.T) {
        t.Fatal("Name validation succeeds for a case when it is expected to fail")
    }
}

func TestTLSConfiguration(t *testing.T) {
    cert := `-----BEGIN CERTIFICATE-----
MIIDCDCCAfKgAwIBAgIICifG7YeiQOEwCwYJKoZIhvcNAQELMBIxEDAOBgNVBAMT
B1Rlc3QgQ0EwHhcNMTUxMDAxMjMwMDAwWhcNMjAwOTI5MjMwMDAwWjASMRAwDgYD
VQQDEwdUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1wRC
O+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4+zE9h80aC4hz+6caRpds
+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhRSoSi3nY+B7F2E8cuz14q
V2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZrpXUyXxAvzXfpFXo1RhSb
UywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUerVYrCPq8vqfn//01qz55
Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHojxOpXTBepUCIJLbtNnWFT
V44t9gh5IqIWtoBReQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/
BAgwBgEB/wIBAjAdBgNVHQ4EFgQUZKUI8IIjIww7X/6hvwggQK4bD24wHwYDVR0j
BBgwFoAUZKUI8IIjIww7X/6hvwggQK4bD24wCwYJKoZIhvcNAQELA4IBAQDES2cz
7sCQfDCxCIWH7X8kpi/JWExzUyQEJ0rBzN1m3/x8ySRxtXyGekimBqQwQdFqlwMI
xzAQKkh3ue8tNSzRbwqMSyH14N1KrSxYS9e9szJHfUasoTpQGPmDmGIoRJuq1h6M
ej5x1SCJ7GWCR6xEXKUIE9OftXm9TdFzWa7Ja3OHz/mXteii8VXDuZ5ACq6EE5bY
8sP4gcICfJ5fTrpTlk9FIqEWWQrCGa5wk95PGEj+GJpNogjXQ97wVoo/Y3p1brEn
t5zjN9PAq4H1fuCMdNNA+p1DHNwd+ELTxcMAnb2ajwHvV6lKPXutrTFc4umJToBX
FpTxDmJHEV4bzUzh
-----END CERTIFICATE-----
`
    key := `-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEA1wRCO+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4
+zE9h80aC4hz+6caRpds+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhR
SoSi3nY+B7F2E8cuz14qV2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZr
pXUyXxAvzXfpFXo1RhSbUywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUe
rVYrCPq8vqfn//01qz55Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHoj
xOpXTBepUCIJLbtNnWFTV44t9gh5IqIWtoBReQIDAQABAoIBAHSWipORGp/uKFXj
i/mut776x8ofsAxhnLBARQr93ID+i49W8H7EJGkOfaDjTICYC1dbpGrri61qk8sx
qX7p3v/5NzKwOIfEpirgwVIqSNYe/ncbxnhxkx6tXtUtFKmEx40JskvSpSYAhmmO
1XSx0E/PWaEN/nLgX/f1eWJIlxlQkk3QeqL+FGbCXI48DEtlJ9+MzMu4pAwZTpj5
5qtXo5JJ0jRGfJVPAOznRsYqv864AhMdMIWguzk6EGnbaCWwPcfcn+h9a5LMdony
MDHfBS7bb5tkF3+AfnVY3IBMVx7YlsD9eAyajlgiKu4zLbwTRHjXgShy+4Oussz0
ugNGnkECgYEA/hi+McrZC8C4gg6XqK8+9joD8tnyDZDz88BQB7CZqABUSwvjDqlP
L8hcwo/lzvjBNYGkqaFPUICGWKjeCtd8pPS2DCVXxDQX4aHF1vUur0uYNncJiV3N
XQz4Iemsa6wnKf6M67b5vMXICw7dw0HZCdIHD1hnhdtDz0uVpeevLZ8CgYEA2KCT
Y43lorjrbCgMqtlefkr3GJA9dey+hTzCiWEOOqn9RqGoEGUday0sKhiLofOgmN2B
LEukpKIey8s+Q/cb6lReajDVPDsMweX8i7hz3Wa4Ugp4Xa5BpHqu8qIAE2JUZ7bU
t88aQAYE58pUF+/Lq1QzAQdrjjzQBx6SrBxieecCgYEAvukoPZEC8mmiN1VvbTX+
QFHmlZha3QaDxChB+QUe7bMRojEUL/fVnzkTOLuVFqSfxevaI/km9n0ac5KtAchV
xjp2bTnBb5EUQFqjopYktWA+xO07JRJtMfSEmjZPbbay1kKC7rdTfBm961EIHaRj
xZUf6M+rOE8964oGrdgdLlECgYEA046GQmx6fh7/82FtdZDRQp9tj3SWQUtSiQZc
qhO59Lq8mjUXz+MgBuJXxkiwXRpzlbaFB0Bca1fUoYw8o915SrDYf/Zu2OKGQ/qa
V81sgiVmDuEgycR7YOlbX6OsVUHrUlpwhY3hgfMe6UtkMvhBvHF/WhroBEIJm1pV
PXZ/CbMCgYEApNWVktFBjOaYfY6SNn4iSts1jgsQbbpglg3kT7PLKjCAhI6lNsbk
dyT7ut01PL6RaW4SeQWtrJIVQaM6vF3pprMKqlc5XihOGAmVqH7rQx9rtQB5TicL
BFrwkQE4HQtQBV60hYQUzzlSk44VFDz+jxIEtacRHaomDRh2FtOTz+I=
-----END RSA PRIVATE KEY-----
`
    certFile, err := os.CreateTemp("", "cert")
    if err != nil {
        t.Fatalf("Failed to setup temp file: %s", err)
    }
    defer os.Remove(certFile.Name())
    certFile.Write([]byte(cert))
    certFile.Close()
    keyFile, err := os.CreateTemp("", "key")
    if err != nil {
        t.Fatalf("Failed to setup temp file: %s", err)
    }
    defer os.Remove(keyFile.Name())
    keyFile.Write([]byte(key))
    keyFile.Close()

    c := &Config{Scopes: map[string]*datastore.ScopeCfg{}}
    l := map[string]string{
        "kv.cacertfile": certFile.Name(),
        "kv.certfile":   certFile.Name(),
        "kv.keyfile":    keyFile.Name(),
    }
    f := OptionKVOpts(l)
    f(c)
    if _, ok := c.Scopes[datastore.GlobalScope]; !ok {
        t.Fatal("GlobalScope not established")
    }

    if c.Scopes[datastore.GlobalScope].Client.Config.TLS == nil {
        t.Fatal("TLS is nil")
    }
    if c.Scopes[datastore.GlobalScope].Client.Config.TLS.RootCAs == nil {
        t.Fatal("TLS.RootCAs is nil")
    }
    if len(c.Scopes[datastore.GlobalScope].Client.Config.TLS.Certificates) != 1 {
        t.Fatal("TLS.Certificates is not length 1")
    }
}
@@ -7,6 +7,3 @@ title = "LibNetwork Configuration file"
|
|||
Address = "Cluster-wide reachable Host IP"
|
||||
[datastore]
|
||||
embedded = false
|
||||
[datastore.client]
|
||||
provider = "consul"
|
||||
Address = "localhost:8500"
|
||||
|
|
|
@@ -59,13 +59,11 @@ import (
|
|||
"github.com/docker/docker/libnetwork/discoverapi"
|
||||
"github.com/docker/docker/libnetwork/driverapi"
|
||||
"github.com/docker/docker/libnetwork/drvregistry"
|
||||
"github.com/docker/docker/libnetwork/hostdiscovery"
|
||||
"github.com/docker/docker/libnetwork/ipamapi"
|
||||
"github.com/docker/docker/libnetwork/netlabel"
|
||||
"github.com/docker/docker/libnetwork/options"
|
||||
"github.com/docker/docker/libnetwork/osl"
|
||||
"github.com/docker/docker/libnetwork/types"
|
||||
"github.com/docker/docker/pkg/discovery"
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
"github.com/docker/docker/pkg/plugins"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
|
@@ -161,7 +159,6 @@ type controller struct {
|
|||
sandboxes sandboxTable
|
||||
cfg *config.Config
|
||||
stores []datastore.DataStore
|
||||
discovery hostdiscovery.HostDiscovery
|
||||
extKeyListener net.Listener
|
||||
watchCh chan *endpoint
|
||||
unWatchCh chan *endpoint
|
||||
|
@@ -228,14 +225,6 @@ func New(cfgOptions ...config.Option) (NetworkController, error) {
|
|||
|
||||
c.drvRegistry = drvRegistry
|
||||
|
||||
if c.cfg != nil && c.cfg.Cluster.Watcher != nil {
|
||||
if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
|
||||
// Failing to initialize discovery is a bad situation to be in.
|
||||
// But it cannot fail creating the Controller
|
||||
logrus.Errorf("Failed to Initialize Discovery : %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
c.WalkNetworks(populateSpecial)
|
||||
|
||||
// Reserve pools first before doing cleanup. Otherwise the
|
||||
|
@@ -518,13 +507,6 @@ func (c *controller) ReloadConfiguration(cfgOptions ...config.Option) error {
|
|||
}
|
||||
return false
|
||||
})
|
||||
|
||||
if c.discovery == nil && c.cfg.Cluster.Watcher != nil {
|
||||
if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
|
||||
logrus.Errorf("Failed to Initialize Discovery after configuration update: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@@ -554,40 +536,6 @@ func (c *controller) BuiltinIPAMDrivers() []string {
|
|||
return drivers
|
||||
}
|
||||
|
||||
func (c *controller) clusterHostID() string {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
if c.cfg == nil || c.cfg.Cluster.Address == "" {
|
||||
return ""
|
||||
}
|
||||
addr := strings.Split(c.cfg.Cluster.Address, ":")
|
||||
return addr[0]
|
||||
}
|
||||
|
||||
func (c *controller) initDiscovery(watcher discovery.Watcher) error {
|
||||
if c.cfg == nil {
|
||||
return fmt.Errorf("discovery initialization requires a valid configuration")
|
||||
}
|
||||
|
||||
c.discovery = hostdiscovery.NewHostDiscovery(watcher)
|
||||
return c.discovery.Watch(c.activeCallback, c.hostJoinCallback, c.hostLeaveCallback)
|
||||
}
|
||||
|
||||
func (c *controller) activeCallback() {
|
||||
ds := c.getStore(datastore.GlobalScope)
|
||||
if ds != nil && !ds.Active() {
|
||||
ds.RestartWatch()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *controller) hostJoinCallback(nodes []net.IP) {
|
||||
c.processNodeDiscovery(nodes, true)
|
||||
}
|
||||
|
||||
func (c *controller) hostLeaveCallback(nodes []net.IP) {
|
||||
c.processNodeDiscovery(nodes, false)
|
||||
}
|
||||
|
||||
func (c *controller) processNodeDiscovery(nodes []net.IP, add bool) {
|
||||
c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool {
|
||||
c.pushNodeDiscovery(driver, capability, nodes, add)
|
||||
|
@@ -597,15 +545,9 @@ func (c *controller) processNodeDiscovery(nodes []net.IP, add bool) {
|
|||
|
||||
func (c *controller) pushNodeDiscovery(d driverapi.Driver, cap driverapi.Capability, nodes []net.IP, add bool) {
|
||||
var self net.IP
|
||||
if c.cfg != nil {
|
||||
addr := strings.Split(c.cfg.Cluster.Address, ":")
|
||||
self = net.ParseIP(addr[0])
|
||||
// if external kvstore is not configured, try swarm-mode config
|
||||
if self == nil {
|
||||
if agent := c.getAgent(); agent != nil {
|
||||
self = net.ParseIP(agent.advertiseAddr)
|
||||
}
|
||||
}
|
||||
// try swarm-mode config
|
||||
if agent := c.getAgent(); agent != nil {
|
||||
self = net.ParseIP(agent.advertiseAddr)
|
||||
}
|
||||
|
||||
if d == nil || cap.ConnectivityScope != datastore.GlobalScope || nodes == nil {
|
||||
|
@@ -662,14 +604,6 @@ func (c *controller) GetPluginGetter() plugingetter.PluginGetter {
|
|||
}
|
||||
|
||||
func (c *controller) RegisterDriver(networkType string, driver driverapi.Driver, capability driverapi.Capability) error {
|
||||
c.Lock()
|
||||
hd := c.discovery
|
||||
c.Unlock()
|
||||
|
||||
if hd != nil {
|
||||
c.pushNodeDiscovery(driver, capability, hd.Fetch(), true)
|
||||
}
|
||||
|
||||
c.agentDriverNotify(driver)
|
||||
return nil
|
||||
}
|
||||
|
|
|
@@ -114,7 +114,7 @@ type ScopeClientCfg struct {
|
|||
const (
|
||||
// LocalScope indicates to store the KV object in local datastore such as boltdb
|
||||
LocalScope = "local"
|
||||
// GlobalScope indicates to store the KV object in global datastore such as consul/etcd/zookeeper
|
||||
// GlobalScope indicates to store the KV object in global datastore
|
||||
GlobalScope = "global"
|
||||
// SwarmScope is not indicating a datastore location. It is defined here
|
||||
// along with the other two scopes just for consistency.
|
||||
|
|
|
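For context, the local scope remains backed by boltdb after this change. A minimal sketch of such a configuration, assuming the `datastore.ScopeCfg`/`ScopeClientCfg` types shown above and the libkv `store.Config` used elsewhere in this diff; the database path is illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/libnetwork/datastore"
	"github.com/docker/libkv/store"
)

func main() {
	// Local-scope datastore backed by boltdb; the file path is illustrative.
	cfg := datastore.ScopeCfg{
		Client: datastore.ScopeClientCfg{
			Provider: "boltdb",
			Address:  "/var/lib/docker/network/files/local-kv.db",
			Config: &store.Config{
				Bucket:            "libnetwork",
				ConnectionTimeout: 3 * time.Second,
			},
		},
	}
	fmt.Println("local scope provider:", cfg.Client.Provider)
}
```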
@@ -1,153 +0,0 @@
|
|||
# Overlay Driver
|
||||
|
||||
### Design
|
||||
TODO
|
||||
|
||||
### Multi-Host Overlay Driver Quick Start
|
||||
|
||||
This example provisions two Docker hosts with the **experimental** Libnetwork overlay network driver.
|
||||
|
||||
### Pre-Requisites
|
||||
|
||||
- Kernel >= 3.16
|
||||
- Experimental Docker client
|
||||
|
||||
### Install Docker Experimental
|
||||
|
||||
Follow Docker experimental installation instructions at: [https://github.com/docker/docker/tree/master/experimental](https://github.com/docker/docker/tree/master/experimental)
|
||||
|
||||
To ensure you are running the experimental Docker branch, check the version and look for the experimental tag:
|
||||
|
||||
```
|
||||
$ docker -v
|
||||
Docker version 1.8.0-dev, build f39b9a0, experimental
|
||||
```
|
||||
|
||||
### Install and Bootstrap K/V Store
|
||||
|
||||
|
||||
Multi-host networking uses a pluggable Key-Value store backend to distribute state using `libkv`.
|
||||
`libkv` supports multiple pluggable backends such as `consul`, `etcd` & `zookeeper` (more to come).
|
||||
|
||||
In this example we will use `consul`
|
||||
|
||||
Install:
|
||||
|
||||
```
|
||||
$ curl -OL https://dl.bintray.com/mitchellh/consul/0.5.2_linux_amd64.zip
|
||||
$ unzip 0.5.2_linux_amd64.zip
|
||||
$ mv consul /usr/local/bin/
|
||||
```
|
||||
|
||||
**host-1** Start Consul as a server in bootstrap mode:
|
||||
|
||||
```
|
||||
$ consul agent -server -bootstrap -data-dir /tmp/consul -bind=<host-1-ip-address>
|
||||
```
|
||||
|
||||
**host-2** Start the Consul agent:
|
||||
|
||||
```
|
||||
$ consul agent -data-dir /tmp/consul -bind=<host-2-ip-address>
|
||||
$ consul join <host-1-ip-address>
|
||||
```
|
||||
|
||||
|
||||
### Start the Docker Daemon with the Network Driver Daemon Flags
|
||||
|
||||
**host-1** Docker daemon:
|
||||
|
||||
```
|
||||
$ docker -d --kv-store=consul:localhost:8500 --label=com.docker.network.driver.overlay.bind_interface=eth0
|
||||
```
|
||||
|
||||
**host-2** Start the Docker Daemon with the neighbor ID configuration:
|
||||
|
||||
```
|
||||
$ docker -d --kv-store=consul:localhost:8500 --label=com.docker.network.driver.overlay.bind_interface=eth0 --label=com.docker.network.driver.overlay.neighbor_ip=<host-1-ip-address>
|
||||
```
|
||||
|
||||
### QuickStart Containers Attached to a Network
|
||||
|
||||
**host-1** Start a container that publishes a service svc1 in the network dev that is managed by the overlay driver.
|
||||
|
||||
```
|
||||
$ docker run -i -t --publish-service=svc1.dev.overlay debian
|
||||
root@21578ff721a9:/# ip add show eth0
|
||||
34: eth0: <BROADCAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
|
||||
link/ether 02:42:ec:41:35:bf brd ff:ff:ff:ff:ff:ff
|
||||
inet 172.21.0.16/16 scope global eth0
|
||||
valid_lft forever preferred_lft forever
|
||||
inet6 fe80::42:ecff:fe41:35bf/64 scope link
|
||||
valid_lft forever preferred_lft forever
|
||||
```
|
||||
|
||||
**host-2** Start a container that publishes a service svc2 in the network dev that is managed by the overlay driver.
|
||||
|
||||
```
|
||||
$ docker run -i -t --publish-service=svc2.dev.overlay debian
|
||||
root@d217828eb876:/# ping svc1
|
||||
PING svc1 (172.21.0.16): 56 data bytes
|
||||
64 bytes from 172.21.0.16: icmp_seq=0 ttl=64 time=0.706 ms
|
||||
64 bytes from 172.21.0.16: icmp_seq=1 ttl=64 time=0.687 ms
|
||||
64 bytes from 172.21.0.16: icmp_seq=2 ttl=64 time=0.841 ms
|
||||
```
|
||||
### Detailed Setup
|
||||
|
||||
You can also set up networks and services and then attach a running container to them.
|
||||
|
||||
**host-1**:
|
||||
|
||||
```
|
||||
docker network create -d overlay prod
|
||||
docker network ls
|
||||
docker network info prod
|
||||
docker service publish db1.prod
|
||||
cid=$(docker run -itd -p 8000:8000 ubuntu)
|
||||
docker service attach $cid db1.prod
|
||||
```
|
||||
|
||||
**host-2**:
|
||||
|
||||
```
|
||||
docker network ls
|
||||
docker network info prod
|
||||
docker service publish db2.prod
|
||||
cid=$(docker run -itd -p 8000:8000 ubuntu)
|
||||
docker service attach $cid db2.prod
|
||||
```
|
||||
|
||||
Once the containers are started, the containers on `host-1` and `host-2` should be able to ping one another via IP, service name, or \<service name>.\<network name>
|
||||
|
||||
|
||||
View information about the networks and services using `ls` and `info` subcommands like so:
|
||||
|
||||
```
|
||||
$ docker service ls
|
||||
SERVICE ID NAME NETWORK CONTAINER
|
||||
0771deb5f84b db2 prod 0e54a527f22c
|
||||
aea23b224acf db1 prod 4b0a309ca311
|
||||
|
||||
$ docker network info prod
|
||||
Network Id: 5ac68be2518959b48ad102e9ec3d8f42fb2ec72056aa9592eb5abd0252203012
|
||||
Name: prod
|
||||
Type: overlay
|
||||
|
||||
$ docker service info db1.prod
|
||||
Service Id: aea23b224acfd2da9b893870e0d632499188a1a4b3881515ba042928a9d3f465
|
||||
Name: db1
|
||||
Network: prod
|
||||
```
|
||||
|
||||
To detach and unpublish a service:
|
||||
|
||||
```
|
||||
$ docker service detach $cid <service>.<network>
|
||||
$ docker service unpublish <service>.<network>
|
||||
|
||||
# Example:
|
||||
$ docker service detach $cid db2.prod
|
||||
$ docker service unpublish db2.prod
|
||||
```
|
||||
|
||||
To reiterate, this is experimental, and will be under active development.
|
|
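The quick start above depends on libkv's pluggable key-value backends. As a rough sketch of that dependency, assuming the `docker/libkv` API imported elsewhere in this diff (the consul address and key are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/libkv"
	"github.com/docker/libkv/store"
	"github.com/docker/libkv/store/consul"
)

func main() {
	// Register the consul backend, then open a store client against it.
	consul.Register()
	kv, err := libkv.NewStore(store.CONSUL, []string{"localhost:8500"}, &store.Config{
		ConnectionTimeout: 10 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	// Write and read back a key to confirm the backend is reachable.
	if err := kv.Put("libnetwork/example", []byte("hello"), nil); err != nil {
		panic(err)
	}
	pair, err := kv.Get("libnetwork/example")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s = %s\n", pair.Key, pair.Value)
}
```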
@@ -8,23 +8,24 @@ import (
|
|||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
"github.com/docker/docker/libnetwork/datastore"
|
||||
"github.com/docker/docker/libnetwork/discoverapi"
|
||||
"github.com/docker/docker/libnetwork/driverapi"
|
||||
"github.com/docker/docker/libnetwork/netlabel"
|
||||
"github.com/docker/docker/pkg/plugingetter"
|
||||
"github.com/docker/libkv/store/consul"
|
||||
"github.com/docker/libkv/store"
|
||||
"github.com/docker/libkv/store/boltdb"
|
||||
"github.com/vishvananda/netlink/nl"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func init() {
|
||||
consul.Register()
|
||||
boltdb.Register()
|
||||
}
|
||||
|
||||
type driverTester struct {
|
||||
|
@@ -37,10 +38,25 @@ const testNetworkType = "overlay"
|
|||
func setupDriver(t *testing.T) *driverTester {
|
||||
dt := &driverTester{t: t}
|
||||
config := make(map[string]interface{})
|
||||
|
||||
tmp, err := os.CreateTemp(t.TempDir(), "libnetwork-")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating temp file: %v", err)
|
||||
}
|
||||
err = tmp.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("Error closing temp file: %v", err)
|
||||
}
|
||||
defaultPrefix := filepath.Join(os.TempDir(), "libnetwork", "test", "overlay")
|
||||
|
||||
config[netlabel.GlobalKVClient] = discoverapi.DatastoreConfigData{
|
||||
Scope: datastore.GlobalScope,
|
||||
Provider: "consul",
|
||||
Address: "127.0.0.01:8500",
|
||||
Provider: "boltdb",
|
||||
Address: filepath.Join(defaultPrefix, filepath.Base(tmp.Name())),
|
||||
Config: &store.Config{
|
||||
Bucket: "libnetwork",
|
||||
ConnectionTimeout: 3 * time.Second,
|
||||
},
|
||||
}
|
||||
|
||||
if err := Init(dt, config); err != nil {
|
||||
|
|
|
@@ -54,7 +54,6 @@ type endpoint struct {
|
|||
iface *endpointInterface
|
||||
joinInfo *endpointJoinInfo
|
||||
sandboxID string
|
||||
locator string
|
||||
exposedPorts []types.TransportPort
|
||||
anonymous bool
|
||||
disableResolution bool
|
||||
|
@@ -90,7 +89,6 @@ func (ep *endpoint) MarshalJSON() ([]byte, error) {
|
|||
epMap["generic"] = ep.generic
|
||||
}
|
||||
epMap["sandbox"] = ep.sandboxID
|
||||
epMap["locator"] = ep.locator
|
||||
epMap["anonymous"] = ep.anonymous
|
||||
epMap["disableResolution"] = ep.disableResolution
|
||||
epMap["myAliases"] = ep.myAliases
|
||||
|
@@ -190,9 +188,6 @@ func (ep *endpoint) UnmarshalJSON(b []byte) (err error) {
|
|||
if v, ok := epMap["disableResolution"]; ok {
|
||||
ep.disableResolution = v.(bool)
|
||||
}
|
||||
if l, ok := epMap["locator"]; ok {
|
||||
ep.locator = l.(string)
|
||||
}
|
||||
|
||||
if sn, ok := epMap["svcName"]; ok {
|
||||
ep.svcName = sn.(string)
|
||||
|
@@ -239,7 +234,6 @@ func (ep *endpoint) CopyTo(o datastore.KVObject) error {
|
|||
dstEp.name = ep.name
|
||||
dstEp.id = ep.id
|
||||
dstEp.sandboxID = ep.sandboxID
|
||||
dstEp.locator = ep.locator
|
||||
dstEp.dbIndex = ep.dbIndex
|
||||
dstEp.dbExists = ep.dbExists
|
||||
dstEp.anonymous = ep.anonymous
|
||||
|
|
|
@@ -1,122 +0,0 @@
|
|||
package hostdiscovery
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
mapset "github.com/deckarep/golang-set"
|
||||
"github.com/docker/docker/pkg/discovery"
|
||||
|
||||
// Including KV
|
||||
"github.com/docker/docker/libnetwork/types"
|
||||
_ "github.com/docker/docker/pkg/discovery/kv" // register all the things with host discovery
|
||||
"github.com/docker/libkv/store/consul"
|
||||
"github.com/docker/libkv/store/etcd"
|
||||
"github.com/docker/libkv/store/zookeeper"
|
||||
)
|
||||
|
||||
type hostDiscovery struct {
|
||||
watcher discovery.Watcher
|
||||
nodes mapset.Set
|
||||
stopChan chan struct{}
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func init() {
|
||||
consul.Register()
|
||||
etcd.Register()
|
||||
zookeeper.Register()
|
||||
}
|
||||
|
||||
// NewHostDiscovery function creates a host discovery object
|
||||
func NewHostDiscovery(watcher discovery.Watcher) HostDiscovery {
|
||||
return &hostDiscovery{watcher: watcher, nodes: mapset.NewSet(), stopChan: make(chan struct{})}
|
||||
}
|
||||
|
||||
func (h *hostDiscovery) Watch(activeCallback ActiveCallback, joinCallback JoinCallback, leaveCallback LeaveCallback) error {
|
||||
h.Lock()
|
||||
d := h.watcher
|
||||
h.Unlock()
|
||||
if d == nil {
|
||||
return types.BadRequestErrorf("invalid discovery watcher")
|
||||
}
|
||||
discoveryCh, errCh := d.Watch(h.stopChan)
|
||||
go h.monitorDiscovery(discoveryCh, errCh, activeCallback, joinCallback, leaveCallback)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *hostDiscovery) monitorDiscovery(ch <-chan discovery.Entries, errCh <-chan error,
|
||||
activeCallback ActiveCallback, joinCallback JoinCallback, leaveCallback LeaveCallback) {
|
||||
for {
|
||||
select {
|
||||
case entries := <-ch:
|
||||
h.processCallback(entries, activeCallback, joinCallback, leaveCallback)
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
logrus.Errorf("discovery error: %v", err)
|
||||
}
|
||||
case <-h.stopChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *hostDiscovery) StopDiscovery() error {
|
||||
h.Lock()
|
||||
stopChan := h.stopChan
|
||||
h.watcher = nil
|
||||
h.Unlock()
|
||||
|
||||
close(stopChan)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *hostDiscovery) processCallback(entries discovery.Entries,
|
||||
activeCallback ActiveCallback, joinCallback JoinCallback, leaveCallback LeaveCallback) {
|
||||
updated := hosts(entries)
|
||||
h.Lock()
|
||||
existing := h.nodes
|
||||
added, removed := diff(existing, updated)
|
||||
h.nodes = updated
|
||||
h.Unlock()
|
||||
|
||||
activeCallback()
|
||||
if len(added) > 0 {
|
||||
joinCallback(added)
|
||||
}
|
||||
if len(removed) > 0 {
|
||||
leaveCallback(removed)
|
||||
}
|
||||
}
|
||||
|
||||
func diff(existing mapset.Set, updated mapset.Set) (added []net.IP, removed []net.IP) {
|
||||
addSlice := updated.Difference(existing).ToSlice()
|
||||
removeSlice := existing.Difference(updated).ToSlice()
|
||||
for _, ip := range addSlice {
|
||||
added = append(added, net.ParseIP(ip.(string)))
|
||||
}
|
||||
for _, ip := range removeSlice {
|
||||
removed = append(removed, net.ParseIP(ip.(string)))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (h *hostDiscovery) Fetch() []net.IP {
|
||||
h.Lock()
|
||||
defer h.Unlock()
|
||||
ips := []net.IP{}
|
||||
for _, ipstr := range h.nodes.ToSlice() {
|
||||
ips = append(ips, net.ParseIP(ipstr.(string)))
|
||||
}
|
||||
return ips
|
||||
}
|
||||
|
||||
func hosts(entries discovery.Entries) mapset.Set {
|
||||
hosts := mapset.NewSet()
|
||||
for _, entry := range entries {
|
||||
hosts.Add(entry.Host)
|
||||
}
|
||||
return hosts
|
||||
}
|
|
@@ -1,22 +0,0 @@
|
|||
package hostdiscovery
|
||||
|
||||
import "net"
|
||||
|
||||
// JoinCallback provides a callback event for new node joining the cluster
|
||||
type JoinCallback func(entries []net.IP)
|
||||
|
||||
// ActiveCallback provides a callback event for active discovery event
|
||||
type ActiveCallback func()
|
||||
|
||||
// LeaveCallback provides a callback event for node leaving the cluster
|
||||
type LeaveCallback func(entries []net.IP)
|
||||
|
||||
// HostDiscovery primary interface
|
||||
type HostDiscovery interface {
|
||||
//Watch Node join and leave cluster events
|
||||
Watch(activeCallback ActiveCallback, joinCallback JoinCallback, leaveCallback LeaveCallback) error
|
||||
// StopDiscovery stops the discovery process
|
||||
StopDiscovery() error
|
||||
// Fetch returns a list of host IPs that are currently discovered
|
||||
Fetch() []net.IP
|
||||
}
|
|
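A hedged sketch of how the removed interface was wired together, using `NewHostDiscovery` from the deleted implementation above and a watcher built with `pkg/discovery` (the consul URL and timings are illustrative):

```go
package main

import (
	"log"
	"net"
	"time"

	"github.com/docker/docker/libnetwork/hostdiscovery"
	"github.com/docker/docker/pkg/discovery"
	_ "github.com/docker/docker/pkg/discovery/kv" // registers the consul/etcd/zk watcher backends
)

func main() {
	// Build a discovery watcher; the consul address and heartbeat/ttl are illustrative.
	backend, err := discovery.New("consul://localhost:8500/prefix", 20*time.Second, 60*time.Second, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Wire the three callbacks defined by the removed HostDiscovery interface.
	hd := hostdiscovery.NewHostDiscovery(backend)
	err = hd.Watch(
		func() { log.Println("discovery active") },
		func(nodes []net.IP) { log.Printf("nodes joined: %v", nodes) },
		func(nodes []net.IP) { log.Printf("nodes left: %v", nodes) },
	)
	if err != nil {
		log.Fatal(err)
	}
}
```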
@@ -1,82 +0,0 @@
|
|||
package hostdiscovery
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
mapset "github.com/deckarep/golang-set"
|
||||
|
||||
"github.com/docker/docker/pkg/discovery"
|
||||
)
|
||||
|
||||
func TestDiff(t *testing.T) {
|
||||
existing := mapset.NewSetFromSlice([]interface{}{"1.1.1.1", "2.2.2.2"})
|
||||
addedIP := "3.3.3.3"
|
||||
updated := existing.Clone()
|
||||
updated.Add(addedIP)
|
||||
|
||||
added, removed := diff(existing, updated)
|
||||
if len(added) != 1 {
|
||||
t.Fatalf("Diff failed for an Add update. Expecting 1 element, but got %d elements", len(added))
|
||||
}
|
||||
if added[0].String() != addedIP {
|
||||
t.Fatalf("Expecting : %v, Got : %v", addedIP, added[0])
|
||||
}
|
||||
if len(removed) > 0 {
|
||||
t.Fatalf("Diff failed for remove use-case. Expecting 0 element, but got %d elements", len(removed))
|
||||
}
|
||||
|
||||
updated = mapset.NewSetFromSlice([]interface{}{addedIP})
|
||||
added, removed = diff(existing, updated)
|
||||
if len(removed) != 2 {
|
||||
t.Fatalf("Diff failed for a remove update. Expecting 2 element, but got %d elements", len(removed))
|
||||
}
|
||||
if len(added) != 1 {
|
||||
t.Fatalf("Diff failed for add use-case. Expecting 1 element, but got %d elements", len(added))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddedCallback(t *testing.T) {
|
||||
hd := hostDiscovery{}
|
||||
hd.nodes = mapset.NewSetFromSlice([]interface{}{"1.1.1.1"})
|
||||
update := []*discovery.Entry{{Host: "1.1.1.1", Port: "0"}, {Host: "2.2.2.2", Port: "0"}}
|
||||
|
||||
added := false
|
||||
removed := false
|
||||
hd.processCallback(update, func() {}, func(hosts []net.IP) { added = true }, func(hosts []net.IP) { removed = true })
|
||||
if !added {
|
||||
t.Fatal("Expecting an Added callback notification. But none received")
|
||||
}
|
||||
if removed {
|
||||
t.Fatal("Not expecting a Removed callback notification. But received a callback")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemovedCallback(t *testing.T) {
|
||||
hd := hostDiscovery{}
|
||||
hd.nodes = mapset.NewSetFromSlice([]interface{}{"1.1.1.1", "2.2.2.2"})
|
||||
update := []*discovery.Entry{{Host: "1.1.1.1", Port: "0"}}
|
||||
|
||||
added := false
|
||||
removed := false
|
||||
hd.processCallback(update, func() {}, func(hosts []net.IP) { added = true }, func(hosts []net.IP) { removed = true })
|
||||
if added {
|
||||
t.Fatal("Not expecting an Added callback notification. But received a callback")
|
||||
}
|
||||
if !removed {
|
||||
t.Fatal("Expecting a Removed callback notification. But none received")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoCallback(t *testing.T) {
|
||||
hd := hostDiscovery{}
|
||||
hd.nodes = mapset.NewSetFromSlice([]interface{}{"1.1.1.1", "2.2.2.2"})
|
||||
update := []*discovery.Entry{{Host: "1.1.1.1", Port: "0"}, {Host: "2.2.2.2", Port: "0"}}
|
||||
|
||||
added := false
|
||||
removed := false
|
||||
hd.processCallback(update, func() {}, func(hosts []net.IP) { added = true }, func(hosts []net.IP) { removed = true })
|
||||
if added || removed {
|
||||
t.Fatal("Not expecting any callback notification. But received a callback")
|
||||
}
|
||||
}
|
|
@@ -1,6 +0,0 @@
|
|||
title = "LibNetwork Configuration file"
|
||||
|
||||
[cluster]
|
||||
discovery = "consul://localhost:8500"
|
||||
Address = "6.5.5.5"
|
||||
Heartbeat = 3
|
|
@@ -1175,7 +1175,6 @@ func (n *network) createEndpoint(name string, options ...EndpointOption) (Endpoi
|
|||
// Initialize ep.network with a possibly stale copy of n. We need this to get network from
|
||||
// store. But once we get it from store we will have the most uptodate copy possibly.
|
||||
ep.network = n
|
||||
ep.locator = n.getController().clusterHostID()
|
||||
ep.network, err = ep.getNetworkFromStore()
|
||||
if err != nil {
|
||||
logrus.Errorf("failed to get network during CreateEndpoint: %v", err)
|
||||
|
|
|
@@ -6,16 +6,10 @@ import (
|
|||
|
||||
"github.com/docker/docker/libnetwork/datastore"
|
||||
"github.com/docker/libkv/store/boltdb"
|
||||
"github.com/docker/libkv/store/consul"
|
||||
"github.com/docker/libkv/store/etcd"
|
||||
"github.com/docker/libkv/store/zookeeper"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func registerKVStores() {
|
||||
consul.Register()
|
||||
zookeeper.Register()
|
||||
etcd.Register()
|
||||
boltdb.Register()
|
||||
}
|
||||
|
||||
|
|
|
@@ -40,23 +40,6 @@ function net_disconnect() {
|
|||
dnet_cmd $(inst_id2port ${1}) service unpublish ${2}.${3}
|
||||
}
|
||||
|
||||
function start_consul() {
|
||||
stop_consul
|
||||
docker run -d \
|
||||
--name=pr_consul \
|
||||
-p 8500:8500 \
|
||||
-p 8300-8302:8300-8302/tcp \
|
||||
-p 8300-8302:8300-8302/udp \
|
||||
-h consul \
|
||||
progrium/consul -server -bootstrap
|
||||
sleep 2
|
||||
}
|
||||
|
||||
function stop_consul() {
|
||||
echo "consul started"
|
||||
docker rm -f pr_consul || true
|
||||
}
|
||||
|
||||
hrun() {
|
||||
local e E T oldIFS
|
||||
[[ ! "$-" =~ e ]] || e=1
|
||||
|
@@ -149,13 +132,6 @@ function start_dnet() {
|
|||
# Try discovery URLs with or without path
|
||||
neigh_ip=""
|
||||
neighbors=""
|
||||
if [ "$store" = "zookeeper" ]; then
|
||||
read discovery provider address < <(parse_discovery_str zk://${bridge_ip}:2182)
|
||||
elif [ "$store" = "etcd" ]; then
|
||||
read discovery provider address < <(parse_discovery_str etcd://${bridge_ip}:42000/custom_prefix)
|
||||
elif [ "$store" = "consul" ]; then
|
||||
read discovery provider address < <(parse_discovery_str consul://${bridge_ip}:8500/custom_prefix)
|
||||
else
|
||||
if [ "$nip" != "" ]; then
|
||||
neighbors=${nip}
|
||||
fi
|
||||
|
@@ -163,7 +139,6 @@ function start_dnet() {
|
|||
discovery=""
|
||||
provider=""
|
||||
address=""
|
||||
fi
|
||||
|
||||
if [ "$discovery" != "" ]; then
|
||||
cat > ${tomlfile} <<EOF
|
||||
|
@@ -271,38 +246,6 @@ function runc_nofail() {
|
|||
dnet_exec ${dnet} "umount /var/run/netns/c && rm /var/run/netns/c"
|
||||
}
|
||||
|
||||
function start_etcd() {
|
||||
local bridge_ip
|
||||
stop_etcd
|
||||
|
||||
bridge_ip=$(get_docker_bridge_ip)
|
||||
docker run -d \
|
||||
--net=host \
|
||||
--name=dn_etcd \
|
||||
mrjana/etcd --listen-client-urls http://0.0.0.0:42000 \
|
||||
--advertise-client-urls http://${bridge_ip}:42000
|
||||
sleep 2
|
||||
}
|
||||
|
||||
function stop_etcd() {
|
||||
docker rm -f dn_etcd || true
|
||||
}
|
||||
|
||||
function start_zookeeper() {
|
||||
stop_zookeeper
|
||||
docker run -d \
|
||||
--name=zookeeper_server \
|
||||
-p 2182:2181 \
|
||||
-h zookeeper \
|
||||
dnephin/docker-zookeeper:3.4.6
|
||||
sleep 2
|
||||
}
|
||||
|
||||
function stop_zookeeper() {
|
||||
echo "zookeeper started"
|
||||
docker rm -f zookeeper_server || true
|
||||
}
|
||||
|
||||
function test_overlay() {
|
||||
dnet_suffix=$1
|
||||
|
||||
|
|
|
@@ -1,8 +0,0 @@
|
|||
# -*- mode: sh -*-
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
@test "Test overlay network hostmode with consul" {
|
||||
test_overlay_hostmode consul
|
||||
}
|
|
@@ -1,56 +0,0 @@
|
|||
# -*- mode: sh -*-
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
@test "Test overlay network with consul" {
|
||||
test_overlay consul
|
||||
}
|
||||
|
||||
@test "Test overlay network singlehost with consul" {
|
||||
test_overlay_singlehost consul
|
||||
}
|
||||
|
||||
@test "Test overlay network with dnet restart" {
|
||||
test_overlay consul skip_rm
|
||||
docker restart dnet-1-consul
|
||||
wait_for_dnet $(inst_id2port 1) dnet-1-consul
|
||||
docker restart dnet-2-consul
|
||||
wait_for_dnet $(inst_id2port 2) dnet-2-consul
|
||||
docker restart dnet-3-consul
|
||||
wait_for_dnet $(inst_id2port 3) dnet-3-consul
|
||||
test_overlay consul skip_add
|
||||
}
|
||||
|
||||
@test "Test overlay network internal network with consul" {
|
||||
test_overlay consul internal
|
||||
}
|
||||
|
||||
@test "Test overlay network with dnet ungraceful shutdown" {
|
||||
dnet_cmd $(inst_id2port 1) network create -d overlay multihost
|
||||
start=1
|
||||
end=3
|
||||
for i in `seq ${start} ${end}`;
|
||||
do
|
||||
dnet_cmd $(inst_id2port $i) container create container_${i}
|
||||
net_connect ${i} container_${i} multihost
|
||||
done
|
||||
|
||||
hrun runc $(dnet_container_name 1 consul) $(get_sbox_id 1 container_1) "ifconfig eth0"
|
||||
container_1_ip=$(echo ${output} | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}')
|
||||
|
||||
# ungracefully kill dnet-1-consul container
|
||||
docker rm -f dnet-1-consul
|
||||
|
||||
# forcefully unpublish the service from dnet2 when dnet1 is dead.
|
||||
dnet_cmd $(inst_id2port 2) service unpublish -f container_1.multihost
|
||||
dnet_cmd $(inst_id2port 2) container create container_1
|
||||
net_connect 2 container_1 multihost
|
||||
|
||||
hrun runc $(dnet_container_name 2 consul) $(get_sbox_id 2 container_1) "ifconfig eth0"
|
||||
container_1_new_ip=$(echo ${output} | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}')
|
||||
|
||||
if [ "$container_1_ip" != "$container_1_new_ip" ]; then
|
||||
exit 1
|
||||
fi
|
||||
}
|
|
@@ -1,8 +0,0 @@
|
|||
# -*- mode: sh -*-
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
@test "Test overlay network with etcd" {
|
||||
test_overlay etcd
|
||||
}
|
|
@@ -1,8 +0,0 @@
|
|||
# -*- mode: sh -*-
|
||||
#!/usr/bin/env bats
|
||||
|
||||
load helpers
|
||||
|
||||
@test "Test overlay network with zookeeper" {
|
||||
test_overlay zookeeper
|
||||
}
|
|
@@ -52,155 +52,11 @@ function run_overlay_local_tests() {
|
|||
unset cmap[dnet-3-local]
|
||||
}
|
||||
|
||||
function run_overlay_consul_tests() {
|
||||
## Test overlay network with consul
|
||||
## Setup
|
||||
start_dnet 1 consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 1 - consul]=dnet-1-consul
|
||||
start_dnet 2 consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 2 - consul]=dnet-2-consul
|
||||
start_dnet 3 consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 3 - consul]=dnet-3-consul
|
||||
|
||||
## Run the test cases
|
||||
./integration-tmp/bin/bats ./test/integration/dnet/overlay-consul.bats
|
||||
|
||||
## Teardown
|
||||
stop_dnet 1 consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-1-consul]
|
||||
stop_dnet 2 consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-2-consul]
|
||||
stop_dnet 3 consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-3-consul]
|
||||
}
|
||||
|
||||
function run_overlay_consul_host_tests() {
|
||||
export _OVERLAY_HOST_MODE="true"
|
||||
## Setup
|
||||
start_dnet 1 consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 1 - consul]=dnet-1-consul
|
||||
|
||||
## Run the test cases
|
||||
./integration-tmp/bin/bats ./test/integration/dnet/overlay-consul-host.bats
|
||||
|
||||
## Teardown
|
||||
stop_dnet 1 consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-1-consul]
|
||||
unset _OVERLAY_HOST_MODE
|
||||
}
|
||||
|
||||
function run_overlay_zk_tests() {
|
||||
## Test overlay network with zookeeper
|
||||
start_dnet 1 zookeeper 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 1 - zookeeper]=dnet-1-zookeeper
|
||||
start_dnet 2 zookeeper 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 2 - zookeeper]=dnet-2-zookeeper
|
||||
start_dnet 3 zookeeper 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 3 - zookeeper]=dnet-3-zookeeper
|
||||
|
||||
./integration-tmp/bin/bats ./test/integration/dnet/overlay-zookeeper.bats
|
||||
|
||||
stop_dnet 1 zookeeper 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-1-zookeeper]
|
||||
stop_dnet 2 zookeeper 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-2-zookeeper]
|
||||
stop_dnet 3 zookeeper 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-3-zookeeper]
|
||||
}
|
||||
|
||||
function run_overlay_etcd_tests() {
|
||||
## Test overlay network with etcd
|
||||
start_dnet 1 etcd 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 1 - etcd]=dnet-1-etcd
|
||||
start_dnet 2 etcd 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 2 - etcd]=dnet-2-etcd
|
||||
start_dnet 3 etcd 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 3 - etcd]=dnet-3-etcd
|
||||
|
||||
./integration-tmp/bin/bats ./test/integration/dnet/overlay-etcd.bats
|
||||
|
||||
stop_dnet 1 etcd 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-1-etcd]
|
||||
stop_dnet 2 etcd 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-2-etcd]
|
||||
stop_dnet 3 etcd 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-3-etcd]
|
||||
}
|
||||
|
||||
function run_dnet_tests() {
|
||||
# Test dnet configuration options
|
||||
./integration-tmp/bin/bats ./test/integration/dnet/dnet.bats
|
||||
}
|
||||
|
||||
function run_multi_consul_tests() {
|
||||
# Test multi node configuration with a global scope test driver backed by consul
|
||||
|
||||
## Setup
|
||||
start_dnet 1 multi_consul consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 1 - multi_consul]=dnet-1-multi_consul
|
||||
start_dnet 2 multi_consul consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 2 - multi_consul]=dnet-2-multi_consul
|
||||
start_dnet 3 multi_consul consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 3 - multi_consul]=dnet-3-multi_consul
|
||||
|
||||
## Run the test cases
|
||||
./integration-tmp/bin/bats ./test/integration/dnet/multi.bats
|
||||
|
||||
## Teardown
|
||||
stop_dnet 1 multi_consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-1-multi_consul]
|
||||
stop_dnet 2 multi_consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-2-multi_consul]
|
||||
stop_dnet 3 multi_consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-3-multi_consul]
|
||||
}
|
||||
|
||||
function run_multi_zk_tests() {
|
||||
# Test multi node configuration with a global scope test driver backed by zookeeper
|
||||
|
||||
## Setup
|
||||
start_dnet 1 multi_zk zookeeper 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 1 - multi_zk]=dnet-1-multi_zk
|
||||
start_dnet 2 multi_zk zookeeper 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 2 - multi_zk]=dnet-2-multi_zk
|
||||
start_dnet 3 multi_zk zookeeper 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 3 - multi_zk]=dnet-3-multi_zk
|
||||
|
||||
## Run the test cases
|
||||
./integration-tmp/bin/bats ./test/integration/dnet/multi.bats
|
||||
|
||||
## Teardown
|
||||
stop_dnet 1 multi_zk 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-1-multi_zk]
|
||||
stop_dnet 2 multi_zk 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-2-multi_zk]
|
||||
stop_dnet 3 multi_zk 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-3-multi_zk]
|
||||
}
|
||||
|
||||
function run_multi_etcd_tests() {
|
||||
# Test multi node configuration with a global scope test driver backed by etcd
|
||||
|
||||
## Setup
|
||||
start_dnet 1 multi_etcd etcd 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 1 - multi_etcd]=dnet-1-multi_etcd
|
||||
start_dnet 2 multi_etcd etcd 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 2 - multi_etcd]=dnet-2-multi_etcd
|
||||
start_dnet 3 multi_etcd etcd 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dnet - 3 - multi_etcd]=dnet-3-multi_etcd
|
||||
|
||||
## Run the test cases
|
||||
./integration-tmp/bin/bats ./test/integration/dnet/multi.bats
|
||||
|
||||
## Teardown
|
||||
stop_dnet 1 multi_etcd 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-1-multi_etcd]
|
||||
stop_dnet 2 multi_etcd 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-2-multi_etcd]
|
||||
stop_dnet 3 multi_etcd 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
unset cmap[dnet-3-multi_etcd]
|
||||
}
|
||||
|
||||
source ./test/integration/dnet/helpers.bash
|
||||
|
||||
if [ ! -d ${INTEGRATION_ROOT} ]; then
|
||||
|
@@ -220,29 +76,11 @@ fi
|
|||
# Suite setup
|
||||
|
||||
if [ -z "$SUITES" ]; then
|
||||
suites="dnet multi_consul multi_zk multi_etcd bridge overlay_consul overlay_consul_host overlay_zk overlay_etcd"
|
||||
suites="dnet bridge"
|
||||
else
|
||||
suites="$SUITES"
|
||||
fi
|
||||
|
||||
if [[ ("$suites" =~ .*consul.*) || ("$suites" =~ .*bridge.*) ]]; then
|
||||
echo "Starting consul ..."
|
||||
start_consul 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[pr_consul]=pr_consul
|
||||
fi
|
||||
|
||||
if [[ "$suites" =~ .*zk.* ]]; then
|
||||
echo "Starting zookeeper ..."
|
||||
start_zookeeper 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[zookeeper_server]=zookeeper_server
|
||||
fi
|
||||
|
||||
if [[ "$suites" =~ .*etcd.* ]]; then
|
||||
echo "Starting etcd ..."
|
||||
start_etcd 1>> ${INTEGRATION_ROOT}/test.log 2>&1
|
||||
cmap[dn_etcd]=dn_etcd
|
||||
fi
|
||||
|
||||
echo ""
|
||||
|
||||
for suite in ${suites}; do
|
||||
|
|
|
@@ -1,41 +0,0 @@
|
|||
---
|
||||
page_title: Docker discovery
|
||||
page_description: discovery
|
||||
page_keywords: docker, clustering, discovery
|
||||
---
|
||||
|
||||
# Discovery
|
||||
|
||||
Docker comes with multiple Discovery backends.
|
||||
|
||||
## Backends
|
||||
|
||||
### Using etcd
|
||||
|
||||
Point your Docker Engine instances to a common etcd instance. You can specify
|
||||
the address Docker uses to advertise the node using the `--cluster-advertise`
|
||||
flag.
|
||||
|
||||
```bash
|
||||
$ dockerd -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store etcd://<etcd_ip1>,<etcd_ip2>/<path>
|
||||
```
|
||||
|
||||
### Using consul
|
||||
|
||||
Point your Docker Engine instances to a common Consul instance. You can specify
|
||||
the address Docker uses to advertise the node using the `--cluster-advertise`
|
||||
flag.
|
||||
|
||||
```bash
|
||||
$ dockerd -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store consul://<consul_ip>/<path>
|
||||
```
|
||||
|
||||
### Using zookeeper
|
||||
|
||||
Point your Docker Engine instances to a common Zookeeper instance. You can specify
|
||||
the address Docker uses to advertise the node using the `--cluster-advertise`
|
||||
flag.
|
||||
|
||||
```bash
|
||||
$ dockerd -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store zk://<zk_addr1>,<zk_addr2>/<path>
|
||||
```
|
|
@@ -1,107 +0,0 @@
|
|||
package discovery // import "github.com/docker/docker/pkg/discovery"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
// Backends is a global map of discovery backends indexed by their
|
||||
// associated scheme.
|
||||
backends = make(map[string]Backend)
|
||||
)
|
||||
|
||||
// Register makes a discovery backend available by the provided scheme.
|
||||
// If Register is called twice with the same scheme an error is returned.
|
||||
func Register(scheme string, d Backend) error {
|
||||
if _, exists := backends[scheme]; exists {
|
||||
return fmt.Errorf("scheme already registered %s", scheme)
|
||||
}
|
||||
logrus.WithField("name", scheme).Debugf("Registering discovery service")
|
||||
backends[scheme] = d
|
||||
return nil
|
||||
}
|
||||
|
||||
func parse(rawurl string) (string, string) {
|
||||
parts := strings.SplitN(rawurl, "://", 2)
|
||||
|
||||
// nodes:port,node2:port => nodes://node1:port,node2:port
|
||||
if len(parts) == 1 {
|
||||
return "nodes", parts[0]
|
||||
}
|
||||
return parts[0], parts[1]
|
||||
}
|
||||
|
||||
// ParseAdvertise parses the --cluster-advertise daemon config which accepts
|
||||
// <ip-address>:<port> or <interface-name>:<port>
|
||||
func ParseAdvertise(advertise string) (string, error) {
|
||||
var (
|
||||
iface *net.Interface
|
||||
addrs []net.Addr
|
||||
err error
|
||||
)
|
||||
|
||||
addr, port, err := net.SplitHostPort(advertise)
|
||||
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid --cluster-advertise configuration: %s: %v", advertise, err)
|
||||
}
|
||||
|
||||
ip := net.ParseIP(addr)
|
||||
// If it is a valid ip-address, use it as is
|
||||
if ip != nil {
|
||||
return advertise, nil
|
||||
}
|
||||
|
||||
// If advertise is a valid interface name, get the valid IPv4 address and use it to advertise
|
||||
ifaceName := addr
|
||||
iface, err = net.InterfaceByName(ifaceName)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid cluster advertise IP address or interface name (%s) : %v", advertise, err)
|
||||
}
|
||||
|
||||
addrs, err = iface.Addrs()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to get advertise IP address from interface (%s) : %v", advertise, err)
|
||||
}
|
||||
|
||||
if len(addrs) == 0 {
|
||||
return "", fmt.Errorf("no available advertise IP address in interface (%s)", advertise)
|
||||
}
|
||||
|
||||
addr = ""
|
||||
for _, a := range addrs {
|
||||
ip, _, err := net.ParseCIDR(a.String())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error deriving advertise ip-address in interface (%s) : %v", advertise, err)
|
||||
}
|
||||
if ip.To4() == nil || ip.IsLoopback() {
|
||||
continue
|
||||
}
|
||||
addr = ip.String()
|
||||
break
|
||||
}
|
||||
if addr == "" {
|
||||
return "", fmt.Errorf("could not find a valid ip-address in interface %s", advertise)
|
||||
}
|
||||
|
||||
addr = net.JoinHostPort(addr, port)
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
// New returns a new Discovery given a URL, heartbeat and ttl settings.
|
||||
// Returns an error if the URL scheme is not supported.
|
||||
func New(rawurl string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) (Backend, error) {
|
||||
scheme, uri := parse(rawurl)
|
||||
if backend, exists := backends[scheme]; exists {
|
||||
logrus.WithFields(logrus.Fields{"name": scheme, "uri": uri}).Debugf("Initializing discovery service")
|
||||
err := backend.Initialize(uri, heartbeat, ttl, clusterOpts)
|
||||
return backend, err
|
||||
}
|
||||
|
||||
return nil, ErrNotSupported
|
||||
}
|
|
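For reference, a minimal sketch of the removed `ParseAdvertise` helper in use, which resolved the `<interface-name>:<port>` form of `--cluster-advertise` to an `ip:port` pair (the interface name is illustrative and must exist on the host):

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/discovery"
)

func main() {
	// Resolve the interface-name form of --cluster-advertise to an ip:port pair.
	addr, err := discovery.ParseAdvertise("eth0:2376")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("advertise address:", addr)
}
```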
@@ -1,35 +0,0 @@
|
|||
package discovery // import "github.com/docker/docker/pkg/discovery"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotSupported is returned when a discovery service is not supported.
|
||||
ErrNotSupported = errors.New("discovery service not supported")
|
||||
|
||||
// ErrNotImplemented is returned when discovery feature is not implemented
|
||||
// by discovery backend.
|
||||
ErrNotImplemented = errors.New("not implemented in this discovery service")
|
||||
)
|
||||
|
||||
// Watcher provides watching over a cluster for nodes joining and leaving.
|
||||
type Watcher interface {
|
||||
// Watch the discovery for entry changes.
|
||||
// Returns a channel that will receive changes or an error.
|
||||
// Providing a non-nil stopCh can be used to stop watching.
|
||||
Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error)
|
||||
}
|
||||
|
||||
// Backend is implemented by discovery backends which manage cluster entries.
|
||||
type Backend interface {
|
||||
// Watcher must be provided by every backend.
|
||||
Watcher
|
||||
|
||||
// Initialize the discovery with URIs, a heartbeat, a ttl and optional settings.
|
||||
Initialize(string, time.Duration, time.Duration, map[string]string) error
|
||||
|
||||
// Register to the discovery.
|
||||
Register(string) error
|
||||
}
|
|
@@ -1,135 +0,0 @@
|
|||
package discovery // import "github.com/docker/docker/pkg/discovery"
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/internal/test/suite"
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
||||
// Hook up gocheck into the "go test" runner.
|
||||
func Test(t *testing.T) {
|
||||
suite.Run(t, &DiscoverySuite{})
|
||||
}
|
||||
|
||||
type DiscoverySuite struct{}
|
||||
|
||||
func (s *DiscoverySuite) TestNewEntry(c *testing.T) {
|
||||
entry, err := NewEntry("127.0.0.1:2375")
|
||||
assert.Assert(c, err == nil)
|
||||
assert.Equal(c, entry.Equals(&Entry{Host: "127.0.0.1", Port: "2375"}), true)
|
||||
assert.Equal(c, entry.String(), "127.0.0.1:2375")
|
||||
|
||||
entry, err = NewEntry("[2001:db8:0:f101::2]:2375")
|
||||
assert.Assert(c, err == nil)
|
||||
assert.Equal(c, entry.Equals(&Entry{Host: "2001:db8:0:f101::2", Port: "2375"}), true)
|
||||
assert.Equal(c, entry.String(), "[2001:db8:0:f101::2]:2375")
|
||||
|
||||
_, err = NewEntry("127.0.0.1")
|
||||
assert.Assert(c, err != nil)
|
||||
}
|
||||
|
||||
func (s *DiscoverySuite) TestParse(c *testing.T) {
|
||||
scheme, uri := parse("127.0.0.1:2375")
|
||||
assert.Equal(c, scheme, "nodes")
|
||||
assert.Equal(c, uri, "127.0.0.1:2375")
|
||||
|
||||
scheme, uri = parse("localhost:2375")
|
||||
assert.Equal(c, scheme, "nodes")
|
||||
assert.Equal(c, uri, "localhost:2375")
|
||||
|
||||
scheme, uri = parse("scheme://127.0.0.1:2375")
|
||||
assert.Equal(c, scheme, "scheme")
|
||||
assert.Equal(c, uri, "127.0.0.1:2375")
|
||||
|
||||
scheme, uri = parse("scheme://localhost:2375")
|
||||
assert.Equal(c, scheme, "scheme")
|
||||
assert.Equal(c, uri, "localhost:2375")
|
||||
|
||||
scheme, uri = parse("")
|
||||
assert.Equal(c, scheme, "nodes")
|
||||
assert.Equal(c, uri, "")
|
||||
}
|
||||
|
||||
func (s *DiscoverySuite) TestCreateEntries(c *testing.T) {
|
||||
entries, err := CreateEntries(nil)
|
||||
assert.DeepEqual(c, entries, Entries{})
|
||||
assert.Assert(c, err == nil)
|
||||
|
||||
entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", "[2001:db8:0:f101::2]:2375", ""})
|
||||
assert.Assert(c, err == nil)
|
||||
expected := Entries{
|
||||
&Entry{Host: "127.0.0.1", Port: "2375"},
|
||||
&Entry{Host: "127.0.0.2", Port: "2375"},
|
||||
&Entry{Host: "2001:db8:0:f101::2", Port: "2375"},
|
||||
}
|
||||
assert.Equal(c, entries.Equals(expected), true)
|
||||
|
||||
_, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"})
|
||||
assert.Assert(c, err != nil)
|
||||
}
|
||||
|
||||
func (s *DiscoverySuite) TestContainsEntry(c *testing.T) {
|
||||
entries, err := CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""})
|
||||
assert.Assert(c, err == nil)
|
||||
assert.Equal(c, entries.Contains(&Entry{Host: "127.0.0.1", Port: "2375"}), true)
|
||||
assert.Equal(c, entries.Contains(&Entry{Host: "127.0.0.3", Port: "2375"}), false)
|
||||
}
|
||||
|
||||
func (s *DiscoverySuite) TestEntriesEquality(c *testing.T) {
|
||||
entries := Entries{
|
||||
&Entry{Host: "127.0.0.1", Port: "2375"},
|
||||
&Entry{Host: "127.0.0.2", Port: "2375"},
|
||||
}
|
||||
|
||||
// Same
|
||||
assert.Assert(c, entries.Equals(Entries{
|
||||
&Entry{Host: "127.0.0.1", Port: "2375"},
|
||||
&Entry{Host: "127.0.0.2", Port: "2375"},
|
||||
}))
|
||||
|
||||
// Different size
|
||||
assert.Assert(c, !entries.Equals(Entries{
|
||||
&Entry{Host: "127.0.0.1", Port: "2375"},
|
||||
&Entry{Host: "127.0.0.2", Port: "2375"},
|
||||
&Entry{Host: "127.0.0.3", Port: "2375"},
|
||||
}))
|
||||
|
||||
// Different content
|
||||
assert.Assert(c, !entries.Equals(Entries{
|
||||
&Entry{Host: "127.0.0.1", Port: "2375"},
|
||||
&Entry{Host: "127.0.0.42", Port: "2375"},
|
||||
}))
|
||||
|
||||
}
|
||||
|
||||
func (s *DiscoverySuite) TestEntriesDiff(c *testing.T) {
|
||||
entry1 := &Entry{Host: "1.1.1.1", Port: "1111"}
|
||||
entry2 := &Entry{Host: "2.2.2.2", Port: "2222"}
|
||||
entry3 := &Entry{Host: "3.3.3.3", Port: "3333"}
|
||||
entries := Entries{entry1, entry2}
|
||||
|
||||
// No diff
|
||||
added, removed := entries.Diff(Entries{entry2, entry1})
|
||||
assert.Equal(c, len(added), 0)
|
||||
assert.Equal(c, len(removed), 0)
|
||||
|
||||
// Add
|
||||
added, removed = entries.Diff(Entries{entry2, entry3, entry1})
|
||||
assert.Equal(c, len(added), 1)
|
||||
assert.Equal(c, added.Contains(entry3), true)
|
||||
assert.Equal(c, len(removed), 0)
|
||||
|
||||
// Remove
|
||||
added, removed = entries.Diff(Entries{entry2})
|
||||
assert.Equal(c, len(added), 0)
|
||||
assert.Equal(c, len(removed), 1)
|
||||
assert.Equal(c, removed.Contains(entry1), true)
|
||||
|
||||
// Add and remove
|
||||
added, removed = entries.Diff(Entries{entry1, entry3})
|
||||
assert.Equal(c, len(added), 1)
|
||||
assert.Equal(c, added.Contains(entry3), true)
|
||||
assert.Equal(c, len(removed), 1)
|
||||
assert.Equal(c, removed.Contains(entry2), true)
|
||||
}
|
|
@@ -1,94 +0,0 @@
|
|||
package discovery // import "github.com/docker/docker/pkg/discovery"
|
||||
|
||||
import "net"
|
||||
|
||||
// NewEntry creates a new entry.
|
||||
func NewEntry(url string) (*Entry, error) {
|
||||
host, port, err := net.SplitHostPort(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Entry{host, port}, nil
|
||||
}
|
||||
|
||||
// An Entry represents a host.
|
||||
type Entry struct {
|
||||
Host string
|
||||
Port string
|
||||
}
|
||||
|
||||
// Equals returns true if cmp contains the same data.
|
||||
func (e *Entry) Equals(cmp *Entry) bool {
|
||||
return e.Host == cmp.Host && e.Port == cmp.Port
|
||||
}
|
||||
|
||||
// String returns the string form of an entry.
|
||||
func (e *Entry) String() string {
|
||||
return net.JoinHostPort(e.Host, e.Port)
|
||||
}
|
||||
|
||||
// Entries is a list of *Entry with some helpers.
|
||||
type Entries []*Entry
|
||||
|
||||
// Equals returns true if cmp contains the same data.
|
||||
func (e Entries) Equals(cmp Entries) bool {
|
||||
// Check if the file has really changed.
|
||||
if len(e) != len(cmp) {
|
||||
return false
|
||||
}
|
||||
for i := range e {
|
||||
if !e[i].Equals(cmp[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Contains returns true if the Entries contain a given Entry.
|
||||
func (e Entries) Contains(entry *Entry) bool {
|
||||
for _, curr := range e {
|
||||
if curr.Equals(entry) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Diff compares two entries and returns the added and removed entries.
|
||||
func (e Entries) Diff(cmp Entries) (Entries, Entries) {
|
||||
added := Entries{}
|
||||
for _, entry := range cmp {
|
||||
if !e.Contains(entry) {
|
||||
added = append(added, entry)
|
||||
}
|
||||
}
|
||||
|
||||
removed := Entries{}
|
||||
for _, entry := range e {
|
||||
if !cmp.Contains(entry) {
|
||||
removed = append(removed, entry)
|
||||
}
|
||||
}
|
||||
|
||||
return added, removed
|
||||
}
|
||||
|
||||
// CreateEntries returns an array of entries based on the given addresses.
|
||||
func CreateEntries(addrs []string) (Entries, error) {
|
||||
entries := Entries{}
|
||||
if addrs == nil {
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
for _, addr := range addrs {
|
||||
if len(addr) == 0 {
|
||||
continue
|
||||
}
|
||||
entry, err := NewEntry(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
return entries, nil
|
||||
}
|
|
@@ -1,107 +0,0 @@
|
|||
package file // import "github.com/docker/docker/pkg/discovery/file"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/discovery"
|
||||
)
|
||||
|
||||
// Discovery is exported
|
||||
type Discovery struct {
|
||||
heartbeat time.Duration
|
||||
path string
|
||||
}
|
||||
|
||||
func init() {
|
||||
Init()
|
||||
}
|
||||
|
||||
// Init is exported
|
||||
func Init() {
|
||||
discovery.Register("file", &Discovery{})
|
||||
}
|
||||
|
||||
// Initialize is exported
|
||||
func (s *Discovery) Initialize(path string, heartbeat time.Duration, ttl time.Duration, _ map[string]string) error {
|
||||
s.path = path
|
||||
s.heartbeat = heartbeat
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseFileContent(content []byte) []string {
|
||||
var result []string
|
||||
for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
// Ignoring line starts with #
|
||||
if strings.HasPrefix(line, "#") {
|
||||
continue
|
||||
}
|
||||
// Inlined # comment also ignored.
|
||||
if strings.Contains(line, "#") {
|
||||
line = line[0:strings.Index(line, "#")]
|
||||
// Trim additional spaces caused by above stripping.
|
||||
line = strings.TrimSpace(line)
|
||||
}
|
||||
result = append(result, discovery.Generate(line)...)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (s *Discovery) fetch() (discovery.Entries, error) {
|
||||
fileContent, err := os.ReadFile(s.path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read '%s': %v", s.path, err)
|
||||
}
|
||||
return discovery.CreateEntries(parseFileContent(fileContent))
|
||||
}
|
||||
|
||||
// Watch is exported
|
||||
func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
|
||||
ch := make(chan discovery.Entries, 1)
|
||||
errCh := make(chan error, 1)
|
||||
ticker := time.NewTicker(s.heartbeat)
|
||||
|
||||
go func() {
|
||||
defer close(errCh)
|
||||
defer close(ch)
|
||||
|
||||
// Send the initial entries if available.
|
||||
currentEntries, err := s.fetch()
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
} else {
|
||||
ch <- currentEntries
|
||||
}
|
||||
|
||||
// Periodically send updates.
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
newEntries, err := s.fetch()
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if the file has really changed.
|
||||
if !newEntries.Equals(currentEntries) {
|
||||
ch <- newEntries
|
||||
}
|
||||
currentEntries = newEntries
|
||||
case <-stopCh:
|
||||
ticker.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return ch, errCh
|
||||
}
|
||||
|
||||
// Register is exported
|
||||
func (s *Discovery) Register(addr string) error {
|
||||
return discovery.ErrNotImplemented
|
||||
}
|
|
@@ -1,113 +0,0 @@
|
|||
package file // import "github.com/docker/docker/pkg/discovery/file"
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/internal/test/suite"
|
||||
"github.com/docker/docker/pkg/discovery"
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
||||
// Hook up gocheck into the "go test" runner.
|
||||
func Test(t *testing.T) {
|
||||
suite.Run(t, &DiscoverySuite{})
|
||||
}
|
||||
|
||||
type DiscoverySuite struct{}
|
||||
|
||||
func (s *DiscoverySuite) TestInitialize(c *testing.T) {
|
||||
d := &Discovery{}
|
||||
d.Initialize("/path/to/file", 1000, 0, nil)
|
||||
assert.Equal(c, d.path, "/path/to/file")
|
||||
}
|
||||
|
||||
func (s *DiscoverySuite) TestNew(c *testing.T) {
|
||||
d, err := discovery.New("file:///path/to/file", 0, 0, nil)
|
||||
assert.Assert(c, err == nil)
|
||||
assert.Equal(c, d.(*Discovery).path, "/path/to/file")
|
||||
}
|
||||
|
||||
func (s *DiscoverySuite) TestContent(c *testing.T) {
|
||||
data := `
|
||||
1.1.1.[1:2]:1111
|
||||
2.2.2.[2:4]:2222
|
||||
`
|
||||
ips := parseFileContent([]byte(data))
|
||||
assert.Equal(c, len(ips), 5)
|
||||
assert.Equal(c, ips[0], "1.1.1.1:1111")
|
||||
assert.Equal(c, ips[1], "1.1.1.2:1111")
|
||||
assert.Equal(c, ips[2], "2.2.2.2:2222")
|
||||
assert.Equal(c, ips[3], "2.2.2.3:2222")
|
||||
assert.Equal(c, ips[4], "2.2.2.4:2222")
|
||||
}
|
||||
|
||||
func (s *DiscoverySuite) TestRegister(c *testing.T) {
|
||||
discovery := &Discovery{path: "/path/to/file"}
|
||||
assert.Assert(c, discovery.Register("0.0.0.0") != nil)
|
||||
}
|
||||
|
||||
func (s *DiscoverySuite) TestParsingContentsWithComments(c *testing.T) {
|
||||
data := `
|
||||
### test ###
|
||||
1.1.1.1:1111 # inline comment
|
||||
# 2.2.2.2:2222
|
||||
### empty line with comment
|
||||
3.3.3.3:3333
|
||||
### test ###
|
||||
`
|
||||
ips := parseFileContent([]byte(data))
|
||||
assert.Equal(c, len(ips), 2)
|
||||
assert.Equal(c, "1.1.1.1:1111", ips[0])
|
||||
assert.Equal(c, "3.3.3.3:3333", ips[1])
|
||||
}
|
||||
|
||||
func (s *DiscoverySuite) TestWatch(c *testing.T) {
|
||||
data := `
|
||||
1.1.1.1:1111
|
||||
2.2.2.2:2222
|
||||
`
|
||||
expected := discovery.Entries{
|
||||
&discovery.Entry{Host: "1.1.1.1", Port: "1111"},
|
||||
&discovery.Entry{Host: "2.2.2.2", Port: "2222"},
|
||||
}
|
||||
|
||||
// Create a temporary file and remove it.
|
||||
tmp, err := os.CreateTemp(os.TempDir(), "discovery-file-test")
|
||||
assert.Assert(c, err == nil)
|
||||
assert.Assert(c, tmp.Close() == nil)
|
||||
assert.Assert(c, os.Remove(tmp.Name()) == nil)
|
||||
|
||||
// Set up file discovery.
|
||||
d := &Discovery{}
|
||||
d.Initialize(tmp.Name(), 1000, 0, nil)
|
||||
stopCh := make(chan struct{})
|
||||
ch, errCh := d.Watch(stopCh)
|
||||
|
||||
// Make sure it fires errors since the file doesn't exist.
|
||||
assert.Assert(c, <-errCh != nil)
|
||||
// We have to drain the error channel otherwise Watch will get stuck.
|
||||
go func() {
|
||||
for range errCh {
|
||||
}
|
||||
}()
|
||||
|
||||
// Write the file and make sure we get the expected value back.
|
||||
assert.Assert(c, os.WriteFile(tmp.Name(), []byte(data), 0600) == nil)
|
||||
assert.DeepEqual(c, <-ch, expected)
|
||||
|
||||
// Add a new entry and look it up.
|
||||
expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"})
|
||||
f, err := os.OpenFile(tmp.Name(), os.O_APPEND|os.O_WRONLY, 0600)
|
||||
assert.Assert(c, err == nil)
|
||||
assert.Assert(c, f != nil)
|
||||
_, err = f.WriteString("\n3.3.3.3:3333\n")
|
||||
assert.Assert(c, err == nil)
|
||||
f.Close()
|
||||
assert.DeepEqual(c, <-ch, expected)
|
||||
|
||||
// Stop and make sure it closes all channels.
|
||||
close(stopCh)
|
||||
assert.Assert(c, <-ch == nil)
|
||||
assert.Assert(c, <-errCh == nil)
|
||||
}
|
|
@ -1,35 +0,0 @@
package discovery // import "github.com/docker/docker/pkg/discovery"

import (
	"fmt"
	"regexp"
	"strconv"
)

// Generate takes care of IP generation
func Generate(pattern string) []string {
	re, _ := regexp.Compile(`\[(.+):(.+)\]`)
	submatch := re.FindStringSubmatch(pattern)
	if submatch == nil {
		return []string{pattern}
	}

	from, err := strconv.Atoi(submatch[1])
	if err != nil {
		return []string{pattern}
	}
	to, err := strconv.Atoi(submatch[2])
	if err != nil {
		return []string{pattern}
	}

	template := re.ReplaceAllString(pattern, "%d")

	var result []string
	for val := from; val <= to; val++ {
		entry := fmt.Sprintf(template, val)
		result = append(result, entry)
	}

	return result
}

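`Generate` is what turned bracketed range patterns such as `1.1.1.[1:2]:1111` into concrete addresses for the file and nodes backends. If something downstream still relies on that expansion once this package is gone, a self-contained sketch of the same idea is shown below; the helper name is hypothetical and, unlike the deleted code, it only accepts digit-only ranges.

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// expandRange mirrors the behaviour of the deleted discovery.Generate:
// "10.0.0.[1:3]:2375" -> ["10.0.0.1:2375", "10.0.0.2:2375", "10.0.0.3:2375"].
// Patterns without a valid [from:to] range are returned unchanged.
func expandRange(pattern string) []string {
	re := regexp.MustCompile(`\[(\d+):(\d+)\]`)
	m := re.FindStringSubmatch(pattern)
	if m == nil {
		return []string{pattern}
	}
	from, _ := strconv.Atoi(m[1]) // digits guaranteed by the regexp
	to, _ := strconv.Atoi(m[2])
	tmpl := re.ReplaceAllString(pattern, "%d")

	var out []string
	for i := from; i <= to; i++ {
		out = append(out, fmt.Sprintf(tmpl, i))
	}
	return out
}

func main() {
	fmt.Println(expandRange("10.0.0.[1:3]:2375"))
	// [10.0.0.1:2375 10.0.0.2:2375 10.0.0.3:2375]
}
```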
@ -1,54 +0,0 @@
package discovery // import "github.com/docker/docker/pkg/discovery"
import (
	"testing"

	"gotest.tools/v3/assert"
)

func (s *DiscoverySuite) TestGeneratorNotGenerate(c *testing.T) {
	ips := Generate("127.0.0.1")
	assert.Equal(c, len(ips), 1)
	assert.Equal(c, ips[0], "127.0.0.1")
}

func (s *DiscoverySuite) TestGeneratorWithPortNotGenerate(c *testing.T) {
	ips := Generate("127.0.0.1:8080")
	assert.Equal(c, len(ips), 1)
	assert.Equal(c, ips[0], "127.0.0.1:8080")
}

func (s *DiscoverySuite) TestGeneratorMatchFailedNotGenerate(c *testing.T) {
	ips := Generate("127.0.0.[1]")
	assert.Equal(c, len(ips), 1)
	assert.Equal(c, ips[0], "127.0.0.[1]")
}

func (s *DiscoverySuite) TestGeneratorWithPort(c *testing.T) {
	ips := Generate("127.0.0.[1:11]:2375")
	assert.Equal(c, len(ips), 11)
	assert.Equal(c, ips[0], "127.0.0.1:2375")
	assert.Equal(c, ips[1], "127.0.0.2:2375")
	assert.Equal(c, ips[2], "127.0.0.3:2375")
	assert.Equal(c, ips[3], "127.0.0.4:2375")
	assert.Equal(c, ips[4], "127.0.0.5:2375")
	assert.Equal(c, ips[5], "127.0.0.6:2375")
	assert.Equal(c, ips[6], "127.0.0.7:2375")
	assert.Equal(c, ips[7], "127.0.0.8:2375")
	assert.Equal(c, ips[8], "127.0.0.9:2375")
	assert.Equal(c, ips[9], "127.0.0.10:2375")
	assert.Equal(c, ips[10], "127.0.0.11:2375")
}

func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeStart(c *testing.T) {
	malformedInput := "127.0.0.[x:11]:2375"
	ips := Generate(malformedInput)
	assert.Equal(c, len(ips), 1)
	assert.Equal(c, ips[0], malformedInput)
}

func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeEnd(c *testing.T) {
	malformedInput := "127.0.0.[1:x]:2375"
	ips := Generate(malformedInput)
	assert.Equal(c, len(ips), 1)
	assert.Equal(c, ips[0], malformedInput)
}

@ -1,192 +0,0 @@
package kv // import "github.com/docker/docker/pkg/discovery/kv"

import (
	"fmt"
	"path"
	"strings"
	"time"

	"github.com/docker/docker/pkg/discovery"
	"github.com/docker/go-connections/tlsconfig"
	"github.com/docker/libkv"
	"github.com/docker/libkv/store"
	"github.com/docker/libkv/store/consul"
	"github.com/docker/libkv/store/etcd"
	"github.com/docker/libkv/store/zookeeper"
	"github.com/sirupsen/logrus"
)

const (
	defaultDiscoveryPath = "docker/nodes"
)

// Discovery is exported
type Discovery struct {
	backend   store.Backend
	store     store.Store
	heartbeat time.Duration
	ttl       time.Duration
	prefix    string
	path      string
}

func init() {
	Init()
}

// Init is exported
func Init() {
	// Register to libkv
	zookeeper.Register()
	consul.Register()
	etcd.Register()

	// Register to internal discovery service
	discovery.Register("zk", &Discovery{backend: store.ZK})
	discovery.Register("consul", &Discovery{backend: store.CONSUL})
	discovery.Register("etcd", &Discovery{backend: store.ETCD})
}

// Initialize is exported
func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) error {
	var (
		parts = strings.SplitN(uris, "/", 2)
		addrs = strings.Split(parts[0], ",")
		err   error
	)

	// A custom prefix to the path can be optionally used.
	if len(parts) == 2 {
		s.prefix = parts[1]
	}

	s.heartbeat = heartbeat
	s.ttl = ttl

	// Use a custom path if specified in discovery options
	dpath := defaultDiscoveryPath
	if clusterOpts["kv.path"] != "" {
		dpath = clusterOpts["kv.path"]
	}

	s.path = path.Join(s.prefix, dpath)

	var config *store.Config
	if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" {
		logrus.Info("Initializing discovery with TLS")
		tlsConfig, err := tlsconfig.Client(tlsconfig.Options{
			CAFile:   clusterOpts["kv.cacertfile"],
			CertFile: clusterOpts["kv.certfile"],
			KeyFile:  clusterOpts["kv.keyfile"],
		})
		if err != nil {
			return err
		}
		config = &store.Config{
			// Set ClientTLS to trigger https (bug in libkv/etcd)
			ClientTLS: &store.ClientTLSConfig{
				CACertFile: clusterOpts["kv.cacertfile"],
				CertFile:   clusterOpts["kv.certfile"],
				KeyFile:    clusterOpts["kv.keyfile"],
			},
			// The actual TLS config that will be used
			TLS: tlsConfig,
		}
	} else {
		logrus.Info("Initializing discovery without TLS")
	}

	// Creates a new store, will ignore options given
	// if not supported by the chosen store
	s.store, err = libkv.NewStore(s.backend, addrs, config)
	return err
}

// Watch the store until either there's a store error or we receive a stop request.
// Returns false if we shouldn't attempt watching the store anymore (stop request received).
func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool {
	for {
		select {
		case pairs := <-watchCh:
			if pairs == nil {
				return true
			}

			logrus.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs))

			// Convert `KVPair` into `discovery.Entry`.
			addrs := make([]string, len(pairs))
			for _, pair := range pairs {
				addrs = append(addrs, string(pair.Value))
			}

			entries, err := discovery.CreateEntries(addrs)
			if err != nil {
				errCh <- err
			} else {
				discoveryCh <- entries
			}
		case <-stopCh:
			// We were requested to stop watching.
			return false
		}
	}
}

// Watch is exported
func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
	ch := make(chan discovery.Entries)
	errCh := make(chan error)

	go func() {
		defer close(ch)
		defer close(errCh)

		// Forever: Create a store watch, watch until we get an error and then try again.
		// Will only stop if we receive a stopCh request.
		for {
			// Create the path to watch if it does not exist yet
			exists, err := s.store.Exists(s.path)
			if err != nil {
				errCh <- err
			}
			if !exists {
				if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil {
					errCh <- err
				}
			}

			// Set up a watch.
			watchCh, err := s.store.WatchTree(s.path, stopCh)
			if err != nil {
				errCh <- err
			} else {
				if !s.watchOnce(stopCh, watchCh, ch, errCh) {
					return
				}
			}

			// If we get here it means the store watch channel was closed. This
			// is unexpected so let's retry later.
			errCh <- fmt.Errorf("Unexpected watch error")
			time.Sleep(s.heartbeat)
		}
	}()
	return ch, errCh
}

// Register is exported
func (s *Discovery) Register(addr string) error {
	opts := &store.WriteOptions{TTL: s.ttl}
	return s.store.Put(path.Join(s.path, addr), []byte(addr), opts)
}

// Store returns the underlying store used by KV discovery.
func (s *Discovery) Store() store.Store {
	return s.store
}

// Prefix returns the store prefix
func (s *Discovery) Prefix() string {
	return s.prefix
}

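The kv backend above is what `consul`, `etcd` and `zk` discovery URIs resolved to via libkv. For context only, here is a rough sketch of driving it through the generic `discovery.New` entry point the way a caller would have before this removal; the endpoint, key prefix, heartbeat and TTL values are placeholders, and the `consul://host:port/prefix` URI shape is assumed by analogy with the `file://` URIs used in the file backend's tests rather than confirmed by this diff.

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/discovery"
	// Blank import triggers init(), which registers the consul/etcd/zk
	// schemes with the internal discovery registry (all removed by this PR).
	_ "github.com/docker/docker/pkg/discovery/kv"
)

func main() {
	// Placeholder Consul endpoint and key prefix.
	backend, err := discovery.New("consul://127.0.0.1:8500/docker/nodes",
		10*time.Second, // heartbeat
		30*time.Second, // ttl
		map[string]string{"kv.path": "docker/nodes"},
	)
	if err != nil {
		panic(err)
	}

	stopCh := make(chan struct{})
	entriesCh, errCh := backend.Watch(stopCh)

	for {
		select {
		case entries := <-entriesCh:
			for _, e := range entries {
				fmt.Println("node:", e.String())
			}
		case err := <-errCh:
			fmt.Println("watch error:", err)
		case <-time.After(time.Minute):
			close(stopCh) // stop watching and exit
			return
		}
	}
}
```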
@ -1,322 +0,0 @@
|
|||
package kv // import "github.com/docker/docker/pkg/discovery/kv"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/internal/test/suite"
|
||||
"github.com/docker/docker/pkg/discovery"
|
||||
"github.com/docker/libkv"
|
||||
"github.com/docker/libkv/store"
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
||||
// Hook up gocheck into the "go test" runner.
|
||||
func Test(t *testing.T) {
|
||||
suite.Run(t, &DiscoverySuite{})
|
||||
}
|
||||
|
||||
type DiscoverySuite struct{}
|
||||
|
||||
func (ds *DiscoverySuite) TestInitialize(c *testing.T) {
|
||||
storeMock := &FakeStore{
|
||||
Endpoints: []string{"127.0.0.1"},
|
||||
}
|
||||
d := &Discovery{backend: store.CONSUL}
|
||||
d.Initialize("127.0.0.1", 0, 0, nil)
|
||||
d.store = storeMock
|
||||
|
||||
s := d.store.(*FakeStore)
|
||||
assert.Equal(c, len(s.Endpoints), 1)
|
||||
assert.Equal(c, s.Endpoints[0], "127.0.0.1")
|
||||
assert.Equal(c, d.path, defaultDiscoveryPath)
|
||||
|
||||
storeMock = &FakeStore{
|
||||
Endpoints: []string{"127.0.0.1:1234"},
|
||||
}
|
||||
d = &Discovery{backend: store.CONSUL}
|
||||
d.Initialize("127.0.0.1:1234/path", 0, 0, nil)
|
||||
d.store = storeMock
|
||||
|
||||
s = d.store.(*FakeStore)
|
||||
assert.Equal(c, len(s.Endpoints), 1)
|
||||
assert.Equal(c, s.Endpoints[0], "127.0.0.1:1234")
|
||||
assert.Equal(c, d.path, "path/"+defaultDiscoveryPath)
|
||||
|
||||
storeMock = &FakeStore{
|
||||
Endpoints: []string{"127.0.0.1:1234", "127.0.0.2:1234", "127.0.0.3:1234"},
|
||||
}
|
||||
d = &Discovery{backend: store.CONSUL}
|
||||
d.Initialize("127.0.0.1:1234,127.0.0.2:1234,127.0.0.3:1234/path", 0, 0, nil)
|
||||
d.store = storeMock
|
||||
|
||||
s = d.store.(*FakeStore)
|
||||
assert.Equal(c, len(s.Endpoints), 3)
|
||||
assert.Equal(c, s.Endpoints[0], "127.0.0.1:1234")
|
||||
assert.Equal(c, s.Endpoints[1], "127.0.0.2:1234")
|
||||
assert.Equal(c, s.Endpoints[2], "127.0.0.3:1234")
|
||||
|
||||
assert.Equal(c, d.path, "path/"+defaultDiscoveryPath)
|
||||
}
|
||||
|
||||
// Extremely limited mock store so we can test initialization
|
||||
type Mock struct {
|
||||
// Endpoints passed to InitializeMock
|
||||
Endpoints []string
|
||||
|
||||
// Options passed to InitializeMock
|
||||
Options *store.Config
|
||||
}
|
||||
|
||||
func NewMock(endpoints []string, options *store.Config) (store.Store, error) {
|
||||
s := &Mock{}
|
||||
s.Endpoints = endpoints
|
||||
s.Options = options
|
||||
return s, nil
|
||||
}
|
||||
func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error {
|
||||
return errors.New("Put not supported")
|
||||
}
|
||||
func (s *Mock) Get(key string) (*store.KVPair, error) {
|
||||
return nil, errors.New("Get not supported")
|
||||
}
|
||||
func (s *Mock) Delete(key string) error {
|
||||
return errors.New("Delete not supported")
|
||||
}
|
||||
|
||||
// Exists mock
|
||||
func (s *Mock) Exists(key string) (bool, error) {
|
||||
return false, errors.New("Exists not supported")
|
||||
}
|
||||
|
||||
// Watch mock
|
||||
func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
|
||||
return nil, errors.New("Watch not supported")
|
||||
}
|
||||
|
||||
// WatchTree mock
|
||||
func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
|
||||
return nil, errors.New("WatchTree not supported")
|
||||
}
|
||||
|
||||
// NewLock mock
|
||||
func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
|
||||
return nil, errors.New("NewLock not supported")
|
||||
}
|
||||
|
||||
// List mock
|
||||
func (s *Mock) List(prefix string) ([]*store.KVPair, error) {
|
||||
return nil, errors.New("List not supported")
|
||||
}
|
||||
|
||||
// DeleteTree mock
|
||||
func (s *Mock) DeleteTree(prefix string) error {
|
||||
return errors.New("DeleteTree not supported")
|
||||
}
|
||||
|
||||
// AtomicPut mock
|
||||
func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) {
|
||||
return false, nil, errors.New("AtomicPut not supported")
|
||||
}
|
||||
|
||||
// AtomicDelete mock
|
||||
func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
|
||||
return false, errors.New("AtomicDelete not supported")
|
||||
}
|
||||
|
||||
// Close mock
|
||||
func (s *Mock) Close() {
|
||||
}
|
||||
|
||||
func (ds *DiscoverySuite) TestInitializeWithCerts(c *testing.T) {
|
||||
cert := `-----BEGIN CERTIFICATE-----
|
||||
MIIDCDCCAfKgAwIBAgIICifG7YeiQOEwCwYJKoZIhvcNAQELMBIxEDAOBgNVBAMT
|
||||
B1Rlc3QgQ0EwHhcNMTUxMDAxMjMwMDAwWhcNMjAwOTI5MjMwMDAwWjASMRAwDgYD
|
||||
VQQDEwdUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1wRC
|
||||
O+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4+zE9h80aC4hz+6caRpds
|
||||
+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhRSoSi3nY+B7F2E8cuz14q
|
||||
V2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZrpXUyXxAvzXfpFXo1RhSb
|
||||
UywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUerVYrCPq8vqfn//01qz55
|
||||
Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHojxOpXTBepUCIJLbtNnWFT
|
||||
V44t9gh5IqIWtoBReQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/
|
||||
BAgwBgEB/wIBAjAdBgNVHQ4EFgQUZKUI8IIjIww7X/6hvwggQK4bD24wHwYDVR0j
|
||||
BBgwFoAUZKUI8IIjIww7X/6hvwggQK4bD24wCwYJKoZIhvcNAQELA4IBAQDES2cz
|
||||
7sCQfDCxCIWH7X8kpi/JWExzUyQEJ0rBzN1m3/x8ySRxtXyGekimBqQwQdFqlwMI
|
||||
xzAQKkh3ue8tNSzRbwqMSyH14N1KrSxYS9e9szJHfUasoTpQGPmDmGIoRJuq1h6M
|
||||
ej5x1SCJ7GWCR6xEXKUIE9OftXm9TdFzWa7Ja3OHz/mXteii8VXDuZ5ACq6EE5bY
|
||||
8sP4gcICfJ5fTrpTlk9FIqEWWQrCGa5wk95PGEj+GJpNogjXQ97wVoo/Y3p1brEn
|
||||
t5zjN9PAq4H1fuCMdNNA+p1DHNwd+ELTxcMAnb2ajwHvV6lKPXutrTFc4umJToBX
|
||||
FpTxDmJHEV4bzUzh
|
||||
-----END CERTIFICATE-----
|
||||
`
|
||||
key := `-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpQIBAAKCAQEA1wRCO+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4
|
||||
+zE9h80aC4hz+6caRpds+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhR
|
||||
SoSi3nY+B7F2E8cuz14qV2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZr
|
||||
pXUyXxAvzXfpFXo1RhSbUywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUe
|
||||
rVYrCPq8vqfn//01qz55Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHoj
|
||||
xOpXTBepUCIJLbtNnWFTV44t9gh5IqIWtoBReQIDAQABAoIBAHSWipORGp/uKFXj
|
||||
i/mut776x8ofsAxhnLBARQr93ID+i49W8H7EJGkOfaDjTICYC1dbpGrri61qk8sx
|
||||
qX7p3v/5NzKwOIfEpirgwVIqSNYe/ncbxnhxkx6tXtUtFKmEx40JskvSpSYAhmmO
|
||||
1XSx0E/PWaEN/nLgX/f1eWJIlxlQkk3QeqL+FGbCXI48DEtlJ9+MzMu4pAwZTpj5
|
||||
5qtXo5JJ0jRGfJVPAOznRsYqv864AhMdMIWguzk6EGnbaCWwPcfcn+h9a5LMdony
|
||||
MDHfBS7bb5tkF3+AfnVY3IBMVx7YlsD9eAyajlgiKu4zLbwTRHjXgShy+4Oussz0
|
||||
ugNGnkECgYEA/hi+McrZC8C4gg6XqK8+9joD8tnyDZDz88BQB7CZqABUSwvjDqlP
|
||||
L8hcwo/lzvjBNYGkqaFPUICGWKjeCtd8pPS2DCVXxDQX4aHF1vUur0uYNncJiV3N
|
||||
XQz4Iemsa6wnKf6M67b5vMXICw7dw0HZCdIHD1hnhdtDz0uVpeevLZ8CgYEA2KCT
|
||||
Y43lorjrbCgMqtlefkr3GJA9dey+hTzCiWEOOqn9RqGoEGUday0sKhiLofOgmN2B
|
||||
LEukpKIey8s+Q/cb6lReajDVPDsMweX8i7hz3Wa4Ugp4Xa5BpHqu8qIAE2JUZ7bU
|
||||
t88aQAYE58pUF+/Lq1QzAQdrjjzQBx6SrBxieecCgYEAvukoPZEC8mmiN1VvbTX+
|
||||
QFHmlZha3QaDxChB+QUe7bMRojEUL/fVnzkTOLuVFqSfxevaI/km9n0ac5KtAchV
|
||||
xjp2bTnBb5EUQFqjopYktWA+xO07JRJtMfSEmjZPbbay1kKC7rdTfBm961EIHaRj
|
||||
xZUf6M+rOE8964oGrdgdLlECgYEA046GQmx6fh7/82FtdZDRQp9tj3SWQUtSiQZc
|
||||
qhO59Lq8mjUXz+MgBuJXxkiwXRpzlbaFB0Bca1fUoYw8o915SrDYf/Zu2OKGQ/qa
|
||||
V81sgiVmDuEgycR7YOlbX6OsVUHrUlpwhY3hgfMe6UtkMvhBvHF/WhroBEIJm1pV
|
||||
PXZ/CbMCgYEApNWVktFBjOaYfY6SNn4iSts1jgsQbbpglg3kT7PLKjCAhI6lNsbk
|
||||
dyT7ut01PL6RaW4SeQWtrJIVQaM6vF3pprMKqlc5XihOGAmVqH7rQx9rtQB5TicL
|
||||
BFrwkQE4HQtQBV60hYQUzzlSk44VFDz+jxIEtacRHaomDRh2FtOTz+I=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
`
|
||||
certFile, err := os.CreateTemp("", "cert")
|
||||
assert.Assert(c, err == nil)
|
||||
defer os.Remove(certFile.Name())
|
||||
certFile.Write([]byte(cert))
|
||||
certFile.Close()
|
||||
keyFile, err := os.CreateTemp("", "key")
|
||||
assert.Assert(c, err == nil)
|
||||
defer os.Remove(keyFile.Name())
|
||||
keyFile.Write([]byte(key))
|
||||
keyFile.Close()
|
||||
|
||||
libkv.AddStore("mock", NewMock)
|
||||
d := &Discovery{backend: "mock"}
|
||||
err = d.Initialize("127.0.0.3:1234", 0, 0, map[string]string{
|
||||
"kv.cacertfile": certFile.Name(),
|
||||
"kv.certfile": certFile.Name(),
|
||||
"kv.keyfile": keyFile.Name(),
|
||||
})
|
||||
assert.Assert(c, err == nil)
|
||||
s := d.store.(*Mock)
|
||||
assert.Assert(c, s.Options.TLS != nil)
|
||||
assert.Assert(c, s.Options.TLS.RootCAs != nil)
|
||||
assert.Equal(c, len(s.Options.TLS.Certificates), 1)
|
||||
}
|
||||
|
||||
func (ds *DiscoverySuite) TestWatch(c *testing.T) {
|
||||
mockCh := make(chan []*store.KVPair)
|
||||
|
||||
storeMock := &FakeStore{
|
||||
Endpoints: []string{"127.0.0.1:1234"},
|
||||
mockKVChan: mockCh,
|
||||
}
|
||||
|
||||
d := &Discovery{backend: store.CONSUL}
|
||||
d.Initialize("127.0.0.1:1234/path", 0, 0, nil)
|
||||
d.store = storeMock
|
||||
|
||||
expected := discovery.Entries{
|
||||
&discovery.Entry{Host: "1.1.1.1", Port: "1111"},
|
||||
&discovery.Entry{Host: "2.2.2.2", Port: "2222"},
|
||||
}
|
||||
kvs := []*store.KVPair{
|
||||
{Key: path.Join("path", defaultDiscoveryPath, "1.1.1.1"), Value: []byte("1.1.1.1:1111")},
|
||||
{Key: path.Join("path", defaultDiscoveryPath, "2.2.2.2"), Value: []byte("2.2.2.2:2222")},
|
||||
}
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
ch, errCh := d.Watch(stopCh)
|
||||
|
||||
// It should fire an error since the first WatchTree call failed.
|
||||
assert.ErrorContains(c, <-errCh, "test error")
|
||||
// We have to drain the error channel otherwise Watch will get stuck.
|
||||
go func() {
|
||||
for range errCh {
|
||||
}
|
||||
}()
|
||||
|
||||
// Push the entries into the store channel and make sure discovery emits.
|
||||
mockCh <- kvs
|
||||
assert.DeepEqual(c, <-ch, expected)
|
||||
|
||||
// Add a new entry.
|
||||
expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"})
|
||||
kvs = append(kvs, &store.KVPair{Key: path.Join("path", defaultDiscoveryPath, "3.3.3.3"), Value: []byte("3.3.3.3:3333")})
|
||||
mockCh <- kvs
|
||||
assert.DeepEqual(c, <-ch, expected)
|
||||
|
||||
close(mockCh)
|
||||
// Give it enough time to call WatchTree.
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// Stop and make sure it closes all channels.
|
||||
close(stopCh)
|
||||
assert.Assert(c, <-ch == nil)
|
||||
assert.Assert(c, <-errCh == nil)
|
||||
}
|
||||
|
||||
// FakeStore implements store.Store methods. It mocks all store
|
||||
// function in a simple, naive way.
|
||||
type FakeStore struct {
|
||||
Endpoints []string
|
||||
Options *store.Config
|
||||
mockKVChan <-chan []*store.KVPair
|
||||
|
||||
watchTreeCallCount int
|
||||
}
|
||||
|
||||
func (s *FakeStore) Put(key string, value []byte, options *store.WriteOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *FakeStore) Get(key string) (*store.KVPair, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *FakeStore) Delete(key string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *FakeStore) Exists(key string) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// WatchTree will fail the first time, and return the mockKVchan afterwards.
|
||||
// This is the behavior we need for testing.. If we need 'moar', should update this.
|
||||
func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
|
||||
if s.watchTreeCallCount == 0 {
|
||||
s.watchTreeCallCount = 1
|
||||
return nil, errors.New("test error")
|
||||
}
|
||||
// First calls error
|
||||
return s.mockKVChan, nil
|
||||
}
|
||||
|
||||
func (s *FakeStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *FakeStore) List(directory string) ([]*store.KVPair, error) {
|
||||
return []*store.KVPair{}, nil
|
||||
}
|
||||
|
||||
func (s *FakeStore) DeleteTree(directory string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *FakeStore) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
|
||||
return true, nil, nil
|
||||
}
|
||||
|
||||
func (s *FakeStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (s *FakeStore) Close() {
|
||||
}
|
|
@ -1,93 +0,0 @@
package memory // import "github.com/docker/docker/pkg/discovery/memory"

import (
	"sync"
	"time"

	"github.com/docker/docker/pkg/discovery"
)

// Discovery implements a discovery backend that keeps
// data in memory.
type Discovery struct {
	heartbeat time.Duration
	values    []string
	mu        sync.Mutex
}

func init() {
	Init()
}

// Init registers the memory backend on demand.
func Init() {
	discovery.Register("memory", &Discovery{})
}

// Initialize sets the heartbeat for the memory backend.
func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error {
	s.heartbeat = heartbeat
	s.values = make([]string, 0)
	return nil
}

// Watch sends periodic discovery updates to a channel.
func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
	ch := make(chan discovery.Entries, 1)
	errCh := make(chan error, 1)
	ticker := time.NewTicker(s.heartbeat)

	go func() {
		defer close(errCh)
		defer close(ch)

		// Send the initial entries if available.
		var currentEntries discovery.Entries
		var err error

		s.mu.Lock()
		if len(s.values) > 0 {
			currentEntries, err = discovery.CreateEntries(s.values)
		}
		s.mu.Unlock()

		if err != nil {
			errCh <- err
		} else if currentEntries != nil {
			ch <- currentEntries
		}

		// Periodically send updates.
		for {
			select {
			case <-ticker.C:
				s.mu.Lock()
				newEntries, err := discovery.CreateEntries(s.values)
				s.mu.Unlock()
				if err != nil {
					errCh <- err
					continue
				}

				// Check if the file has really changed.
				if !newEntries.Equals(currentEntries) {
					ch <- newEntries
				}
				currentEntries = newEntries
			case <-stopCh:
				ticker.Stop()
				return
			}
		}
	}()

	return ch, errCh
}

// Register adds a new address to the discovery.
func (s *Discovery) Register(addr string) error {
	s.mu.Lock()
	s.values = append(s.values, addr)
	s.mu.Unlock()
	return nil
}

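All of these backends plug into the same registry: an `init` hook calls `discovery.Register` with a scheme name, and the type satisfies the `Initialize`/`Watch`/`Register` methods seen throughout this diff. A minimal sketch of a custom backend wired up the same way follows; the `static` scheme and package name are invented for illustration and are not part of this change.

```go
package staticdisc

import (
	"time"

	"github.com/docker/docker/pkg/discovery"
)

// static is a toy backend that reports a single fixed node. It only
// exists to illustrate the registration pattern shared by the file,
// kv, memory and nodes backends being deleted here.
type static struct {
	entries discovery.Entries
}

func init() {
	// Same mechanism the deleted backends use: registration by scheme name.
	discovery.Register("static", &static{})
}

// Initialize parses the single "host:port" passed as the discovery URI.
func (s *static) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error {
	entry, err := discovery.NewEntry(uris)
	if err != nil {
		return err
	}
	s.entries = discovery.Entries{entry}
	return nil
}

// Watch emits the fixed entry once and then waits for the stop signal.
func (s *static) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
	ch := make(chan discovery.Entries, 1)
	go func() {
		defer close(ch)
		ch <- s.entries
		<-stopCh
	}()
	return ch, nil
}

// Register is not supported for a static list.
func (s *static) Register(addr string) error {
	return discovery.ErrNotImplemented
}
```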
@ -1,49 +0,0 @@
package memory // import "github.com/docker/docker/pkg/discovery/memory"

import (
	"testing"

	"github.com/docker/docker/internal/test/suite"
	"github.com/docker/docker/pkg/discovery"
	"gotest.tools/v3/assert"
)

// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) {
	suite.Run(t, &discoverySuite{})
}

type discoverySuite struct{}

func (s *discoverySuite) TestWatch(c *testing.T) {
	d := &Discovery{}
	d.Initialize("foo", 1000, 0, nil)
	stopCh := make(chan struct{})
	ch, errCh := d.Watch(stopCh)

	// We have to drain the error channel otherwise Watch will get stuck.
	go func() {
		for range errCh {
		}
	}()

	expected := discovery.Entries{
		&discovery.Entry{Host: "1.1.1.1", Port: "1111"},
	}

	assert.Assert(c, d.Register("1.1.1.1:1111") == nil)
	assert.DeepEqual(c, <-ch, expected)

	expected = discovery.Entries{
		&discovery.Entry{Host: "1.1.1.1", Port: "1111"},
		&discovery.Entry{Host: "2.2.2.2", Port: "2222"},
	}

	assert.Assert(c, d.Register("2.2.2.2:2222") == nil)
	assert.DeepEqual(c, <-ch, expected)

	// Stop and make sure it closes all channels.
	close(stopCh)
	assert.Assert(c, <-ch == nil)
	assert.Assert(c, <-errCh == nil)
}

@ -1,54 +0,0 @@
package nodes // import "github.com/docker/docker/pkg/discovery/nodes"

import (
	"fmt"
	"strings"
	"time"

	"github.com/docker/docker/pkg/discovery"
)

// Discovery is exported
type Discovery struct {
	entries discovery.Entries
}

func init() {
	Init()
}

// Init is exported
func Init() {
	discovery.Register("nodes", &Discovery{})
}

// Initialize is exported
func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error {
	for _, input := range strings.Split(uris, ",") {
		for _, ip := range discovery.Generate(input) {
			entry, err := discovery.NewEntry(ip)
			if err != nil {
				return fmt.Errorf("%s, please check you are using the correct discovery (missing token:// ?)", err.Error())
			}
			s.entries = append(s.entries, entry)
		}
	}

	return nil
}

// Watch is exported
func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
	ch := make(chan discovery.Entries, 1)
	go func() {
		defer close(ch)
		ch <- s.entries
		<-stopCh
	}()
	return ch, nil
}

// Register is exported
func (s *Discovery) Register(addr string) error {
	return discovery.ErrNotImplemented
}

@ -1,51 +0,0 @@
package nodes // import "github.com/docker/docker/pkg/discovery/nodes"

import (
	"testing"

	"github.com/docker/docker/internal/test/suite"
	"github.com/docker/docker/pkg/discovery"
	"gotest.tools/v3/assert"
)

// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) {
	suite.Run(t, &DiscoverySuite{})
}

type DiscoverySuite struct{}

func (s *DiscoverySuite) TestInitialize(c *testing.T) {
	d := &Discovery{}
	d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil)
	assert.Equal(c, len(d.entries), 2)
	assert.Equal(c, d.entries[0].String(), "1.1.1.1:1111")
	assert.Equal(c, d.entries[1].String(), "2.2.2.2:2222")
}

func (s *DiscoverySuite) TestInitializeWithPattern(c *testing.T) {
	d := &Discovery{}
	d.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0, 0, nil)
	assert.Equal(c, len(d.entries), 5)
	assert.Equal(c, d.entries[0].String(), "1.1.1.1:1111")
	assert.Equal(c, d.entries[1].String(), "1.1.1.2:1111")
	assert.Equal(c, d.entries[2].String(), "2.2.2.2:2222")
	assert.Equal(c, d.entries[3].String(), "2.2.2.3:2222")
	assert.Equal(c, d.entries[4].String(), "2.2.2.4:2222")
}

func (s *DiscoverySuite) TestWatch(c *testing.T) {
	d := &Discovery{}
	d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil)
	expected := discovery.Entries{
		&discovery.Entry{Host: "1.1.1.1", Port: "1111"},
		&discovery.Entry{Host: "2.2.2.2", Port: "2222"},
	}
	ch, _ := d.Watch(nil)
	assert.Equal(c, expected.Equals(<-ch), true)
}

func (s *DiscoverySuite) TestRegister(c *testing.T) {
	d := &Discovery{}
	assert.Assert(c, d.Register("0.0.0.0") != nil)
}

@ -60,17 +60,10 @@ github.com/vishvananda/netlink f049be6f391489d3f374498fe0c8
github.com/moby/ipvs 4566ccea0e08d68e9614c3e7a64a23b850c4bb35 # v1.0.1
github.com/google/btree 479b5e81b0a93ec038d201b0b33d17db599531d3 # v1.0.1

github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
github.com/coreos/etcd 973882f697a8db3d59815bf132c6c506434334bd # v3.3.27
github.com/coreos/go-semver 8ab6407b697782a06568d4b7f1db25550ec2e4c6 # v0.2.0
github.com/hashicorp/consul 9a9cc9341bb487651a0399e3fc5e1e8a42e62dd9 # v0.5.2
github.com/miekg/dns 6c0c4e6581f8e173cc562c8b3363ab984e4ae071 # v1.1.27
github.com/ishidawataru/sctp f2269e66cdee387bd321445d5d300893449805be
go.etcd.io/bbolt 232d8fc87f50244f9c808f4745759e08a304c029 # v1.3.5
github.com/json-iterator/go a1ca0830781e007c66b225121d2cdb3a649421f6 # v1.1.10
github.com/modern-go/concurrent bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 # 1.0.3
github.com/modern-go/reflect2 94122c33edd36123c84d5368cfb2b69df93a0ec8 # v1.0.1

# get graph and distribution packages
github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580

@ -152,6 +145,7 @@ golang.org/x/time 3af7569d3a1e776fc2a3c1cec133
github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
github.com/hashicorp/golang-lru 7f827b33c0f158ec5dfbba01bb0b14a4541fd81d # v0.5.3
github.com/coreos/etcd 973882f697a8db3d59815bf132c6c506434334bd # v3.3.27
github.com/coreos/pkg 97fdf19511ea361ae1c100dd393cc47f8dcfa1e1 # v4
code.cloudfoundry.org/clock 02e53af36e6c978af692887ed449b74026d76fec # v1.0.0

vendor/github.com/coreos/etcd/client/README.md (117 lines, generated, vendored)
@ -1,117 +0,0 @@
|
|||
# etcd/client
|
||||
|
||||
etcd/client is the Go client library for etcd.
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client)
|
||||
|
||||
etcd uses `cmd/vendor` directory to store external dependencies, which are
|
||||
to be compiled into etcd release binaries. `client` can be imported without
|
||||
vendoring. For full compatibility, it is recommended to vendor builds using
|
||||
etcd's vendored packages, using tools like godep, as in
|
||||
[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
|
||||
For more detail, please read [Go vendor design](https://golang.org/s/go15vendor).
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
go get github.com/coreos/etcd/client
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
"context"
|
||||
|
||||
"github.com/coreos/etcd/client"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cfg := client.Config{
|
||||
Endpoints: []string{"http://127.0.0.1:2379"},
|
||||
Transport: client.DefaultTransport,
|
||||
// set timeout per request to fail fast when the target endpoint is unavailable
|
||||
HeaderTimeoutPerRequest: time.Second,
|
||||
}
|
||||
c, err := client.New(cfg)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
kapi := client.NewKeysAPI(c)
|
||||
// set "/foo" key with "bar" value
|
||||
log.Print("Setting '/foo' key with 'bar' value")
|
||||
resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
} else {
|
||||
// print common key info
|
||||
log.Printf("Set is done. Metadata is %q\n", resp)
|
||||
}
|
||||
// get "/foo" key's value
|
||||
log.Print("Getting '/foo' key value")
|
||||
resp, err = kapi.Get(context.Background(), "/foo", nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
} else {
|
||||
// print common key info
|
||||
log.Printf("Get is done. Metadata is %q\n", resp)
|
||||
// print value
|
||||
log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
etcd client might return three types of errors.
|
||||
|
||||
- context error
|
||||
|
||||
Each API call has its first parameter as `context`. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the responding context error will be returned no matter what internal errors the API call has already encountered.
|
||||
|
||||
- cluster error
|
||||
|
||||
Each API call tries to send request to the cluster endpoints one by one until it successfully gets a response. If a requests to an endpoint fails, due to exceeding per request timeout or connection issues, the error will be added into a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
|
||||
|
||||
- response error
|
||||
|
||||
If the response gets from the cluster is invalid, a plain string error will be returned. For example, it might be a invalid JSON error.
|
||||
|
||||
Here is the example code to handle client errors:
|
||||
|
||||
```go
|
||||
cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
|
||||
c, err := client.New(cfg)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
kapi := client.NewKeysAPI(c)
|
||||
resp, err := kapi.Set(ctx, "test", "bar", nil)
|
||||
if err != nil {
|
||||
if err == context.Canceled {
|
||||
// ctx is canceled by another routine
|
||||
} else if err == context.DeadlineExceeded {
|
||||
// ctx is attached with a deadline and it exceeded
|
||||
} else if cerr, ok := err.(*client.ClusterError); ok {
|
||||
// process (cerr.Errors)
|
||||
} else {
|
||||
// bad cluster endpoints, which are not etcd servers
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Caveat
|
||||
|
||||
1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well. This saves socket resources, and improves efficiency for both client and server side. This preference doesn't remove consistency from the data consumed by the client because data replicated to each etcd member has already passed through the consensus process.
|
||||
|
||||
2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all errors happened.
|
||||
|
||||
3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.
|
||||
|
||||
4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.
|
vendor/github.com/coreos/etcd/client/auth_role.go (236 lines, generated, vendored)
@ -1,236 +0,0 @@
|
|||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
type Role struct {
|
||||
Role string `json:"role"`
|
||||
Permissions Permissions `json:"permissions"`
|
||||
Grant *Permissions `json:"grant,omitempty"`
|
||||
Revoke *Permissions `json:"revoke,omitempty"`
|
||||
}
|
||||
|
||||
type Permissions struct {
|
||||
KV rwPermission `json:"kv"`
|
||||
}
|
||||
|
||||
type rwPermission struct {
|
||||
Read []string `json:"read"`
|
||||
Write []string `json:"write"`
|
||||
}
|
||||
|
||||
type PermissionType int
|
||||
|
||||
const (
|
||||
ReadPermission PermissionType = iota
|
||||
WritePermission
|
||||
ReadWritePermission
|
||||
)
|
||||
|
||||
// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
|
||||
// interact with etcd's role creation and modification features.
|
||||
func NewAuthRoleAPI(c Client) AuthRoleAPI {
|
||||
return &httpAuthRoleAPI{
|
||||
client: c,
|
||||
}
|
||||
}
|
||||
|
||||
type AuthRoleAPI interface {
|
||||
// AddRole adds a role.
|
||||
AddRole(ctx context.Context, role string) error
|
||||
|
||||
// RemoveRole removes a role.
|
||||
RemoveRole(ctx context.Context, role string) error
|
||||
|
||||
// GetRole retrieves role details.
|
||||
GetRole(ctx context.Context, role string) (*Role, error)
|
||||
|
||||
// GrantRoleKV grants a role some permission prefixes for the KV store.
|
||||
GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
|
||||
|
||||
// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
|
||||
RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
|
||||
|
||||
// ListRoles lists roles.
|
||||
ListRoles(ctx context.Context) ([]string, error)
|
||||
}
|
||||
|
||||
type httpAuthRoleAPI struct {
|
||||
client httpClient
|
||||
}
|
||||
|
||||
type authRoleAPIAction struct {
|
||||
verb string
|
||||
name string
|
||||
role *Role
|
||||
}
|
||||
|
||||
type authRoleAPIList struct{}
|
||||
|
||||
func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2AuthURL(ep, "roles", "")
|
||||
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
return req
|
||||
}
|
||||
|
||||
func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2AuthURL(ep, "roles", l.name)
|
||||
if l.role == nil {
|
||||
req, _ := http.NewRequest(l.verb, u.String(), nil)
|
||||
return req
|
||||
}
|
||||
b, err := json.Marshal(l.role)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
body := bytes.NewReader(b)
|
||||
req, _ := http.NewRequest(l.verb, u.String(), body)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
return req
|
||||
}
|
||||
|
||||
func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
|
||||
resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var roleList struct {
|
||||
Roles []Role `json:"roles"`
|
||||
}
|
||||
if err = json.Unmarshal(body, &roleList); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ret := make([]string, 0, len(roleList.Roles))
|
||||
for _, r := range roleList.Roles {
|
||||
ret = append(ret, r.Role)
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
|
||||
role := &Role{
|
||||
Role: rolename,
|
||||
}
|
||||
return r.addRemoveRole(ctx, &authRoleAPIAction{
|
||||
verb: "PUT",
|
||||
name: rolename,
|
||||
role: role,
|
||||
})
|
||||
}
|
||||
|
||||
func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
|
||||
return r.addRemoveRole(ctx, &authRoleAPIAction{
|
||||
verb: "DELETE",
|
||||
name: rolename,
|
||||
})
|
||||
}
|
||||
|
||||
func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
|
||||
resp, body, err := r.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
||||
var sec authError
|
||||
err := json.Unmarshal(body, &sec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return sec
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
|
||||
return r.modRole(ctx, &authRoleAPIAction{
|
||||
verb: "GET",
|
||||
name: rolename,
|
||||
})
|
||||
}
|
||||
|
||||
func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
|
||||
var out rwPermission
|
||||
switch permType {
|
||||
case ReadPermission:
|
||||
out.Read = prefixes
|
||||
case WritePermission:
|
||||
out.Write = prefixes
|
||||
case ReadWritePermission:
|
||||
out.Read = prefixes
|
||||
out.Write = prefixes
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
|
||||
rwp := buildRWPermission(prefixes, permType)
|
||||
role := &Role{
|
||||
Role: rolename,
|
||||
Grant: &Permissions{
|
||||
KV: rwp,
|
||||
},
|
||||
}
|
||||
return r.modRole(ctx, &authRoleAPIAction{
|
||||
verb: "PUT",
|
||||
name: rolename,
|
||||
role: role,
|
||||
})
|
||||
}
|
||||
|
||||
func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
|
||||
rwp := buildRWPermission(prefixes, permType)
|
||||
role := &Role{
|
||||
Role: rolename,
|
||||
Revoke: &Permissions{
|
||||
KV: rwp,
|
||||
},
|
||||
}
|
||||
return r.modRole(ctx, &authRoleAPIAction{
|
||||
verb: "PUT",
|
||||
name: rolename,
|
||||
role: role,
|
||||
})
|
||||
}
|
||||
|
||||
func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
|
||||
resp, body, err := r.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||
var sec authError
|
||||
err = json.Unmarshal(body, &sec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, sec
|
||||
}
|
||||
var role Role
|
||||
if err = json.Unmarshal(body, &role); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &role, nil
|
||||
}
|
vendor/github.com/coreos/etcd/client/auth_user.go (319 lines, generated, vendored)
@ -1,319 +0,0 @@
|
|||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultV2AuthPrefix = "/v2/auth"
|
||||
)
|
||||
|
||||
type User struct {
|
||||
User string `json:"user"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Roles []string `json:"roles"`
|
||||
Grant []string `json:"grant,omitempty"`
|
||||
Revoke []string `json:"revoke,omitempty"`
|
||||
}
|
||||
|
||||
// userListEntry is the user representation given by the server for ListUsers
|
||||
type userListEntry struct {
|
||||
User string `json:"user"`
|
||||
Roles []Role `json:"roles"`
|
||||
}
|
||||
|
||||
type UserRoles struct {
|
||||
User string `json:"user"`
|
||||
Roles []Role `json:"roles"`
|
||||
}
|
||||
|
||||
func v2AuthURL(ep url.URL, action string, name string) *url.URL {
|
||||
if name != "" {
|
||||
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
|
||||
return &ep
|
||||
}
|
||||
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
|
||||
return &ep
|
||||
}
|
||||
|
||||
// NewAuthAPI constructs a new AuthAPI that uses HTTP to
|
||||
// interact with etcd's general auth features.
|
||||
func NewAuthAPI(c Client) AuthAPI {
|
||||
return &httpAuthAPI{
|
||||
client: c,
|
||||
}
|
||||
}
|
||||
|
||||
type AuthAPI interface {
|
||||
// Enable auth.
|
||||
Enable(ctx context.Context) error
|
||||
|
||||
// Disable auth.
|
||||
Disable(ctx context.Context) error
|
||||
}
|
||||
|
||||
type httpAuthAPI struct {
|
||||
client httpClient
|
||||
}
|
||||
|
||||
func (s *httpAuthAPI) Enable(ctx context.Context) error {
|
||||
return s.enableDisable(ctx, &authAPIAction{"PUT"})
|
||||
}
|
||||
|
||||
func (s *httpAuthAPI) Disable(ctx context.Context) error {
|
||||
return s.enableDisable(ctx, &authAPIAction{"DELETE"})
|
||||
}
|
||||
|
||||
func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
|
||||
resp, body, err := s.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
||||
var sec authError
|
||||
err = json.Unmarshal(body, &sec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return sec
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type authAPIAction struct {
|
||||
verb string
|
||||
}
|
||||
|
||||
func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2AuthURL(ep, "enable", "")
|
||||
req, _ := http.NewRequest(l.verb, u.String(), nil)
|
||||
return req
|
||||
}
|
||||
|
||||
type authError struct {
|
||||
Message string `json:"message"`
|
||||
Code int `json:"-"`
|
||||
}
|
||||
|
||||
func (e authError) Error() string {
|
||||
return e.Message
|
||||
}
|
||||
|
||||
// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
|
||||
// interact with etcd's user creation and modification features.
|
||||
func NewAuthUserAPI(c Client) AuthUserAPI {
|
||||
return &httpAuthUserAPI{
|
||||
client: c,
|
||||
}
|
||||
}
|
||||
|
||||
type AuthUserAPI interface {
|
||||
// AddUser adds a user.
|
||||
AddUser(ctx context.Context, username string, password string) error
|
||||
|
||||
// RemoveUser removes a user.
|
||||
RemoveUser(ctx context.Context, username string) error
|
||||
|
||||
// GetUser retrieves user details.
|
||||
GetUser(ctx context.Context, username string) (*User, error)
|
||||
|
||||
// GrantUser grants a user some permission roles.
|
||||
GrantUser(ctx context.Context, username string, roles []string) (*User, error)
|
||||
|
||||
// RevokeUser revokes some permission roles from a user.
|
||||
RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
|
||||
|
||||
// ChangePassword changes the user's password.
|
||||
ChangePassword(ctx context.Context, username string, password string) (*User, error)
|
||||
|
||||
// ListUsers lists the users.
|
||||
ListUsers(ctx context.Context) ([]string, error)
|
||||
}
|
||||
|
||||
type httpAuthUserAPI struct {
|
||||
client httpClient
|
||||
}
|
||||
|
||||
type authUserAPIAction struct {
|
||||
verb string
|
||||
username string
|
||||
user *User
|
||||
}
|
||||
|
||||
type authUserAPIList struct{}
|
||||
|
||||
func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2AuthURL(ep, "users", "")
|
||||
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
return req
|
||||
}
|
||||
|
||||
func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2AuthURL(ep, "users", l.username)
|
||||
if l.user == nil {
|
||||
req, _ := http.NewRequest(l.verb, u.String(), nil)
|
||||
return req
|
||||
}
|
||||
b, err := json.Marshal(l.user)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
body := bytes.NewReader(b)
|
||||
req, _ := http.NewRequest(l.verb, u.String(), body)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
return req
|
||||
}
|
||||
|
||||
func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
|
||||
resp, body, err := u.client.Do(ctx, &authUserAPIList{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||
var sec authError
|
||||
err = json.Unmarshal(body, &sec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, sec
|
||||
}
|
||||
|
||||
var userList struct {
|
||||
Users []userListEntry `json:"users"`
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(body, &userList); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret := make([]string, 0, len(userList.Users))
|
||||
for _, u := range userList.Users {
|
||||
ret = append(ret, u.User)
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
|
||||
user := &User{
|
||||
User: username,
|
||||
Password: password,
|
||||
}
|
||||
return u.addRemoveUser(ctx, &authUserAPIAction{
|
||||
verb: "PUT",
|
||||
username: username,
|
||||
user: user,
|
||||
})
|
||||
}
|
||||
|
||||
func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
|
||||
return u.addRemoveUser(ctx, &authUserAPIAction{
|
||||
verb: "DELETE",
|
||||
username: username,
|
||||
})
|
||||
}
|
||||
|
||||
func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error {
|
||||
resp, body, err := u.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
|
||||
var sec authError
|
||||
err = json.Unmarshal(body, &sec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return sec
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
|
||||
return u.modUser(ctx, &authUserAPIAction{
|
||||
verb: "GET",
|
||||
username: username,
|
||||
})
|
||||
}
|
||||
|
||||
func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
|
||||
user := &User{
|
||||
User: username,
|
||||
Grant: roles,
|
||||
}
|
||||
return u.modUser(ctx, &authUserAPIAction{
|
||||
verb: "PUT",
|
||||
username: username,
|
||||
user: user,
|
||||
})
|
||||
}
|
||||
|
||||
func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
|
||||
user := &User{
|
||||
User: username,
|
||||
Revoke: roles,
|
||||
}
|
||||
return u.modUser(ctx, &authUserAPIAction{
|
||||
verb: "PUT",
|
||||
username: username,
|
||||
user: user,
|
||||
})
|
||||
}
|
||||
|
||||
func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
|
||||
user := &User{
|
||||
User: username,
|
||||
Password: password,
|
||||
}
|
||||
return u.modUser(ctx, &authUserAPIAction{
|
||||
verb: "PUT",
|
||||
username: username,
|
||||
user: user,
|
||||
})
|
||||
}
|
||||
|
||||
func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) {
|
||||
resp, body, err := u.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||
var sec authError
|
||||
err = json.Unmarshal(body, &sec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, sec
|
||||
}
|
||||
var user User
|
||||
if err = json.Unmarshal(body, &user); err != nil {
|
||||
var userR UserRoles
|
||||
if urerr := json.Unmarshal(body, &userR); urerr != nil {
|
||||
return nil, err
|
||||
}
|
||||
user.User = userR.User
|
||||
for _, r := range userR.Roles {
|
||||
user.Roles = append(user.Roles, r.Role)
|
||||
}
|
||||
}
|
||||
return &user, nil
|
||||
}
|
vendor/github.com/coreos/etcd/client/cancelreq.go (18 lines, generated, vendored)

@ -1,18 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// borrowed from golang/net/context/ctxhttp/cancelreq.go

package client

import "net/http"

func requestCanceler(tr CancelableTransport, req *http.Request) func() {
	ch := make(chan struct{})
	req.Cancel = ch

	return func() {
		close(ch)
	}
}

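As an aside (not something this changeset introduces), `requestCanceler` predates request-scoped contexts and relies on the `req.Cancel` channel, which the standard library has since deprecated in favour of contexts. A minimal sketch of the modern equivalent, using only the standard library and a placeholder URL, looks like this:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// The context plays the role of the req.Cancel channel above:
	// cancelling it (or hitting the timeout) aborts the in-flight request.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://127.0.0.1:2379/version", nil)
	if err != nil {
		panic(err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request cancelled or failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```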
vendor/github.com/coreos/etcd/client/client.go (710 lines, generated, vendored)
@ -1,710 +0,0 @@
|
|||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/version"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNoEndpoints = errors.New("client: no endpoints available")
|
||||
ErrTooManyRedirects = errors.New("client: too many redirects")
|
||||
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
|
||||
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
|
||||
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
|
||||
|
||||
// oneShotCtxValue is set on a context using WithValue(&oneShotValue) so
|
||||
// that Do() will not retry a request
|
||||
oneShotCtxValue interface{}
|
||||
)
|
||||
|
||||
var DefaultRequestTimeout = 5 * time.Second
|
||||
|
||||
var DefaultTransport CancelableTransport = &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
type EndpointSelectionMode int
|
||||
|
||||
const (
|
||||
// EndpointSelectionRandom is the default value of the 'SelectionMode'.
|
||||
// As the name implies, the client object will pick a node from the members
|
||||
// of the cluster in a random fashion. If the cluster has three members, A, B,
|
||||
// and C, the client picks any node from its three members as its request
|
||||
// destination.
|
||||
EndpointSelectionRandom EndpointSelectionMode = iota
|
||||
|
||||
// If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
|
||||
// requests are sent directly to the cluster leader. This reduces
|
||||
// forwarding roundtrips compared to making requests to etcd followers
|
||||
// who then forward them to the cluster leader. In the event of a leader
|
||||
// failure, however, clients configured this way cannot prioritize among
|
||||
// the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
|
||||
// to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
|
||||
// maintain its knowledge of current cluster state.
|
||||
//
|
||||
// This mode should be used with Client.AutoSync().
|
||||
EndpointSelectionPrioritizeLeader
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
// Endpoints defines a set of URLs (schemes, hosts and ports only)
|
||||
// that can be used to communicate with a logical etcd cluster. For
|
||||
// example, a three-node cluster could be provided like so:
|
||||
//
|
||||
// Endpoints: []string{
|
||||
// "http://node1.example.com:2379",
|
||||
// "http://node2.example.com:2379",
|
||||
// "http://node3.example.com:2379",
|
||||
// }
|
||||
//
|
||||
// If multiple endpoints are provided, the Client will attempt to
|
||||
// use them all in the event that one or more of them are unusable.
|
||||
//
|
||||
// If Client.Sync is ever called, the Client may cache an alternate
|
||||
// set of endpoints to continue operation.
|
||||
Endpoints []string
|
||||
|
||||
// Transport is used by the Client to drive HTTP requests. If not
|
||||
// provided, DefaultTransport will be used.
|
||||
Transport CancelableTransport
|
||||
|
||||
// CheckRedirect specifies the policy for handling HTTP redirects.
|
||||
// If CheckRedirect is not nil, the Client calls it before
|
||||
// following an HTTP redirect. The sole argument is the number of
|
||||
// requests that have already been made. If CheckRedirect returns
|
||||
// an error, Client.Do will not make any further requests and return
|
||||
// the error back it to the caller.
|
||||
//
|
||||
// If CheckRedirect is nil, the Client uses its default policy,
|
||||
// which is to stop after 10 consecutive requests.
|
||||
CheckRedirect CheckRedirectFunc
|
||||
|
||||
// Username specifies the user credential to add as an authorization header
|
||||
Username string
|
||||
|
||||
// Password is the password for the specified user to add as an authorization header
|
||||
// to the request.
|
||||
Password string
|
||||
|
||||
// HeaderTimeoutPerRequest specifies the time limit to wait for response
|
||||
// header in a single request made by the Client. The timeout includes
|
||||
// connection time, any redirects, and header wait time.
|
||||
//
|
||||
// For non-watch GET request, server returns the response body immediately.
|
||||
// For PUT/POST/DELETE request, server will attempt to commit request
|
||||
// before responding, which is expected to take `100ms + 2 * RTT`.
|
||||
// For watch request, server returns the header immediately to notify Client
|
||||
// watch start. But if server is behind some kind of proxy, the response
|
||||
// header may be cached at proxy, and Client cannot rely on this behavior.
|
||||
//
|
||||
// Especially, wait request will ignore this timeout.
|
||||
//
|
||||
// One API call may send multiple requests to different etcd servers until it
|
||||
// succeeds. Use context of the API to specify the overall timeout.
|
||||
//
|
||||
// A HeaderTimeoutPerRequest of zero means no timeout.
|
||||
HeaderTimeoutPerRequest time.Duration
|
||||
|
||||
// SelectionMode is an EndpointSelectionMode enum that specifies the
|
||||
// policy for choosing the etcd cluster node to which requests are sent.
|
||||
SelectionMode EndpointSelectionMode
|
||||
}
|
||||
|
||||
func (cfg *Config) transport() CancelableTransport {
|
||||
if cfg.Transport == nil {
|
||||
return DefaultTransport
|
||||
}
|
||||
return cfg.Transport
|
||||
}
|
||||
|
||||
func (cfg *Config) checkRedirect() CheckRedirectFunc {
|
||||
if cfg.CheckRedirect == nil {
|
||||
return DefaultCheckRedirect
|
||||
}
|
||||
return cfg.CheckRedirect
|
||||
}
|
||||
|
||||
// CancelableTransport mimics net/http.Transport, but requires that
|
||||
// the object also support request cancellation.
|
||||
type CancelableTransport interface {
|
||||
http.RoundTripper
|
||||
CancelRequest(req *http.Request)
|
||||
}
|
||||
|
||||
type CheckRedirectFunc func(via int) error
|
||||
|
||||
// DefaultCheckRedirect follows up to 10 redirects, but no more.
|
||||
var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
|
||||
if via > 10 {
|
||||
return ErrTooManyRedirects
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Client interface {
|
||||
// Sync updates the internal cache of the etcd cluster's membership.
|
||||
Sync(context.Context) error
|
||||
|
||||
// AutoSync periodically calls Sync() every given interval.
|
||||
// The recommended sync interval is 10 seconds to 1 minute, which does
|
||||
// not bring too much overhead to server and makes client catch up the
|
||||
// cluster change in time.
|
||||
//
|
||||
// The example to use it:
|
||||
//
|
||||
// for {
|
||||
// err := client.AutoSync(ctx, 10*time.Second)
|
||||
// if err == context.DeadlineExceeded || err == context.Canceled {
|
||||
// break
|
||||
// }
|
||||
// log.Print(err)
|
||||
// }
|
||||
AutoSync(context.Context, time.Duration) error
|
||||
|
||||
// Endpoints returns a copy of the current set of API endpoints used
|
||||
// by Client to resolve HTTP requests. If Sync has ever been called,
|
||||
// this may differ from the initial Endpoints provided in the Config.
|
||||
Endpoints() []string
|
||||
|
||||
// SetEndpoints sets the set of API endpoints used by Client to resolve
|
||||
// HTTP requests. If the given endpoints are not valid, an error will be
|
||||
// returned
|
||||
SetEndpoints(eps []string) error
|
||||
|
||||
// GetVersion retrieves the current etcd server and cluster version
|
||||
GetVersion(ctx context.Context) (*version.Versions, error)
|
||||
|
||||
httpClient
|
||||
}
|
||||
|
||||
func New(cfg Config) (Client, error) {
|
||||
c := &httpClusterClient{
|
||||
clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
|
||||
rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
|
||||
selectionMode: cfg.SelectionMode,
|
||||
}
|
||||
if cfg.Username != "" {
|
||||
c.credentials = &credentials{
|
||||
username: cfg.Username,
|
||||
password: cfg.Password,
|
||||
}
|
||||
}
|
||||
if err := c.SetEndpoints(cfg.Endpoints); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
type httpClient interface {
|
||||
Do(context.Context, httpAction) (*http.Response, []byte, error)
|
||||
}
|
||||
|
||||
func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
|
||||
return func(ep url.URL) httpClient {
|
||||
return &redirectFollowingHTTPClient{
|
||||
checkRedirect: cr,
|
||||
client: &simpleHTTPClient{
|
||||
transport: tr,
|
||||
endpoint: ep,
|
||||
headerTimeout: headerTimeout,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type credentials struct {
|
||||
username string
|
||||
password string
|
||||
}
|
||||
|
||||
type httpClientFactory func(url.URL) httpClient
|
||||
|
||||
type httpAction interface {
|
||||
HTTPRequest(url.URL) *http.Request
|
||||
}
|
||||
|
||||
type httpClusterClient struct {
|
||||
clientFactory httpClientFactory
|
||||
endpoints []url.URL
|
||||
pinned int
|
||||
credentials *credentials
|
||||
sync.RWMutex
|
||||
rand *rand.Rand
|
||||
selectionMode EndpointSelectionMode
|
||||
}
|
||||
|
||||
func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
|
||||
ceps := make([]url.URL, len(eps))
|
||||
copy(ceps, eps)
|
||||
|
||||
// To perform a lookup on the new endpoint list without using the current
|
||||
// client, we'll copy it
|
||||
clientCopy := &httpClusterClient{
|
||||
clientFactory: c.clientFactory,
|
||||
credentials: c.credentials,
|
||||
rand: c.rand,
|
||||
|
||||
pinned: 0,
|
||||
endpoints: ceps,
|
||||
}
|
||||
|
||||
mAPI := NewMembersAPI(clientCopy)
|
||||
leader, err := mAPI.Leader(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(leader.ClientURLs) == 0 {
|
||||
return "", ErrNoLeaderEndpoint
|
||||
}
|
||||
|
||||
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
|
||||
}
|
||||
|
||||
func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
|
||||
if len(eps) == 0 {
|
||||
return []url.URL{}, ErrNoEndpoints
|
||||
}
|
||||
|
||||
neps := make([]url.URL, len(eps))
|
||||
for i, ep := range eps {
|
||||
u, err := url.Parse(ep)
|
||||
if err != nil {
|
||||
return []url.URL{}, err
|
||||
}
|
||||
neps[i] = *u
|
||||
}
|
||||
return neps, nil
|
||||
}
|
||||
|
||||
func (c *httpClusterClient) SetEndpoints(eps []string) error {
|
||||
neps, err := c.parseEndpoints(eps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
c.endpoints = shuffleEndpoints(c.rand, neps)
|
||||
// We're not doing anything for PrioritizeLeader here. This is
|
||||
// due to not having a context meaning we can't call getLeaderEndpoint
|
||||
// However, if you're using PrioritizeLeader, you've already been told
|
||||
// to regularly call sync, where we do have a ctx, and can figure the
|
||||
// leader. PrioritizeLeader is also quite a loose guarantee, so deal
|
||||
// with it
|
||||
c.pinned = 0
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
|
||||
action := act
|
||||
c.RLock()
|
||||
leps := len(c.endpoints)
|
||||
eps := make([]url.URL, leps)
|
||||
n := copy(eps, c.endpoints)
|
||||
pinned := c.pinned
|
||||
|
||||
if c.credentials != nil {
|
||||
action = &authedAction{
|
||||
act: act,
|
||||
credentials: *c.credentials,
|
||||
}
|
||||
}
|
||||
c.RUnlock()
|
||||
|
||||
if leps == 0 {
|
||||
return nil, nil, ErrNoEndpoints
|
||||
}
|
||||
|
||||
if leps != n {
|
||||
return nil, nil, errors.New("unable to pick endpoint: copy failed")
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
var body []byte
|
||||
var err error
|
||||
cerr := &ClusterError{}
|
||||
isOneShot := ctx.Value(&oneShotCtxValue) != nil
|
||||
|
||||
for i := pinned; i < leps+pinned; i++ {
|
||||
k := i % leps
|
||||
hc := c.clientFactory(eps[k])
|
||||
resp, body, err = hc.Do(ctx, action)
|
||||
if err != nil {
|
||||
cerr.Errors = append(cerr.Errors, err)
|
||||
if err == ctx.Err() {
|
||||
return nil, nil, ctx.Err()
|
||||
}
|
||||
if err == context.Canceled || err == context.DeadlineExceeded {
|
||||
return nil, nil, err
|
||||
}
|
||||
} else if resp.StatusCode/100 == 5 {
|
||||
switch resp.StatusCode {
|
||||
case http.StatusInternalServerError, http.StatusServiceUnavailable:
|
||||
// TODO: make sure this is a no leader response
|
||||
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
|
||||
default:
|
||||
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
|
||||
}
|
||||
err = cerr.Errors[0]
|
||||
}
|
||||
if err != nil {
|
||||
if !isOneShot {
|
||||
continue
|
||||
}
|
||||
c.Lock()
|
||||
c.pinned = (k + 1) % leps
|
||||
c.Unlock()
|
||||
return nil, nil, err
|
||||
}
|
||||
if k != pinned {
|
||||
c.Lock()
|
||||
c.pinned = k
|
||||
c.Unlock()
|
||||
}
|
||||
return resp, body, nil
|
||||
}
|
||||
|
||||
return nil, nil, cerr
|
||||
}
|
||||
|
||||
func (c *httpClusterClient) Endpoints() []string {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
|
||||
eps := make([]string, len(c.endpoints))
|
||||
for i, ep := range c.endpoints {
|
||||
eps[i] = ep.String()
|
||||
}
|
||||
|
||||
return eps
|
||||
}
|
||||
|
||||
func (c *httpClusterClient) Sync(ctx context.Context) error {
|
||||
mAPI := NewMembersAPI(c)
|
||||
ms, err := mAPI.List(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var eps []string
|
||||
for _, m := range ms {
|
||||
eps = append(eps, m.ClientURLs...)
|
||||
}
|
||||
|
||||
neps, err := c.parseEndpoints(eps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
npin := 0
|
||||
|
||||
switch c.selectionMode {
|
||||
case EndpointSelectionRandom:
|
||||
c.RLock()
|
||||
eq := endpointsEqual(c.endpoints, neps)
|
||||
c.RUnlock()
|
||||
|
||||
if eq {
|
||||
return nil
|
||||
}
|
||||
// When items in the endpoint list changes, we choose a new pin
|
||||
neps = shuffleEndpoints(c.rand, neps)
|
||||
case EndpointSelectionPrioritizeLeader:
|
||||
nle, err := c.getLeaderEndpoint(ctx, neps)
|
||||
if err != nil {
|
||||
return ErrNoLeaderEndpoint
|
||||
}
|
||||
|
||||
for i, n := range neps {
|
||||
if n.String() == nle {
|
||||
npin = i
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.endpoints = neps
|
||||
c.pinned = npin
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
err := c.Sync(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-ticker.C:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
|
||||
act := &getAction{Prefix: "/version"}
|
||||
|
||||
resp, body, err := c.Do(ctx, act)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK:
|
||||
if len(body) == 0 {
|
||||
return nil, ErrEmptyBody
|
||||
}
|
||||
var vresp version.Versions
|
||||
if err := json.Unmarshal(body, &vresp); err != nil {
|
||||
return nil, ErrInvalidJSON
|
||||
}
|
||||
return &vresp, nil
|
||||
default:
|
||||
var etcdErr Error
|
||||
if err := json.Unmarshal(body, &etcdErr); err != nil {
|
||||
return nil, ErrInvalidJSON
|
||||
}
|
||||
return nil, etcdErr
|
||||
}
|
||||
}
|
||||
|
||||
type roundTripResponse struct {
|
||||
resp *http.Response
|
||||
err error
|
||||
}
|
||||
|
||||
type simpleHTTPClient struct {
|
||||
transport CancelableTransport
|
||||
endpoint url.URL
|
||||
headerTimeout time.Duration
|
||||
}
|
||||
|
||||
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
|
||||
req := act.HTTPRequest(c.endpoint)
|
||||
|
||||
if err := printcURL(req); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
isWait := false
|
||||
if req != nil && req.URL != nil {
|
||||
ws := req.URL.Query().Get("wait")
|
||||
if len(ws) != 0 {
|
||||
var err error
|
||||
isWait, err = strconv.ParseBool(ws)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var hctx context.Context
|
||||
var hcancel context.CancelFunc
|
||||
if !isWait && c.headerTimeout > 0 {
|
||||
hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
|
||||
} else {
|
||||
hctx, hcancel = context.WithCancel(ctx)
|
||||
}
|
||||
defer hcancel()
|
||||
|
||||
reqcancel := requestCanceler(c.transport, req)
|
||||
|
||||
rtchan := make(chan roundTripResponse, 1)
|
||||
go func() {
|
||||
resp, err := c.transport.RoundTrip(req)
|
||||
rtchan <- roundTripResponse{resp: resp, err: err}
|
||||
close(rtchan)
|
||||
}()
|
||||
|
||||
var resp *http.Response
|
||||
var err error
|
||||
|
||||
select {
|
||||
case rtresp := <-rtchan:
|
||||
resp, err = rtresp.resp, rtresp.err
|
||||
case <-hctx.Done():
|
||||
// cancel and wait for request to actually exit before continuing
|
||||
reqcancel()
|
||||
rtresp := <-rtchan
|
||||
resp = rtresp.resp
|
||||
switch {
|
||||
case ctx.Err() != nil:
|
||||
err = ctx.Err()
|
||||
case hctx.Err() != nil:
|
||||
err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
|
||||
default:
|
||||
panic("failed to get error from context")
|
||||
}
|
||||
}
|
||||
|
||||
// always check for resp nil-ness to deal with possible
|
||||
// race conditions between channels above
|
||||
defer func() {
|
||||
if resp != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var body []byte
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
body, err = ioutil.ReadAll(resp.Body)
|
||||
done <- struct{}{}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
resp.Body.Close()
|
||||
<-done
|
||||
return nil, nil, ctx.Err()
|
||||
case <-done:
|
||||
}
|
||||
|
||||
return resp, body, err
|
||||
}
|
||||
|
||||
type authedAction struct {
|
||||
act httpAction
|
||||
credentials credentials
|
||||
}
|
||||
|
||||
func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
|
||||
r := a.act.HTTPRequest(url)
|
||||
r.SetBasicAuth(a.credentials.username, a.credentials.password)
|
||||
return r
|
||||
}
|
||||
|
||||
type redirectFollowingHTTPClient struct {
|
||||
client httpClient
|
||||
checkRedirect CheckRedirectFunc
|
||||
}
|
||||
|
||||
func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
|
||||
next := act
|
||||
for i := 0; i < 100; i++ {
|
||||
if i > 0 {
|
||||
if err := r.checkRedirect(i); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
resp, body, err := r.client.Do(ctx, next)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if resp.StatusCode/100 == 3 {
|
||||
hdr := resp.Header.Get("Location")
|
||||
if hdr == "" {
|
||||
return nil, nil, fmt.Errorf("Location header not set")
|
||||
}
|
||||
loc, err := url.Parse(hdr)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr)
|
||||
}
|
||||
next = &redirectedHTTPAction{
|
||||
action: act,
|
||||
location: *loc,
|
||||
}
|
||||
continue
|
||||
}
|
||||
return resp, body, nil
|
||||
}
|
||||
|
||||
return nil, nil, errTooManyRedirectChecks
|
||||
}
|
||||
|
||||
type redirectedHTTPAction struct {
|
||||
action httpAction
|
||||
location url.URL
|
||||
}
|
||||
|
||||
func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
|
||||
orig := r.action.HTTPRequest(ep)
|
||||
orig.URL = &r.location
|
||||
return orig
|
||||
}
|
||||
|
||||
func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
|
||||
// copied from Go 1.9<= rand.Rand.Perm
|
||||
n := len(eps)
|
||||
p := make([]int, n)
|
||||
for i := 0; i < n; i++ {
|
||||
j := r.Intn(i + 1)
|
||||
p[i] = p[j]
|
||||
p[j] = i
|
||||
}
|
||||
neps := make([]url.URL, n)
|
||||
for i, k := range p {
|
||||
neps[i] = eps[k]
|
||||
}
|
||||
return neps
|
||||
}
|
||||
|
||||
func endpointsEqual(left, right []url.URL) bool {
|
||||
if len(left) != len(right) {
|
||||
return false
|
||||
}
|
||||
|
||||
sLeft := make([]string, len(left))
|
||||
sRight := make([]string, len(right))
|
||||
for i, l := range left {
|
||||
sLeft[i] = l.String()
|
||||
}
|
||||
for i, r := range right {
|
||||
sRight[i] = r.String()
|
||||
}
|
||||
|
||||
sort.Strings(sLeft)
|
||||
sort.Strings(sRight)
|
||||
for i := range sLeft {
|
||||
if sLeft[i] != sRight[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
37  vendor/github.com/coreos/etcd/client/cluster_error.go (generated, vendored)
@@ -1,37 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package client

import "fmt"

type ClusterError struct {
	Errors []error
}

func (ce *ClusterError) Error() string {
	s := ErrClusterUnavailable.Error()
	for i, e := range ce.Errors {
		s += fmt.Sprintf("; error #%d: %s\n", i, e)
	}
	return s
}

func (ce *ClusterError) Detail() string {
	s := ""
	for i, e := range ce.Errors {
		s += fmt.Sprintf("error #%d: %s\n", i, e)
	}
	return s
}
70  vendor/github.com/coreos/etcd/client/curl.go (generated, vendored)
@@ -1,70 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package client

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
)

var (
	cURLDebug = false
)

func EnablecURLDebug() {
	cURLDebug = true
}

func DisablecURLDebug() {
	cURLDebug = false
}

// printcURL prints the cURL equivalent request to stderr.
// It returns an error if the body of the request cannot
// be read.
// The caller MUST cancel the request if there is an error.
func printcURL(req *http.Request) error {
	if !cURLDebug {
		return nil
	}
	var (
		command string
		b       []byte
		err     error
	)

	if req.URL != nil {
		command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
	}

	if req.Body != nil {
		b, err = ioutil.ReadAll(req.Body)
		if err != nil {
			return err
		}
		command += fmt.Sprintf(" -d %q", string(b))
	}

	fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)

	// reset body
	body := bytes.NewBuffer(b)
	req.Body = ioutil.NopCloser(body)

	return nil
}
40  vendor/github.com/coreos/etcd/client/discover.go (generated, vendored)
@@ -1,40 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package client

import (
	"github.com/coreos/etcd/pkg/srv"
)

// Discoverer is an interface that wraps the Discover method.
type Discoverer interface {
	// Discover looks up the etcd servers for the domain.
	Discover(domain string) ([]string, error)
}

type srvDiscover struct{}

// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
func NewSRVDiscover() Discoverer {
	return &srvDiscover{}
}

func (d *srvDiscover) Discover(domain string) ([]string, error) {
	srvs, err := srv.GetClient("etcd-client", domain)
	if err != nil {
		return nil, err
	}
	return srvs.Endpoints, nil
}
73  vendor/github.com/coreos/etcd/client/doc.go (generated, vendored)
@@ -1,73 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package client provides bindings for the etcd APIs.

Create a Config and exchange it for a Client:

	import (
		"net/http"
		"context"

		"github.com/coreos/etcd/client"
	)

	cfg := client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"},
		Transport: DefaultTransport,
	}

	c, err := client.New(cfg)
	if err != nil {
		// handle error
	}

Clients are safe for concurrent use by multiple goroutines.

Create a KeysAPI using the Client, then use it to interact with etcd:

	kAPI := client.NewKeysAPI(c)

	// create a new key /foo with the value "bar"
	_, err = kAPI.Create(context.Background(), "/foo", "bar")
	if err != nil {
		// handle error
	}

	// delete the newly created key only if the value is still "bar"
	_, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
	if err != nil {
		// handle error
	}

Use a custom context to set timeouts on your operations:

	import "time"

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// set a new key, ignoring its previous state
	_, err := kAPI.Set(ctx, "/ping", "pong", nil)
	if err != nil {
		if err == context.DeadlineExceeded {
			// request took longer than 5s
		} else {
			// handle error
		}
	}

*/
package client
72  vendor/github.com/coreos/etcd/client/json.go (generated, vendored)
@@ -1,72 +0,0 @@
// Copyright 2019 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package client

import (
	"github.com/json-iterator/go"
	"github.com/modern-go/reflect2"
	"strconv"
	"unsafe"
)

type customNumberExtension struct {
	jsoniter.DummyExtension
}

func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder {
	if typ.String() == "interface {}" {
		return customNumberDecoder{}
	}
	return nil
}

type customNumberDecoder struct {
}

func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
	switch iter.WhatIsNext() {
	case jsoniter.NumberValue:
		var number jsoniter.Number
		iter.ReadVal(&number)
		i64, err := strconv.ParseInt(string(number), 10, 64)
		if err == nil {
			*(*interface{})(ptr) = i64
			return
		}
		f64, err := strconv.ParseFloat(string(number), 64)
		if err == nil {
			*(*interface{})(ptr) = f64
			return
		}
		iter.ReportError("DecodeNumber", err.Error())
	default:
		*(*interface{})(ptr) = iter.Read()
	}
}

// caseSensitiveJsonIterator returns a jsoniterator API that's configured to be
// case-sensitive when unmarshalling, and otherwise compatible with
// the encoding/json standard library.
func caseSensitiveJsonIterator() jsoniter.API {
	config := jsoniter.Config{
		EscapeHTML:             true,
		SortMapKeys:            true,
		ValidateJsonRawMessage: true,
		CaseSensitive:          true,
	}.Froze()
	// Force jsoniter to decode number to interface{} via int64/float64, if possible.
	config.RegisterExtension(&customNumberExtension{})
	return config
}
680  vendor/github.com/coreos/etcd/client/keys.go (generated, vendored)
@@ -1,680 +0,0 @@
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/pkg/pathutil"
|
||||
)
|
||||
|
||||
const (
|
||||
ErrorCodeKeyNotFound = 100
|
||||
ErrorCodeTestFailed = 101
|
||||
ErrorCodeNotFile = 102
|
||||
ErrorCodeNotDir = 104
|
||||
ErrorCodeNodeExist = 105
|
||||
ErrorCodeRootROnly = 107
|
||||
ErrorCodeDirNotEmpty = 108
|
||||
ErrorCodeUnauthorized = 110
|
||||
|
||||
ErrorCodePrevValueRequired = 201
|
||||
ErrorCodeTTLNaN = 202
|
||||
ErrorCodeIndexNaN = 203
|
||||
ErrorCodeInvalidField = 209
|
||||
ErrorCodeInvalidForm = 210
|
||||
|
||||
ErrorCodeRaftInternal = 300
|
||||
ErrorCodeLeaderElect = 301
|
||||
|
||||
ErrorCodeWatcherCleared = 400
|
||||
ErrorCodeEventIndexCleared = 401
|
||||
)
|
||||
|
||||
type Error struct {
|
||||
Code int `json:"errorCode"`
|
||||
Message string `json:"message"`
|
||||
Cause string `json:"cause"`
|
||||
Index uint64 `json:"index"`
|
||||
}
|
||||
|
||||
func (e Error) Error() string {
|
||||
return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index)
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.")
|
||||
ErrEmptyBody = errors.New("client: response body is empty")
|
||||
)
|
||||
|
||||
// PrevExistType is used to define an existence condition when setting
|
||||
// or deleting Nodes.
|
||||
type PrevExistType string
|
||||
|
||||
const (
|
||||
PrevIgnore = PrevExistType("")
|
||||
PrevExist = PrevExistType("true")
|
||||
PrevNoExist = PrevExistType("false")
|
||||
)
|
||||
|
||||
var (
|
||||
defaultV2KeysPrefix = "/v2/keys"
|
||||
)
|
||||
|
||||
// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
|
||||
// API over HTTP.
|
||||
func NewKeysAPI(c Client) KeysAPI {
|
||||
return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix)
|
||||
}
|
||||
|
||||
// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller
|
||||
// to provide a custom base URL path. This should only be used in
|
||||
// very rare cases.
|
||||
func NewKeysAPIWithPrefix(c Client, p string) KeysAPI {
|
||||
return &httpKeysAPI{
|
||||
client: c,
|
||||
prefix: p,
|
||||
}
|
||||
}
|
||||
|
||||
type KeysAPI interface {
|
||||
// Get retrieves a set of Nodes from etcd
|
||||
Get(ctx context.Context, key string, opts *GetOptions) (*Response, error)
|
||||
|
||||
// Set assigns a new value to a Node identified by a given key. The caller
|
||||
// may define a set of conditions in the SetOptions. If SetOptions.Dir=true
|
||||
// then value is ignored.
|
||||
Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
|
||||
|
||||
// Delete removes a Node identified by the given key, optionally destroying
|
||||
// all of its children as well. The caller may define a set of required
|
||||
// conditions in an DeleteOptions object.
|
||||
Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error)
|
||||
|
||||
// Create is an alias for Set w/ PrevExist=false
|
||||
Create(ctx context.Context, key, value string) (*Response, error)
|
||||
|
||||
// CreateInOrder is used to atomically create in-order keys within the given directory.
|
||||
CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error)
|
||||
|
||||
// Update is an alias for Set w/ PrevExist=true
|
||||
Update(ctx context.Context, key, value string) (*Response, error)
|
||||
|
||||
// Watcher builds a new Watcher targeted at a specific Node identified
|
||||
// by the given key. The Watcher may be configured at creation time
|
||||
// through a WatcherOptions object. The returned Watcher is designed
|
||||
// to emit events that happen to a Node, and optionally to its children.
|
||||
Watcher(key string, opts *WatcherOptions) Watcher
|
||||
}
|
||||
|
||||
type WatcherOptions struct {
|
||||
// AfterIndex defines the index after-which the Watcher should
|
||||
// start emitting events. For example, if a value of 5 is
|
||||
// provided, the first event will have an index >= 6.
|
||||
//
|
||||
// Setting AfterIndex to 0 (default) means that the Watcher
|
||||
// should start watching for events starting at the current
|
||||
// index, whatever that may be.
|
||||
AfterIndex uint64
|
||||
|
||||
// Recursive specifies whether or not the Watcher should emit
|
||||
// events that occur in children of the given keyspace. If set
|
||||
// to false (default), events will be limited to those that
|
||||
// occur for the exact key.
|
||||
Recursive bool
|
||||
}
|
||||
|
||||
type CreateInOrderOptions struct {
|
||||
// TTL defines a period of time after-which the Node should
|
||||
// expire and no longer exist. Values <= 0 are ignored. Given
|
||||
// that the zero-value is ignored, TTL cannot be used to set
|
||||
// a TTL of 0.
|
||||
TTL time.Duration
|
||||
}
|
||||
|
||||
type SetOptions struct {
|
||||
// PrevValue specifies what the current value of the Node must
|
||||
// be in order for the Set operation to succeed.
|
||||
//
|
||||
// Leaving this field empty means that the caller wishes to
|
||||
// ignore the current value of the Node. This cannot be used
|
||||
// to compare the Node's current value to an empty string.
|
||||
//
|
||||
// PrevValue is ignored if Dir=true
|
||||
PrevValue string
|
||||
|
||||
// PrevIndex indicates what the current ModifiedIndex of the
|
||||
// Node must be in order for the Set operation to succeed.
|
||||
//
|
||||
// If PrevIndex is set to 0 (default), no comparison is made.
|
||||
PrevIndex uint64
|
||||
|
||||
// PrevExist specifies whether the Node must currently exist
|
||||
// (PrevExist) or not (PrevNoExist). If the caller does not
|
||||
// care about existence, set PrevExist to PrevIgnore, or simply
|
||||
// leave it unset.
|
||||
PrevExist PrevExistType
|
||||
|
||||
// TTL defines a period of time after-which the Node should
|
||||
// expire and no longer exist. Values <= 0 are ignored. Given
|
||||
// that the zero-value is ignored, TTL cannot be used to set
|
||||
// a TTL of 0.
|
||||
TTL time.Duration
|
||||
|
||||
// Refresh set to true means a TTL value can be updated
|
||||
// without firing a watch or changing the node value. A
|
||||
// value must not be provided when refreshing a key.
|
||||
Refresh bool
|
||||
|
||||
// Dir specifies whether or not this Node should be created as a directory.
|
||||
Dir bool
|
||||
|
||||
// NoValueOnSuccess specifies whether the response contains the current value of the Node.
|
||||
// If set, the response will only contain the current value when the request fails.
|
||||
NoValueOnSuccess bool
|
||||
}
|
||||
|
||||
type GetOptions struct {
|
||||
// Recursive defines whether or not all children of the Node
|
||||
// should be returned.
|
||||
Recursive bool
|
||||
|
||||
// Sort instructs the server whether or not to sort the Nodes.
|
||||
// If true, the Nodes are sorted alphabetically by key in
|
||||
// ascending order (A to z). If false (default), the Nodes will
|
||||
// not be sorted and the ordering used should not be considered
|
||||
// predictable.
|
||||
Sort bool
|
||||
|
||||
// Quorum specifies whether it gets the latest committed value that
|
||||
// has been applied in quorum of members, which ensures external
|
||||
// consistency (or linearizability).
|
||||
Quorum bool
|
||||
}
|
||||
|
||||
type DeleteOptions struct {
|
||||
// PrevValue specifies what the current value of the Node must
|
||||
// be in order for the Delete operation to succeed.
|
||||
//
|
||||
// Leaving this field empty means that the caller wishes to
|
||||
// ignore the current value of the Node. This cannot be used
|
||||
// to compare the Node's current value to an empty string.
|
||||
PrevValue string
|
||||
|
||||
// PrevIndex indicates what the current ModifiedIndex of the
|
||||
// Node must be in order for the Delete operation to succeed.
|
||||
//
|
||||
// If PrevIndex is set to 0 (default), no comparison is made.
|
||||
PrevIndex uint64
|
||||
|
||||
// Recursive defines whether or not all children of the Node
|
||||
// should be deleted. If set to true, all children of the Node
|
||||
// identified by the given key will be deleted. If left unset
|
||||
// or explicitly set to false, only a single Node will be
|
||||
// deleted.
|
||||
Recursive bool
|
||||
|
||||
// Dir specifies whether or not this Node should be removed as a directory.
|
||||
Dir bool
|
||||
}
|
||||
|
||||
type Watcher interface {
|
||||
// Next blocks until an etcd event occurs, then returns a Response
|
||||
// representing that event. The behavior of Next depends on the
|
||||
// WatcherOptions used to construct the Watcher. Next is designed to
|
||||
// be called repeatedly, each time blocking until a subsequent event
|
||||
// is available.
|
||||
//
|
||||
// If the provided context is cancelled, Next will return a non-nil
|
||||
// error. Any other failures encountered while waiting for the next
|
||||
// event (connection issues, deserialization failures, etc) will
|
||||
// also result in a non-nil error.
|
||||
Next(context.Context) (*Response, error)
|
||||
}
|
||||
|
||||
type Response struct {
|
||||
// Action is the name of the operation that occurred. Possible values
|
||||
// include get, set, delete, update, create, compareAndSwap,
|
||||
// compareAndDelete and expire.
|
||||
Action string `json:"action"`
|
||||
|
||||
// Node represents the state of the relevant etcd Node.
|
||||
Node *Node `json:"node"`
|
||||
|
||||
// PrevNode represents the previous state of the Node. PrevNode is non-nil
|
||||
// only if the Node existed before the action occurred and the action
|
||||
// caused a change to the Node.
|
||||
PrevNode *Node `json:"prevNode"`
|
||||
|
||||
// Index holds the cluster-level index at the time the Response was generated.
|
||||
// This index is not tied to the Node(s) contained in this Response.
|
||||
Index uint64 `json:"-"`
|
||||
|
||||
// ClusterID holds the cluster-level ID reported by the server. This
|
||||
// should be different for different etcd clusters.
|
||||
ClusterID string `json:"-"`
|
||||
}
|
||||
|
||||
type Node struct {
|
||||
// Key represents the unique location of this Node (e.g. "/foo/bar").
|
||||
Key string `json:"key"`
|
||||
|
||||
// Dir reports whether node describes a directory.
|
||||
Dir bool `json:"dir,omitempty"`
|
||||
|
||||
// Value is the current data stored on this Node. If this Node
|
||||
// is a directory, Value will be empty.
|
||||
Value string `json:"value"`
|
||||
|
||||
// Nodes holds the children of this Node, only if this Node is a directory.
|
||||
// This slice of will be arbitrarily deep (children, grandchildren, great-
|
||||
// grandchildren, etc.) if a recursive Get or Watch request were made.
|
||||
Nodes Nodes `json:"nodes"`
|
||||
|
||||
// CreatedIndex is the etcd index at-which this Node was created.
|
||||
CreatedIndex uint64 `json:"createdIndex"`
|
||||
|
||||
// ModifiedIndex is the etcd index at-which this Node was last modified.
|
||||
ModifiedIndex uint64 `json:"modifiedIndex"`
|
||||
|
||||
// Expiration is the server side expiration time of the key.
|
||||
Expiration *time.Time `json:"expiration,omitempty"`
|
||||
|
||||
// TTL is the time to live of the key in second.
|
||||
TTL int64 `json:"ttl,omitempty"`
|
||||
}
|
||||
|
||||
func (n *Node) String() string {
|
||||
return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL)
|
||||
}
|
||||
|
||||
// TTLDuration returns the Node's TTL as a time.Duration object
|
||||
func (n *Node) TTLDuration() time.Duration {
|
||||
return time.Duration(n.TTL) * time.Second
|
||||
}
|
||||
|
||||
type Nodes []*Node
|
||||
|
||||
// interfaces for sorting
|
||||
|
||||
func (ns Nodes) Len() int { return len(ns) }
|
||||
func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
|
||||
func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
|
||||
|
||||
type httpKeysAPI struct {
|
||||
client httpClient
|
||||
prefix string
|
||||
}
|
||||
|
||||
func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) {
|
||||
act := &setAction{
|
||||
Prefix: k.prefix,
|
||||
Key: key,
|
||||
Value: val,
|
||||
}
|
||||
|
||||
if opts != nil {
|
||||
act.PrevValue = opts.PrevValue
|
||||
act.PrevIndex = opts.PrevIndex
|
||||
act.PrevExist = opts.PrevExist
|
||||
act.TTL = opts.TTL
|
||||
act.Refresh = opts.Refresh
|
||||
act.Dir = opts.Dir
|
||||
act.NoValueOnSuccess = opts.NoValueOnSuccess
|
||||
}
|
||||
|
||||
doCtx := ctx
|
||||
if act.PrevExist == PrevNoExist {
|
||||
doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
|
||||
}
|
||||
resp, body, err := k.client.Do(doCtx, act)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
|
||||
}
|
||||
|
||||
func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) {
|
||||
return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist})
|
||||
}
|
||||
|
||||
func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) {
|
||||
act := &createInOrderAction{
|
||||
Prefix: k.prefix,
|
||||
Dir: dir,
|
||||
Value: val,
|
||||
}
|
||||
|
||||
if opts != nil {
|
||||
act.TTL = opts.TTL
|
||||
}
|
||||
|
||||
resp, body, err := k.client.Do(ctx, act)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
|
||||
}
|
||||
|
||||
func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) {
|
||||
return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist})
|
||||
}
|
||||
|
||||
func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) {
|
||||
act := &deleteAction{
|
||||
Prefix: k.prefix,
|
||||
Key: key,
|
||||
}
|
||||
|
||||
if opts != nil {
|
||||
act.PrevValue = opts.PrevValue
|
||||
act.PrevIndex = opts.PrevIndex
|
||||
act.Dir = opts.Dir
|
||||
act.Recursive = opts.Recursive
|
||||
}
|
||||
|
||||
doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
|
||||
resp, body, err := k.client.Do(doCtx, act)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
|
||||
}
|
||||
|
||||
func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) {
|
||||
act := &getAction{
|
||||
Prefix: k.prefix,
|
||||
Key: key,
|
||||
}
|
||||
|
||||
if opts != nil {
|
||||
act.Recursive = opts.Recursive
|
||||
act.Sorted = opts.Sort
|
||||
act.Quorum = opts.Quorum
|
||||
}
|
||||
|
||||
resp, body, err := k.client.Do(ctx, act)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
|
||||
}
|
||||
|
||||
func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher {
|
||||
act := waitAction{
|
||||
Prefix: k.prefix,
|
||||
Key: key,
|
||||
}
|
||||
|
||||
if opts != nil {
|
||||
act.Recursive = opts.Recursive
|
||||
if opts.AfterIndex > 0 {
|
||||
act.WaitIndex = opts.AfterIndex + 1
|
||||
}
|
||||
}
|
||||
|
||||
return &httpWatcher{
|
||||
client: k.client,
|
||||
nextWait: act,
|
||||
}
|
||||
}
|
||||
|
||||
type httpWatcher struct {
|
||||
client httpClient
|
||||
nextWait waitAction
|
||||
}
|
||||
|
||||
func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
|
||||
for {
|
||||
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body)
|
||||
if err != nil {
|
||||
if err == ErrEmptyBody {
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1
|
||||
return resp, nil
|
||||
}
|
||||
}
|
||||
|
||||
// v2KeysURL forms a URL representing the location of a key.
|
||||
// The endpoint argument represents the base URL of an etcd
|
||||
// server. The prefix is the path needed to route from the
|
||||
// provided endpoint's path to the root of the keys API
|
||||
// (typically "/v2/keys").
|
||||
func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
|
||||
// We concatenate all parts together manually. We cannot use
|
||||
// path.Join because it does not reserve trailing slash.
|
||||
// We call CanonicalURLPath to further cleanup the path.
|
||||
if prefix != "" && prefix[0] != '/' {
|
||||
prefix = "/" + prefix
|
||||
}
|
||||
if key != "" && key[0] != '/' {
|
||||
key = "/" + key
|
||||
}
|
||||
ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key)
|
||||
return &ep
|
||||
}
|
||||
|
||||
type getAction struct {
|
||||
Prefix string
|
||||
Key string
|
||||
Recursive bool
|
||||
Sorted bool
|
||||
Quorum bool
|
||||
}
|
||||
|
||||
func (g *getAction) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2KeysURL(ep, g.Prefix, g.Key)
|
||||
|
||||
params := u.Query()
|
||||
params.Set("recursive", strconv.FormatBool(g.Recursive))
|
||||
params.Set("sorted", strconv.FormatBool(g.Sorted))
|
||||
params.Set("quorum", strconv.FormatBool(g.Quorum))
|
||||
u.RawQuery = params.Encode()
|
||||
|
||||
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||
return req
|
||||
}
|
||||
|
||||
type waitAction struct {
|
||||
Prefix string
|
||||
Key string
|
||||
WaitIndex uint64
|
||||
Recursive bool
|
||||
}
|
||||
|
||||
func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2KeysURL(ep, w.Prefix, w.Key)
|
||||
|
||||
params := u.Query()
|
||||
params.Set("wait", "true")
|
||||
params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10))
|
||||
params.Set("recursive", strconv.FormatBool(w.Recursive))
|
||||
u.RawQuery = params.Encode()
|
||||
|
||||
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||
return req
|
||||
}
|
||||
|
||||
type setAction struct {
|
||||
Prefix string
|
||||
Key string
|
||||
Value string
|
||||
PrevValue string
|
||||
PrevIndex uint64
|
||||
PrevExist PrevExistType
|
||||
TTL time.Duration
|
||||
Refresh bool
|
||||
Dir bool
|
||||
NoValueOnSuccess bool
|
||||
}
|
||||
|
||||
func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2KeysURL(ep, a.Prefix, a.Key)
|
||||
|
||||
params := u.Query()
|
||||
form := url.Values{}
|
||||
|
||||
// we're either creating a directory or setting a key
|
||||
if a.Dir {
|
||||
params.Set("dir", strconv.FormatBool(a.Dir))
|
||||
} else {
|
||||
// These options are only valid for setting a key
|
||||
if a.PrevValue != "" {
|
||||
params.Set("prevValue", a.PrevValue)
|
||||
}
|
||||
form.Add("value", a.Value)
|
||||
}
|
||||
|
||||
// Options which apply to both setting a key and creating a dir
|
||||
if a.PrevIndex != 0 {
|
||||
params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
|
||||
}
|
||||
if a.PrevExist != PrevIgnore {
|
||||
params.Set("prevExist", string(a.PrevExist))
|
||||
}
|
||||
if a.TTL > 0 {
|
||||
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
|
||||
}
|
||||
|
||||
if a.Refresh {
|
||||
form.Add("refresh", "true")
|
||||
}
|
||||
if a.NoValueOnSuccess {
|
||||
params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
|
||||
}
|
||||
|
||||
u.RawQuery = params.Encode()
|
||||
body := strings.NewReader(form.Encode())
|
||||
|
||||
req, _ := http.NewRequest("PUT", u.String(), body)
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
|
||||
return req
|
||||
}
|
||||
|
||||
type deleteAction struct {
|
||||
Prefix string
|
||||
Key string
|
||||
PrevValue string
|
||||
PrevIndex uint64
|
||||
Dir bool
|
||||
Recursive bool
|
||||
}
|
||||
|
||||
func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2KeysURL(ep, a.Prefix, a.Key)
|
||||
|
||||
params := u.Query()
|
||||
if a.PrevValue != "" {
|
||||
params.Set("prevValue", a.PrevValue)
|
||||
}
|
||||
if a.PrevIndex != 0 {
|
||||
params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
|
||||
}
|
||||
if a.Dir {
|
||||
params.Set("dir", "true")
|
||||
}
|
||||
if a.Recursive {
|
||||
params.Set("recursive", "true")
|
||||
}
|
||||
u.RawQuery = params.Encode()
|
||||
|
||||
req, _ := http.NewRequest("DELETE", u.String(), nil)
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
|
||||
return req
|
||||
}
|
||||
|
||||
type createInOrderAction struct {
|
||||
Prefix string
|
||||
Dir string
|
||||
Value string
|
||||
TTL time.Duration
|
||||
}
|
||||
|
||||
func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2KeysURL(ep, a.Prefix, a.Dir)
|
||||
|
||||
form := url.Values{}
|
||||
form.Add("value", a.Value)
|
||||
if a.TTL > 0 {
|
||||
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
|
||||
}
|
||||
body := strings.NewReader(form.Encode())
|
||||
|
||||
req, _ := http.NewRequest("POST", u.String(), body)
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
return req
|
||||
}
|
||||
|
||||
func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) {
|
||||
switch code {
|
||||
case http.StatusOK, http.StatusCreated:
|
||||
if len(body) == 0 {
|
||||
return nil, ErrEmptyBody
|
||||
}
|
||||
res, err = unmarshalSuccessfulKeysResponse(header, body)
|
||||
default:
|
||||
err = unmarshalFailedKeysResponse(body)
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
var jsonIterator = caseSensitiveJsonIterator()
|
||||
|
||||
func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
|
||||
var res Response
|
||||
err := jsonIterator.Unmarshal(body, &res)
|
||||
if err != nil {
|
||||
return nil, ErrInvalidJSON
|
||||
}
|
||||
if header.Get("X-Etcd-Index") != "" {
|
||||
res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
res.ClusterID = header.Get("X-Etcd-Cluster-ID")
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func unmarshalFailedKeysResponse(body []byte) error {
|
||||
var etcdErr Error
|
||||
if err := json.Unmarshal(body, &etcdErr); err != nil {
|
||||
return ErrInvalidJSON
|
||||
}
|
||||
return etcdErr
|
||||
}
303  vendor/github.com/coreos/etcd/client/members.go (generated, vendored)
@@ -1,303 +0,0 @@
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultV2MembersPrefix = "/v2/members"
|
||||
defaultLeaderSuffix = "/leader"
|
||||
)
|
||||
|
||||
type Member struct {
|
||||
// ID is the unique identifier of this Member.
|
||||
ID string `json:"id"`
|
||||
|
||||
// Name is a human-readable, non-unique identifier of this Member.
|
||||
Name string `json:"name"`
|
||||
|
||||
// PeerURLs represents the HTTP(S) endpoints this Member uses to
|
||||
// participate in etcd's consensus protocol.
|
||||
PeerURLs []string `json:"peerURLs"`
|
||||
|
||||
// ClientURLs represents the HTTP(S) endpoints on which this Member
|
||||
// serves its client-facing APIs.
|
||||
ClientURLs []string `json:"clientURLs"`
|
||||
}
|
||||
|
||||
type memberCollection []Member
|
||||
|
||||
func (c *memberCollection) UnmarshalJSON(data []byte) error {
|
||||
d := struct {
|
||||
Members []Member
|
||||
}{}
|
||||
|
||||
if err := json.Unmarshal(data, &d); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if d.Members == nil {
|
||||
*c = make([]Member, 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
*c = d.Members
|
||||
return nil
|
||||
}
|
||||
|
||||
type memberCreateOrUpdateRequest struct {
|
||||
PeerURLs types.URLs
|
||||
}
|
||||
|
||||
func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) {
|
||||
s := struct {
|
||||
PeerURLs []string `json:"peerURLs"`
|
||||
}{
|
||||
PeerURLs: make([]string, len(m.PeerURLs)),
|
||||
}
|
||||
|
||||
for i, u := range m.PeerURLs {
|
||||
s.PeerURLs[i] = u.String()
|
||||
}
|
||||
|
||||
return json.Marshal(&s)
|
||||
}
|
||||
|
||||
// NewMembersAPI constructs a new MembersAPI that uses HTTP to
|
||||
// interact with etcd's membership API.
|
||||
func NewMembersAPI(c Client) MembersAPI {
|
||||
return &httpMembersAPI{
|
||||
client: c,
|
||||
}
|
||||
}
|
||||
|
||||
type MembersAPI interface {
|
||||
// List enumerates the current cluster membership.
|
||||
List(ctx context.Context) ([]Member, error)
|
||||
|
||||
// Add instructs etcd to accept a new Member into the cluster.
|
||||
Add(ctx context.Context, peerURL string) (*Member, error)
|
||||
|
||||
// Remove demotes an existing Member out of the cluster.
|
||||
Remove(ctx context.Context, mID string) error
|
||||
|
||||
// Update instructs etcd to update an existing Member in the cluster.
|
||||
Update(ctx context.Context, mID string, peerURLs []string) error
|
||||
|
||||
// Leader gets current leader of the cluster
|
||||
Leader(ctx context.Context) (*Member, error)
|
||||
}
|
||||
|
||||
type httpMembersAPI struct {
|
||||
client httpClient
|
||||
}
|
||||
|
||||
func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
|
||||
req := &membersAPIActionList{}
|
||||
resp, body, err := m.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var mCollection memberCollection
|
||||
if err := json.Unmarshal(body, &mCollection); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return []Member(mCollection), nil
|
||||
}
|
||||
|
||||
func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
|
||||
urls, err := types.NewURLs([]string{peerURL})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req := &membersAPIActionAdd{peerURLs: urls}
|
||||
resp, body, err := m.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusCreated {
|
||||
var merr membersError
|
||||
if err := json.Unmarshal(body, &merr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, merr
|
||||
}
|
||||
|
||||
var memb Member
|
||||
if err := json.Unmarshal(body, &memb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &memb, nil
|
||||
}
|
||||
|
||||
func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error {
|
||||
urls, err := types.NewURLs(peerURLs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID}
|
||||
resp, body, err := m.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
var merr membersError
|
||||
if err := json.Unmarshal(body, &merr); err != nil {
|
||||
return err
|
||||
}
|
||||
return merr
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
|
||||
req := &membersAPIActionRemove{memberID: memberID}
|
||||
resp, _, err := m.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
|
||||
}
|
||||
|
||||
func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
|
||||
req := &membersAPIActionLeader{}
|
||||
resp, body, err := m.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var leader Member
|
||||
if err := json.Unmarshal(body, &leader); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &leader, nil
|
||||
}
|
||||
|
||||
type membersAPIActionList struct{}
|
||||
|
||||
func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2MembersURL(ep)
|
||||
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||
return req
|
||||
}
|
||||
|
||||
type membersAPIActionRemove struct {
|
||||
memberID string
|
||||
}
|
||||
|
||||
func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2MembersURL(ep)
|
||||
u.Path = path.Join(u.Path, d.memberID)
|
||||
req, _ := http.NewRequest("DELETE", u.String(), nil)
|
||||
return req
|
||||
}
|
||||
|
||||
type membersAPIActionAdd struct {
|
||||
peerURLs types.URLs
|
||||
}
|
||||
|
||||
func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2MembersURL(ep)
|
||||
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
|
||||
b, _ := json.Marshal(&m)
|
||||
req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
return req
|
||||
}
|
||||
|
||||
type membersAPIActionUpdate struct {
|
||||
memberID string
|
||||
peerURLs types.URLs
|
||||
}
|
||||
|
||||
func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2MembersURL(ep)
|
||||
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
|
||||
u.Path = path.Join(u.Path, a.memberID)
|
||||
b, _ := json.Marshal(&m)
|
||||
req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
return req
|
||||
}
|
||||
|
||||
func assertStatusCode(got int, want ...int) (err error) {
|
||||
for _, w := range want {
|
||||
if w == got {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("unexpected status code %d", got)
|
||||
}
|
||||
|
||||
type membersAPIActionLeader struct{}
|
||||
|
||||
func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
|
||||
u := v2MembersURL(ep)
|
||||
u.Path = path.Join(u.Path, defaultLeaderSuffix)
|
||||
req, _ := http.NewRequest("GET", u.String(), nil)
|
||||
return req
|
||||
}
|
||||
|
||||
// v2MembersURL add the necessary path to the provided endpoint
|
||||
// to route requests to the default v2 members API.
|
||||
func v2MembersURL(ep url.URL) *url.URL {
|
||||
ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
|
||||
return &ep
|
||||
}
|
||||
|
||||
type membersError struct {
|
||||
Message string `json:"message"`
|
||||
Code int `json:"-"`
|
||||
}
|
||||
|
||||
func (e membersError) Error() string {
|
||||
return e.Message
|
||||
}
|
53  vendor/github.com/coreos/etcd/client/util.go  generated  vendored
@@ -1,53 +0,0 @@
[entire generated/vendored file deleted: the IsKeyNotFound, IsRoleNotFound and IsUserNotFound helpers for classifying etcd v2 client errors]
31  vendor/github.com/coreos/etcd/pkg/pathutil/path.go  generated  vendored
@@ -1,31 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package pathutil implements utility functions for handling slash-separated
// paths.
package pathutil

import "path"

// CanonicalURLPath returns the canonical url path for p, which follows the rules:
// 1. the path always starts with "/"
// 2. replace multiple slashes with a single slash
// 3. replace each '.' '..' path name element with equivalent one
// 4. keep the trailing slash
// The function is borrowed from stdlib http.cleanPath in server.go.
func CanonicalURLPath(p string) string {
	if p == "" {
		return "/"
	}
	if p[0] != '/' {
		p = "/" + p
	}
	np := path.Clean(p)
	// path.Clean removes trailing slash except for root,
	// put the trailing slash back if necessary.
	if p[len(p)-1] == '/' && np != "/" {
		np += "/"
	}
	return np
}
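For reference, a minimal sketch of the behaviour the deleted CanonicalURLPath helper provided, assuming the package is still resolvable at its old pre-modules import path (for example through a vendor tree like the one removed here):

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/pathutil"
)

func main() {
	// Duplicate slashes and ".." elements are collapsed, but unlike a
	// plain path.Clean the trailing slash is preserved.
	fmt.Println(pathutil.CanonicalURLPath("//v2/keys/../members/")) // "/v2/members/"
	fmt.Println(pathutil.CanonicalURLPath(""))                      // "/"
}
```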
141  vendor/github.com/coreos/etcd/pkg/srv/srv.go  generated  vendored
@@ -1,141 +0,0 @@
[entire generated/vendored file deleted: DNS SRV discovery — GetCluster turns SRV records (the "-ssl"/https variant first, then http) into name=scheme://host:port bootstrap entries, and GetClient returns an SRVClients value with the discovered client endpoints for a service and domain]
17  vendor/github.com/coreos/etcd/pkg/types/doc.go  generated  vendored
@@ -1,17 +0,0 @@
[entire generated/vendored file deleted: the license header and package comment for the types package]
41  vendor/github.com/coreos/etcd/pkg/types/id.go  generated  vendored
@@ -1,41 +0,0 @@
[entire generated/vendored file deleted: the ID type (a uint64 rendered as a base-16 string), IDFromString, and the sortable IDSlice]
178  vendor/github.com/coreos/etcd/pkg/types/set.go  generated  vendored
@@ -1,178 +0,0 @@
[entire generated/vendored file deleted: the Set interface with its unsafeSet and mutex-guarded tsafeSet implementations (Add, Remove, Contains, Equals, Length, Values, Copy, Sub)]
22  vendor/github.com/coreos/etcd/pkg/types/slice.go  generated  vendored
@@ -1,22 +0,0 @@
[entire generated/vendored file deleted: Uint64Slice, a sort.Interface implementation over []uint64]
82  vendor/github.com/coreos/etcd/pkg/types/urls.go  generated  vendored
@@ -1,82 +0,0 @@
[entire generated/vendored file deleted: the URLs type — NewURLs/MustNewURLs validate that every entry is an http, https, unix or unixs URL in host:port form with no path — plus sorting and StringSlice helpers]
107  vendor/github.com/coreos/etcd/pkg/types/urlsmap.go  generated  vendored
@@ -1,107 +0,0 @@
[entire generated/vendored file deleted: URLsMap, a name-to-URLs map parsed from discovery-formatted strings such as mach0=http://1.1.1.1:2380,mach1=http://3.3.3.3:2380, with NewURLsMap, NewURLsMapFromStringMap, String, URLs and Len]
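For reference, a minimal sketch of the discovery-string parsing the deleted URLsMap type performed (hypothetical node names and addresses; assumes the etcd pkg/types package is still fetchable at its old import path):

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	// Discovery-formatted name-to-URLs string, as described in urlsmap.go.
	m, err := types.NewURLsMap("infra0=http://10.0.0.1:2380,infra1=http://10.0.0.2:2380")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Len())    // 2
	fmt.Println(m.URLs())   // the two peer URLs, sorted lexicographically
	fmt.Println(m.String()) // round-trips back to the discovery format, sorted by name
}
```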
56  vendor/github.com/coreos/etcd/version/version.go  generated  vendored
@@ -1,56 +0,0 @@
[entire generated/vendored file deleted: etcd version metadata — MinClusterVersion "3.0.0", Version "3.3.27", the derived APIVersion, GitSHA, the Versions struct, and Cluster(), which truncates a version string to major.minor]
202  vendor/github.com/coreos/go-semver/LICENSE  generated  vendored
@@ -1,202 +0,0 @@
[entire generated/vendored file deleted: the Apache License 2.0 text shipped with the vendored go-semver package]
28  vendor/github.com/coreos/go-semver/README.md  generated  vendored
@@ -1,28 +0,0 @@
# go-semver - Semantic Versioning Library

[![Build Status](https://travis-ci.org/coreos/go-semver.svg?branch=master)](https://travis-ci.org/coreos/go-semver)
[![GoDoc](https://godoc.org/github.com/coreos/go-semver/semver?status.svg)](https://godoc.org/github.com/coreos/go-semver/semver)

go-semver is a [semantic versioning][semver] library for Go. It lets you parse
and compare two semantic version strings.

[semver]: http://semver.org/

## Usage

```go
vA := semver.New("1.2.3")
vB := semver.New("3.2.1")

fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB))
```

## Example Application

```
$ go run example.go 1.2.3 3.2.1
1.2.3 < 3.2.1 == true

$ go run example.go 5.2.3 3.2.1
5.2.3 < 3.2.1 == false
```
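A small, self-contained extension of the README's usage example, showing the pre-release ordering implemented in the semver.go file deleted below; the version strings are illustrative only, and the example assumes go-semver is fetched from its upstream github.com/coreos/go-semver module rather than from the removed vendor copy:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	a := semver.New("3.3.27")   // New wraps NewVersion and panics on malformed input
	b := semver.New("3.4.0-rc.1")

	fmt.Println(a.LessThan(*b)) // true: 3.3.x sorts below 3.4.0, pre-release or not
	fmt.Println(a.Compare(*b))  // -1
}
```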
268  vendor/github.com/coreos/go-semver/semver/semver.go  generated  vendored
@@ -1,268 +0,0 @@
[entire generated/vendored file deleted: the go-semver Version type (Major, Minor, Patch, PreRelease, Metadata) with parsing via New/NewVersion/Must/Set, JSON and YAML (un)marshalling, Compare/Equal/LessThan including pre-release precedence, and the BumpMajor/BumpMinor/BumpPatch helpers]
38  vendor/github.com/coreos/go-semver/semver/sort.go  generated  vendored
@@ -1,38 +0,0 @@
[entire generated/vendored file deleted: Versions, a sort.Interface over []*Version, and the Sort helper]
558  vendor/github.com/docker/libkv/store/consul/consul.go  generated  vendored
@@ -1,558 +0,0 @@
package consul
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/docker/libkv"
|
||||
"github.com/docker/libkv/store"
|
||||
api "github.com/hashicorp/consul/api"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultWatchWaitTime is how long we block for at a
|
||||
// time to check if the watched key has changed. This
|
||||
// affects the minimum time it takes to cancel a watch.
|
||||
DefaultWatchWaitTime = 15 * time.Second
|
||||
|
||||
// RenewSessionRetryMax is the number of time we should try
|
||||
// to renew the session before giving up and throwing an error
|
||||
RenewSessionRetryMax = 5
|
||||
|
||||
// MaxSessionDestroyAttempts is the maximum times we will try
|
||||
// to explicitely destroy the session attached to a lock after
|
||||
// the connectivity to the store has been lost
|
||||
MaxSessionDestroyAttempts = 5
|
||||
|
||||
// defaultLockTTL is the default ttl for the consul lock
|
||||
defaultLockTTL = 20 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrMultipleEndpointsUnsupported is thrown when there are
|
||||
// multiple endpoints specified for Consul
|
||||
ErrMultipleEndpointsUnsupported = errors.New("consul does not support multiple endpoints")
|
||||
|
||||
// ErrSessionRenew is thrown when the session can't be
|
||||
// renewed because the Consul version does not support sessions
|
||||
ErrSessionRenew = errors.New("cannot set or renew session for ttl, unable to operate on sessions")
|
||||
)
|
||||
|
||||
// Consul is the receiver type for the
|
||||
// Store interface
|
||||
type Consul struct {
|
||||
sync.Mutex
|
||||
config *api.Config
|
||||
client *api.Client
|
||||
}
|
||||
|
||||
type consulLock struct {
|
||||
lock *api.Lock
|
||||
renewCh chan struct{}
|
||||
}
|
||||
|
||||
// Register registers consul to libkv
|
||||
func Register() {
|
||||
libkv.AddStore(store.CONSUL, New)
|
||||
}
|
||||
|
||||
// New creates a new Consul client given a list
|
||||
// of endpoints and optional tls config
|
||||
func New(endpoints []string, options *store.Config) (store.Store, error) {
|
||||
if len(endpoints) > 1 {
|
||||
return nil, ErrMultipleEndpointsUnsupported
|
||||
}
|
||||
|
||||
s := &Consul{}
|
||||
|
||||
// Create Consul client
|
||||
config := api.DefaultConfig()
|
||||
s.config = config
|
||||
config.HttpClient = http.DefaultClient
|
||||
config.Address = endpoints[0]
|
||||
config.Scheme = "http"
|
||||
|
||||
// Set options
|
||||
if options != nil {
|
||||
if options.TLS != nil {
|
||||
s.setTLS(options.TLS)
|
||||
}
|
||||
if options.ConnectionTimeout != 0 {
|
||||
s.setTimeout(options.ConnectionTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
// Creates a new client
|
||||
client, err := api.NewClient(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.client = client
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// SetTLS sets Consul TLS options
|
||||
func (s *Consul) setTLS(tls *tls.Config) {
|
||||
s.config.HttpClient.Transport = &http.Transport{
|
||||
TLSClientConfig: tls,
|
||||
}
|
||||
s.config.Scheme = "https"
|
||||
}
|
||||
|
||||
// SetTimeout sets the timeout for connecting to Consul
|
||||
func (s *Consul) setTimeout(time time.Duration) {
|
||||
s.config.WaitTime = time
|
||||
}
|
||||
|
||||
// Normalize the key for usage in Consul
|
||||
func (s *Consul) normalize(key string) string {
|
||||
key = store.Normalize(key)
|
||||
return strings.TrimPrefix(key, "/")
|
||||
}
|
||||
|
||||
func (s *Consul) renewSession(pair *api.KVPair, ttl time.Duration) error {
|
||||
// Check if there is any previous session with an active TTL
|
||||
session, err := s.getActiveSession(pair.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if session == "" {
|
||||
entry := &api.SessionEntry{
|
||||
Behavior: api.SessionBehaviorDelete, // Delete the key when the session expires
|
||||
TTL: (ttl / 2).String(), // Consul multiplies the TTL by 2x
|
||||
LockDelay: 1 * time.Millisecond, // Virtually disable lock delay
|
||||
}
|
||||
|
||||
// Create the key session
|
||||
session, _, err = s.client.Session().Create(entry, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lockOpts := &api.LockOptions{
|
||||
Key: pair.Key,
|
||||
Session: session,
|
||||
}
|
||||
|
||||
// Lock and ignore if lock is held
|
||||
// It's just a placeholder for the
|
||||
// ephemeral behavior
|
||||
lock, _ := s.client.LockOpts(lockOpts)
|
||||
if lock != nil {
|
||||
lock.Lock(nil)
|
||||
}
|
||||
}
|
||||
|
||||
_, _, err = s.client.Session().Renew(session, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// getActiveSession checks if the key already has
|
||||
// a session attached
|
||||
func (s *Consul) getActiveSession(key string) (string, error) {
|
||||
pair, _, err := s.client.KV().Get(key, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if pair != nil && pair.Session != "" {
|
||||
return pair.Session, nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Get the value at "key", returns the last modified index
|
||||
// to use in conjunction to CAS calls
|
||||
func (s *Consul) Get(key string) (*store.KVPair, error) {
|
||||
options := &api.QueryOptions{
|
||||
AllowStale: false,
|
||||
RequireConsistent: true,
|
||||
}
|
||||
|
||||
pair, meta, err := s.client.KV().Get(s.normalize(key), options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If pair is nil then the key does not exist
|
||||
if pair == nil {
|
||||
return nil, store.ErrKeyNotFound
|
||||
}
|
||||
|
||||
return &store.KVPair{Key: pair.Key, Value: pair.Value, LastIndex: meta.LastIndex}, nil
|
||||
}
|
||||
|
||||
// Put a value at "key"
|
||||
func (s *Consul) Put(key string, value []byte, opts *store.WriteOptions) error {
|
||||
key = s.normalize(key)
|
||||
|
||||
p := &api.KVPair{
|
||||
Key: key,
|
||||
Value: value,
|
||||
Flags: api.LockFlagValue,
|
||||
}
|
||||
|
||||
if opts != nil && opts.TTL > 0 {
|
||||
// Create or renew a session holding a TTL. Operations on sessions
|
||||
// are not deterministic: creating or renewing a session can fail
|
||||
for retry := 1; retry <= RenewSessionRetryMax; retry++ {
|
||||
err := s.renewSession(p, opts.TTL)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if retry == RenewSessionRetryMax {
|
||||
return ErrSessionRenew
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_, err := s.client.KV().Put(p, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete a value at "key"
|
||||
func (s *Consul) Delete(key string) error {
|
||||
if _, err := s.Get(key); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := s.client.KV().Delete(s.normalize(key), nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// Exists checks that the key exists inside the store
|
||||
func (s *Consul) Exists(key string) (bool, error) {
|
||||
_, err := s.Get(key)
|
||||
if err != nil {
|
||||
if err == store.ErrKeyNotFound {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// List child nodes of a given directory
|
||||
func (s *Consul) List(directory string) ([]*store.KVPair, error) {
|
||||
pairs, _, err := s.client.KV().List(s.normalize(directory), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(pairs) == 0 {
|
||||
return nil, store.ErrKeyNotFound
|
||||
}
|
||||
|
||||
kv := []*store.KVPair{}
|
||||
|
||||
for _, pair := range pairs {
|
||||
if pair.Key == directory {
|
||||
continue
|
||||
}
|
||||
kv = append(kv, &store.KVPair{
|
||||
Key: pair.Key,
|
||||
Value: pair.Value,
|
||||
LastIndex: pair.ModifyIndex,
|
||||
})
|
||||
}
|
||||
|
||||
return kv, nil
|
||||
}
|
||||
|
||||
// DeleteTree deletes a range of keys under a given directory
|
||||
func (s *Consul) DeleteTree(directory string) error {
|
||||
if _, err := s.List(directory); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := s.client.KV().DeleteTree(s.normalize(directory), nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// Watch for changes on a "key"
|
||||
// It returns a channel that will receive changes or pass
|
||||
// on errors. Upon creation, the current value will first
|
||||
// be sent to the channel. Providing a non-nil stopCh can
|
||||
// be used to stop watching.
|
||||
func (s *Consul) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
|
||||
kv := s.client.KV()
|
||||
watchCh := make(chan *store.KVPair)
|
||||
|
||||
go func() {
|
||||
defer close(watchCh)
|
||||
|
||||
// Use a wait time in order to check if we should quit
|
||||
// from time to time.
|
||||
opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
|
||||
|
||||
for {
|
||||
// Check if we should quit
|
||||
select {
|
||||
case <-stopCh:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Get the key
|
||||
pair, meta, err := kv.Get(key, opts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// If LastIndex didn't change then it means `Get` returned
// because of the WaitTime and the key didn't change.
|
||||
if opts.WaitIndex == meta.LastIndex {
|
||||
continue
|
||||
}
|
||||
opts.WaitIndex = meta.LastIndex
|
||||
|
||||
// Return the value to the channel
|
||||
// FIXME: What happens when a key is deleted?
|
||||
if pair != nil {
|
||||
watchCh <- &store.KVPair{
|
||||
Key: pair.Key,
|
||||
Value: pair.Value,
|
||||
LastIndex: pair.ModifyIndex,
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return watchCh, nil
|
||||
}
|
||||
|
||||
// WatchTree watches for changes on a "directory"
|
||||
// It returns a channel that will receive changes or pass
|
||||
// on errors. Upon creating a watch, the current child values
// will be sent to the channel. Providing a non-nil stopCh can
|
||||
// be used to stop watching.
|
||||
func (s *Consul) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
|
||||
kv := s.client.KV()
|
||||
watchCh := make(chan []*store.KVPair)
|
||||
|
||||
go func() {
|
||||
defer close(watchCh)
|
||||
|
||||
// Use a wait time in order to check if we should quit
|
||||
// from time to time.
|
||||
opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
|
||||
for {
|
||||
// Check if we should quit
|
||||
select {
|
||||
case <-stopCh:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Get all the children
|
||||
pairs, meta, err := kv.List(directory, opts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// If LastIndex didn't change then it means `Get` returned
|
||||
// because of the WaitTime and the child keys didn't change.
|
||||
if opts.WaitIndex == meta.LastIndex {
|
||||
continue
|
||||
}
|
||||
opts.WaitIndex = meta.LastIndex
|
||||
|
||||
// Return children KV pairs to the channel
|
||||
kvpairs := []*store.KVPair{}
|
||||
for _, pair := range pairs {
|
||||
if pair.Key == directory {
|
||||
continue
|
||||
}
|
||||
kvpairs = append(kvpairs, &store.KVPair{
|
||||
Key: pair.Key,
|
||||
Value: pair.Value,
|
||||
LastIndex: pair.ModifyIndex,
|
||||
})
|
||||
}
|
||||
watchCh <- kvpairs
|
||||
}
|
||||
}()
|
||||
|
||||
return watchCh, nil
|
||||
}
|
||||
|
||||
// NewLock returns a handle to a lock struct which can
|
||||
// be used to provide mutual exclusion on a key
|
||||
func (s *Consul) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
|
||||
lockOpts := &api.LockOptions{
|
||||
Key: s.normalize(key),
|
||||
}
|
||||
|
||||
lock := &consulLock{}
|
||||
|
||||
ttl := defaultLockTTL
|
||||
|
||||
if options != nil {
|
||||
// Set optional TTL on Lock
|
||||
if options.TTL != 0 {
|
||||
ttl = options.TTL
|
||||
}
|
||||
// Set optional value on Lock
|
||||
if options.Value != nil {
|
||||
lockOpts.Value = options.Value
|
||||
}
|
||||
}
|
||||
|
||||
entry := &api.SessionEntry{
|
||||
Behavior: api.SessionBehaviorRelease, // Release the lock when the session expires
|
||||
TTL: (ttl / 2).String(), // Consul multiplies the TTL by 2x
|
||||
LockDelay: 1 * time.Millisecond, // Virtually disable lock delay
|
||||
}
|
||||
|
||||
// Create the key session
|
||||
session, _, err := s.client.Session().Create(entry, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Place the session and renew chan on lock
|
||||
lockOpts.Session = session
|
||||
lock.renewCh = options.RenewLock
|
||||
|
||||
l, err := s.client.LockOpts(lockOpts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Renew the session ttl lock periodically
|
||||
s.renewLockSession(entry.TTL, session, options.RenewLock)
|
||||
|
||||
lock.lock = l
|
||||
return lock, nil
|
||||
}
|
||||
|
||||
// renewLockSession is used to renew a session Lock, it takes
|
||||
// a stopRenew chan which is used to explicitly stop the session
|
||||
// renew process. The renew routine never stops until a signal is
|
||||
// sent to this channel. If deleting the session fails because the
|
||||
// connection to the store is lost, it keeps trying to delete the
|
||||
// session periodically until it can contact the store, this ensures
|
||||
// that the lock is not maintained indefinitely which ensures liveness
|
||||
// over safety for the lock when the store becomes unavailable.
|
||||
func (s *Consul) renewLockSession(initialTTL string, id string, stopRenew chan struct{}) {
|
||||
sessionDestroyAttempts := 0
|
||||
ttl, err := time.ParseDuration(initialTTL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-time.After(ttl / 2):
|
||||
entry, _, err := s.client.Session().Renew(id, nil)
|
||||
if err != nil {
|
||||
// If an error occurs, continue until the
|
||||
// session gets destroyed explicitly or
|
||||
// the session ttl times out
|
||||
continue
|
||||
}
|
||||
if entry == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Handle the server updating the TTL
|
||||
ttl, _ = time.ParseDuration(entry.TTL)
|
||||
|
||||
case <-stopRenew:
|
||||
// Attempt a session destroy
|
||||
_, err := s.client.Session().Destroy(id, nil)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if sessionDestroyAttempts >= MaxSessionDestroyAttempts {
|
||||
return
|
||||
}
|
||||
|
||||
// We can't destroy the session because the store
|
||||
// is unavailable, wait for the session renew period
|
||||
sessionDestroyAttempts++
|
||||
time.Sleep(ttl / 2)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Lock attempts to acquire the lock and blocks while
|
||||
// doing so. It returns a channel that is closed if our
|
||||
// lock is lost or if an error occurs
|
||||
func (l *consulLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
|
||||
return l.lock.Lock(stopChan)
|
||||
}
|
||||
|
||||
// Unlock the "key". Calling unlock while
|
||||
// not holding the lock will throw an error
|
||||
func (l *consulLock) Unlock() error {
|
||||
if l.renewCh != nil {
|
||||
close(l.renewCh)
|
||||
}
|
||||
return l.lock.Unlock()
|
||||
}
|
||||
|
||||
// AtomicPut puts a value at "key" if the key has not been
|
||||
// modified in the meantime, throws an error if this is the case
|
||||
func (s *Consul) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
|
||||
|
||||
p := &api.KVPair{Key: s.normalize(key), Value: value, Flags: api.LockFlagValue}
|
||||
|
||||
if previous == nil {
|
||||
// Consul interprets ModifyIndex = 0 as new key.
|
||||
p.ModifyIndex = 0
|
||||
} else {
|
||||
p.ModifyIndex = previous.LastIndex
|
||||
}
|
||||
|
||||
ok, _, err := s.client.KV().CAS(p, nil)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
if !ok {
|
||||
if previous == nil {
|
||||
return false, nil, store.ErrKeyExists
|
||||
}
|
||||
return false, nil, store.ErrKeyModified
|
||||
}
|
||||
|
||||
pair, err := s.Get(key)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
return true, pair, nil
|
||||
}
|
||||
|
||||
// AtomicDelete deletes a value at "key" if the key has not
|
||||
// been modified in the meantime, throws an error if this is the case
|
||||
func (s *Consul) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
|
||||
if previous == nil {
|
||||
return false, store.ErrPreviousNotSpecified
|
||||
}
|
||||
|
||||
p := &api.KVPair{Key: s.normalize(key), ModifyIndex: previous.LastIndex, Flags: api.LockFlagValue}
|
||||
|
||||
// Extra Get operation to check on the key
|
||||
_, err := s.Get(key)
|
||||
if err != nil && err == store.ErrKeyNotFound {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if work, _, err := s.client.KV().DeleteCAS(p, nil); err != nil {
|
||||
return false, err
|
||||
} else if !work {
|
||||
return false, store.ErrKeyModified
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Close closes the client connection
|
||||
func (s *Consul) Close() {
|
||||
return
|
||||
}
604 vendor/github.com/docker/libkv/store/etcd/etcd.go generated vendored
@ -1,604 +0,0 @@
package etcd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
etcd "github.com/coreos/etcd/client"
|
||||
"github.com/docker/libkv"
|
||||
"github.com/docker/libkv/store"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrAbortTryLock is thrown when a user stops trying to seek the lock
|
||||
// by sending a signal to the stop chan, this is used to verify if the
|
||||
// operation succeeded
|
||||
ErrAbortTryLock = errors.New("lock operation aborted")
|
||||
)
|
||||
|
||||
// Etcd is the receiver type for the
|
||||
// Store interface
|
||||
type Etcd struct {
|
||||
client etcd.KeysAPI
|
||||
}
|
||||
|
||||
type etcdLock struct {
|
||||
client etcd.KeysAPI
|
||||
key string
|
||||
value string
|
||||
ttl time.Duration
|
||||
|
||||
// Closed when the caller wants to stop renewing the lock. I'm not sure
|
||||
// why this is even used - you could just call the Unlock() method.
|
||||
stopRenew chan struct{}
|
||||
// When the lock is held, this is the last modified index of the key.
|
||||
// Used for conditional updates when extending the lock TTL and when
|
||||
// conditionally deleting when Unlock() is called.
|
||||
lastIndex uint64
|
||||
// When the lock is held, this function will cancel the locked context.
|
||||
// This is called both by the Unlock() method in order to stop the
|
||||
// background holding goroutine and in a deferred call in that background
|
||||
// holding goroutine in case the lock is lost due to an error or the
|
||||
// stopRenew channel is closed. Calling this function also closes the chan
|
||||
// returned by the Lock() method.
|
||||
cancel context.CancelFunc
|
||||
// Used to sync the Unlock() call with the background holding goroutine.
|
||||
// This channel is closed when that background goroutine exits, signalling
|
||||
// that it is okay to conditionally delete the key.
|
||||
doneHolding chan struct{}
|
||||
}
|
||||
|
||||
const (
|
||||
periodicSync = 5 * time.Minute
|
||||
defaultLockTTL = 20 * time.Second
|
||||
defaultUpdateTime = 5 * time.Second
|
||||
)
|
||||
|
||||
// Register registers etcd to libkv
|
||||
func Register() {
|
||||
libkv.AddStore(store.ETCD, New)
|
||||
}
|
||||
|
||||
// New creates a new Etcd client given a list
|
||||
// of endpoints and an optional tls config
|
||||
func New(addrs []string, options *store.Config) (store.Store, error) {
|
||||
s := &Etcd{}
|
||||
|
||||
var (
|
||||
entries []string
|
||||
err error
|
||||
)
|
||||
|
||||
entries = store.CreateEndpoints(addrs, "http")
|
||||
cfg := &etcd.Config{
|
||||
Endpoints: entries,
|
||||
Transport: etcd.DefaultTransport,
|
||||
HeaderTimeoutPerRequest: 3 * time.Second,
|
||||
}
|
||||
|
||||
// Set options
|
||||
if options != nil {
|
||||
if options.TLS != nil {
|
||||
setTLS(cfg, options.TLS, addrs)
|
||||
}
|
||||
if options.ConnectionTimeout != 0 {
|
||||
setTimeout(cfg, options.ConnectionTimeout)
|
||||
}
|
||||
if options.Username != "" {
|
||||
setCredentials(cfg, options.Username, options.Password)
|
||||
}
|
||||
}
|
||||
|
||||
c, err := etcd.New(*cfg)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
s.client = etcd.NewKeysAPI(c)
|
||||
|
||||
// Periodic Cluster Sync
|
||||
go func() {
|
||||
for {
|
||||
if err := c.AutoSync(context.Background(), periodicSync); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// SetTLS sets the tls configuration given a tls.Config scheme
|
||||
func setTLS(cfg *etcd.Config, tls *tls.Config, addrs []string) {
|
||||
entries := store.CreateEndpoints(addrs, "https")
|
||||
cfg.Endpoints = entries
|
||||
|
||||
// Set transport
|
||||
t := http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
TLSClientConfig: tls,
|
||||
}
|
||||
|
||||
cfg.Transport = &t
|
||||
}
|
||||
|
||||
// setTimeout sets the timeout used for connecting to the store
|
||||
func setTimeout(cfg *etcd.Config, time time.Duration) {
|
||||
cfg.HeaderTimeoutPerRequest = time
|
||||
}
|
||||
|
||||
// setCredentials sets the username/password credentials for connecting to Etcd
|
||||
func setCredentials(cfg *etcd.Config, username, password string) {
|
||||
cfg.Username = username
|
||||
cfg.Password = password
|
||||
}
|
||||
|
||||
// Normalize the key for usage in Etcd
|
||||
func (s *Etcd) normalize(key string) string {
|
||||
key = store.Normalize(key)
|
||||
return strings.TrimPrefix(key, "/")
|
||||
}
|
||||
|
||||
// keyNotFound checks on the error returned by the KeysAPI
|
||||
// to verify if the key exists in the store or not
|
||||
func keyNotFound(err error) bool {
|
||||
if err != nil {
|
||||
if etcdError, ok := err.(etcd.Error); ok {
|
||||
if etcdError.Code == etcd.ErrorCodeKeyNotFound ||
|
||||
etcdError.Code == etcd.ErrorCodeNotFile ||
|
||||
etcdError.Code == etcd.ErrorCodeNotDir {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Get the value at "key", returns the last modified
|
||||
// index to use in conjunction to Atomic calls
|
||||
func (s *Etcd) Get(key string) (pair *store.KVPair, err error) {
|
||||
getOpts := &etcd.GetOptions{
|
||||
Quorum: true,
|
||||
}
|
||||
|
||||
result, err := s.client.Get(context.Background(), s.normalize(key), getOpts)
|
||||
if err != nil {
|
||||
if keyNotFound(err) {
|
||||
return nil, store.ErrKeyNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pair = &store.KVPair{
|
||||
Key: key,
|
||||
Value: []byte(result.Node.Value),
|
||||
LastIndex: result.Node.ModifiedIndex,
|
||||
}
|
||||
|
||||
return pair, nil
|
||||
}
|
||||
|
||||
// Put a value at "key"
|
||||
func (s *Etcd) Put(key string, value []byte, opts *store.WriteOptions) error {
|
||||
setOpts := &etcd.SetOptions{}
|
||||
|
||||
// Set options
|
||||
if opts != nil {
|
||||
setOpts.Dir = opts.IsDir
|
||||
setOpts.TTL = opts.TTL
|
||||
}
|
||||
|
||||
_, err := s.client.Set(context.Background(), s.normalize(key), string(value), setOpts)
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete a value at "key"
|
||||
func (s *Etcd) Delete(key string) error {
|
||||
opts := &etcd.DeleteOptions{
|
||||
Recursive: false,
|
||||
}
|
||||
|
||||
_, err := s.client.Delete(context.Background(), s.normalize(key), opts)
|
||||
if keyNotFound(err) {
|
||||
return store.ErrKeyNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Exists checks if the key exists inside the store
|
||||
func (s *Etcd) Exists(key string) (bool, error) {
|
||||
_, err := s.Get(key)
|
||||
if err != nil {
|
||||
if err == store.ErrKeyNotFound {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Watch for changes on a "key"
|
||||
// It returns a channel that will receive changes or pass
|
||||
// on errors. Upon creation, the current value will first
|
||||
// be sent to the channel. Providing a non-nil stopCh can
|
||||
// be used to stop watching.
|
||||
func (s *Etcd) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
|
||||
opts := &etcd.WatcherOptions{Recursive: false}
|
||||
watcher := s.client.Watcher(s.normalize(key), opts)
|
||||
|
||||
// watchCh is sending back events to the caller
|
||||
watchCh := make(chan *store.KVPair)
|
||||
|
||||
go func() {
|
||||
defer close(watchCh)
|
||||
|
||||
// Get the current value
|
||||
pair, err := s.Get(key)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Push the current value through the channel.
|
||||
watchCh <- pair
|
||||
|
||||
for {
|
||||
// Check if the watch was stopped by the caller
|
||||
select {
|
||||
case <-stopCh:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
result, err := watcher.Next(context.Background())
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
watchCh <- &store.KVPair{
|
||||
Key: key,
|
||||
Value: []byte(result.Node.Value),
|
||||
LastIndex: result.Node.ModifiedIndex,
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return watchCh, nil
|
||||
}
|
||||
|
||||
// WatchTree watches for changes on a "directory"
|
||||
// It returns a channel that will receive changes or pass
|
||||
// on errors. Upon creating a watch, the current child values
|
||||
// will be sent to the channel. Providing a non-nil stopCh can
|
||||
// be used to stop watching.
|
||||
func (s *Etcd) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
|
||||
watchOpts := &etcd.WatcherOptions{Recursive: true}
|
||||
watcher := s.client.Watcher(s.normalize(directory), watchOpts)
|
||||
|
||||
// watchCh is sending back events to the caller
|
||||
watchCh := make(chan []*store.KVPair)
|
||||
|
||||
go func() {
|
||||
defer close(watchCh)
|
||||
|
||||
// Get child values
|
||||
list, err := s.List(directory)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Push the current value through the channel.
|
||||
watchCh <- list
|
||||
|
||||
for {
|
||||
// Check if the watch was stopped by the caller
|
||||
select {
|
||||
case <-stopCh:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
_, err := watcher.Next(context.Background())
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
list, err = s.List(directory)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
watchCh <- list
|
||||
}
|
||||
}()
|
||||
|
||||
return watchCh, nil
|
||||
}
|
||||
|
||||
// AtomicPut puts a value at "key" if the key has not been
|
||||
// modified in the meantime, throws an error if this is the case
|
||||
func (s *Etcd) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) {
|
||||
var (
|
||||
meta *etcd.Response
|
||||
err error
|
||||
)
|
||||
|
||||
setOpts := &etcd.SetOptions{}
|
||||
|
||||
if previous != nil {
|
||||
setOpts.PrevExist = etcd.PrevExist
|
||||
setOpts.PrevIndex = previous.LastIndex
|
||||
if previous.Value != nil {
|
||||
setOpts.PrevValue = string(previous.Value)
|
||||
}
|
||||
} else {
|
||||
setOpts.PrevExist = etcd.PrevNoExist
|
||||
}
|
||||
|
||||
if opts != nil {
|
||||
if opts.TTL > 0 {
|
||||
setOpts.TTL = opts.TTL
|
||||
}
|
||||
}
|
||||
|
||||
meta, err = s.client.Set(context.Background(), s.normalize(key), string(value), setOpts)
|
||||
if err != nil {
|
||||
if etcdError, ok := err.(etcd.Error); ok {
|
||||
// Compare failed
|
||||
if etcdError.Code == etcd.ErrorCodeTestFailed {
|
||||
return false, nil, store.ErrKeyModified
|
||||
}
|
||||
// Node exists error (when PrevNoExist)
|
||||
if etcdError.Code == etcd.ErrorCodeNodeExist {
|
||||
return false, nil, store.ErrKeyExists
|
||||
}
|
||||
}
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
updated := &store.KVPair{
|
||||
Key: key,
|
||||
Value: value,
|
||||
LastIndex: meta.Node.ModifiedIndex,
|
||||
}
|
||||
|
||||
return true, updated, nil
|
||||
}
|
||||
|
||||
// AtomicDelete deletes a value at "key" if the key
|
||||
// has not been modified in the meantime, throws an
|
||||
// error if this is the case
|
||||
func (s *Etcd) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
|
||||
if previous == nil {
|
||||
return false, store.ErrPreviousNotSpecified
|
||||
}
|
||||
|
||||
delOpts := &etcd.DeleteOptions{}
|
||||
|
||||
if previous != nil {
|
||||
delOpts.PrevIndex = previous.LastIndex
|
||||
if previous.Value != nil {
|
||||
delOpts.PrevValue = string(previous.Value)
|
||||
}
|
||||
}
|
||||
|
||||
_, err := s.client.Delete(context.Background(), s.normalize(key), delOpts)
|
||||
if err != nil {
|
||||
if etcdError, ok := err.(etcd.Error); ok {
|
||||
// Key Not Found
|
||||
if etcdError.Code == etcd.ErrorCodeKeyNotFound {
|
||||
return false, store.ErrKeyNotFound
|
||||
}
|
||||
// Compare failed
|
||||
if etcdError.Code == etcd.ErrorCodeTestFailed {
|
||||
return false, store.ErrKeyModified
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// List child nodes of a given directory
|
||||
func (s *Etcd) List(directory string) ([]*store.KVPair, error) {
|
||||
getOpts := &etcd.GetOptions{
|
||||
Quorum: true,
|
||||
Recursive: true,
|
||||
Sort: true,
|
||||
}
|
||||
|
||||
resp, err := s.client.Get(context.Background(), s.normalize(directory), getOpts)
|
||||
if err != nil {
|
||||
if keyNotFound(err) {
|
||||
return nil, store.ErrKeyNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kv := []*store.KVPair{}
|
||||
for _, n := range resp.Node.Nodes {
|
||||
kv = append(kv, &store.KVPair{
|
||||
Key: n.Key,
|
||||
Value: []byte(n.Value),
|
||||
LastIndex: n.ModifiedIndex,
|
||||
})
|
||||
}
|
||||
return kv, nil
|
||||
}
|
||||
|
||||
// DeleteTree deletes a range of keys under a given directory
|
||||
func (s *Etcd) DeleteTree(directory string) error {
|
||||
delOpts := &etcd.DeleteOptions{
|
||||
Recursive: true,
|
||||
}
|
||||
|
||||
_, err := s.client.Delete(context.Background(), s.normalize(directory), delOpts)
|
||||
if keyNotFound(err) {
|
||||
return store.ErrKeyNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// NewLock returns a handle to a lock struct which can
|
||||
// be used to provide mutual exclusion on a key
|
||||
func (s *Etcd) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
|
||||
var value string
|
||||
ttl := defaultLockTTL
|
||||
renewCh := make(chan struct{})
|
||||
|
||||
// Apply options on Lock
|
||||
if options != nil {
|
||||
if options.Value != nil {
|
||||
value = string(options.Value)
|
||||
}
|
||||
if options.TTL != 0 {
|
||||
ttl = options.TTL
|
||||
}
|
||||
if options.RenewLock != nil {
|
||||
renewCh = options.RenewLock
|
||||
}
|
||||
}
|
||||
|
||||
// Create lock object
|
||||
lock = &etcdLock{
|
||||
client: s.client,
|
||||
stopRenew: renewCh,
|
||||
key: s.normalize(key),
|
||||
value: value,
|
||||
ttl: ttl,
|
||||
}
|
||||
|
||||
return lock, nil
|
||||
}
|
||||
|
||||
// Lock attempts to acquire the lock and blocks while
|
||||
// doing so. It returns a channel that is closed if our
|
||||
// lock is lost or if an error occurs
|
||||
func (l *etcdLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
|
||||
// Conditional Set - only if the key does not exist.
|
||||
setOpts := &etcd.SetOptions{
|
||||
TTL: l.ttl,
|
||||
PrevExist: etcd.PrevNoExist,
|
||||
}
|
||||
|
||||
for {
|
||||
resp, err := l.client.Set(context.Background(), l.key, l.value, setOpts)
|
||||
if err == nil {
|
||||
// Acquired the lock!
|
||||
l.lastIndex = resp.Node.ModifiedIndex
|
||||
lockedCtx, cancel := context.WithCancel(context.Background())
|
||||
l.cancel = cancel
|
||||
l.doneHolding = make(chan struct{})
|
||||
|
||||
go l.holdLock(lockedCtx)
|
||||
|
||||
return lockedCtx.Done(), nil
|
||||
}
|
||||
|
||||
etcdErr, ok := err.(etcd.Error)
|
||||
if !ok || etcdErr.Code != etcd.ErrorCodeNodeExist {
|
||||
return nil, err // Unexpected error.
|
||||
}
|
||||
|
||||
// Need to wait for the lock key to expire or be deleted.
|
||||
if err := l.waitLock(stopChan, etcdErr.Index); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete or Expire event occurred.
|
||||
// Retry
|
||||
}
|
||||
}
|
||||
|
||||
// Hold the lock as long as we can.
|
||||
// Updates the key ttl periodically until we receive
|
||||
// an explicit stop signal from the Unlock method OR
|
||||
// the stopRenew channel is closed.
|
||||
func (l *etcdLock) holdLock(ctx context.Context) {
|
||||
defer close(l.doneHolding)
|
||||
defer l.cancel()
|
||||
|
||||
update := time.NewTicker(l.ttl / 3)
|
||||
defer update.Stop()
|
||||
|
||||
setOpts := &etcd.SetOptions{TTL: l.ttl}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-update.C:
|
||||
setOpts.PrevIndex = l.lastIndex
|
||||
resp, err := l.client.Set(ctx, l.key, l.value, setOpts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
l.lastIndex = resp.Node.ModifiedIndex
|
||||
case <-l.stopRenew:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WaitLock simply waits for the key to be available for creation.
|
||||
func (l *etcdLock) waitLock(stopWait <-chan struct{}, afterIndex uint64) error {
|
||||
waitCtx, waitCancel := context.WithCancel(context.Background())
|
||||
defer waitCancel()
|
||||
go func() {
|
||||
select {
|
||||
case <-stopWait:
|
||||
// If the caller closes the stopWait, cancel the wait context.
|
||||
waitCancel()
|
||||
case <-waitCtx.Done():
|
||||
// No longer waiting.
|
||||
}
|
||||
}()
|
||||
|
||||
watcher := l.client.Watcher(l.key, &etcd.WatcherOptions{AfterIndex: afterIndex})
|
||||
for {
|
||||
event, err := watcher.Next(waitCtx)
|
||||
if err != nil {
|
||||
if err == context.Canceled {
|
||||
return ErrAbortTryLock
|
||||
}
|
||||
return err
|
||||
}
|
||||
switch event.Action {
|
||||
case "delete", "compareAndDelete", "expire":
|
||||
return nil // The key has been deleted or expired.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock the "key". Calling unlock while
|
||||
// not holding the lock will throw an error
|
||||
func (l *etcdLock) Unlock() error {
|
||||
l.cancel() // Will signal the holdLock goroutine to exit.
|
||||
<-l.doneHolding // Wait for the holdLock goroutine to exit.
|
||||
|
||||
var err error
|
||||
if l.lastIndex != 0 {
|
||||
delOpts := &etcd.DeleteOptions{
|
||||
PrevIndex: l.lastIndex,
|
||||
}
|
||||
_, err = l.client.Delete(context.Background(), l.key, delOpts)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Close closes the client connection
|
||||
func (s *Etcd) Close() {
|
||||
return
|
||||
}
429 vendor/github.com/docker/libkv/store/zookeeper/zookeeper.go generated vendored
@ -1,429 +0,0 @@
package zookeeper
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/libkv"
|
||||
"github.com/docker/libkv/store"
|
||||
zk "github.com/samuel/go-zookeeper/zk"
|
||||
)
|
||||
|
||||
const (
|
||||
// SOH control character
|
||||
SOH = "\x01"
|
||||
|
||||
defaultTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
// Zookeeper is the receiver type for
|
||||
// the Store interface
|
||||
type Zookeeper struct {
|
||||
timeout time.Duration
|
||||
client *zk.Conn
|
||||
}
|
||||
|
||||
type zookeeperLock struct {
|
||||
client *zk.Conn
|
||||
lock *zk.Lock
|
||||
key string
|
||||
value []byte
|
||||
}
|
||||
|
||||
// Register registers zookeeper to libkv
|
||||
func Register() {
|
||||
libkv.AddStore(store.ZK, New)
|
||||
}
|
||||
|
||||
// New creates a new Zookeeper client given a
|
||||
// list of endpoints and an optional tls config
|
||||
func New(endpoints []string, options *store.Config) (store.Store, error) {
|
||||
s := &Zookeeper{}
|
||||
s.timeout = defaultTimeout
|
||||
|
||||
// Set options
|
||||
if options != nil {
|
||||
if options.ConnectionTimeout != 0 {
|
||||
s.setTimeout(options.ConnectionTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
// Connect to Zookeeper
|
||||
conn, _, err := zk.Connect(endpoints, s.timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.client = conn
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// setTimeout sets the timeout for connecting to Zookeeper
|
||||
func (s *Zookeeper) setTimeout(time time.Duration) {
|
||||
s.timeout = time
|
||||
}
|
||||
|
||||
// Get the value at "key", returns the last modified index
|
||||
// to use in conjunction to Atomic calls
|
||||
func (s *Zookeeper) Get(key string) (pair *store.KVPair, err error) {
|
||||
resp, meta, err := s.client.Get(s.normalize(key))
|
||||
|
||||
if err != nil {
|
||||
if err == zk.ErrNoNode {
|
||||
return nil, store.ErrKeyNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// FIXME handle very rare cases where Get returns the
|
||||
// SOH control character instead of the actual value
|
||||
if string(resp) == SOH {
|
||||
return s.Get(store.Normalize(key))
|
||||
}
|
||||
|
||||
pair = &store.KVPair{
|
||||
Key: key,
|
||||
Value: resp,
|
||||
LastIndex: uint64(meta.Version),
|
||||
}
|
||||
|
||||
return pair, nil
|
||||
}
|
||||
|
||||
// createFullPath creates the entire path for a directory
|
||||
// that does not exist
|
||||
func (s *Zookeeper) createFullPath(path []string, ephemeral bool) error {
|
||||
for i := 1; i <= len(path); i++ {
|
||||
newpath := "/" + strings.Join(path[:i], "/")
|
||||
if i == len(path) && ephemeral {
|
||||
_, err := s.client.Create(newpath, []byte{}, zk.FlagEphemeral, zk.WorldACL(zk.PermAll))
|
||||
return err
|
||||
}
|
||||
_, err := s.client.Create(newpath, []byte{}, 0, zk.WorldACL(zk.PermAll))
|
||||
if err != nil {
|
||||
// Skip if node already exists
|
||||
if err != zk.ErrNodeExists {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put a value at "key"
|
||||
func (s *Zookeeper) Put(key string, value []byte, opts *store.WriteOptions) error {
|
||||
fkey := s.normalize(key)
|
||||
|
||||
exists, err := s.Exists(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !exists {
|
||||
if opts != nil && opts.TTL > 0 {
|
||||
s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), true)
|
||||
} else {
|
||||
s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), false)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = s.client.Set(fkey, value, -1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete a value at "key"
|
||||
func (s *Zookeeper) Delete(key string) error {
|
||||
err := s.client.Delete(s.normalize(key), -1)
|
||||
if err == zk.ErrNoNode {
|
||||
return store.ErrKeyNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Exists checks if the key exists inside the store
|
||||
func (s *Zookeeper) Exists(key string) (bool, error) {
|
||||
exists, _, err := s.client.Exists(s.normalize(key))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return exists, nil
|
||||
}
|
||||
|
||||
// Watch for changes on a "key"
|
||||
// It returns a channel that will receive changes or pass
|
||||
// on errors. Upon creation, the current value will first
|
||||
// be sent to the channel. Providing a non-nil stopCh can
|
||||
// be used to stop watching.
|
||||
func (s *Zookeeper) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
|
||||
// Get the key first
|
||||
pair, err := s.Get(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Catch zk notifications and fire changes into the channel.
|
||||
watchCh := make(chan *store.KVPair)
|
||||
go func() {
|
||||
defer close(watchCh)
|
||||
|
||||
// Get returns the current value to the channel prior
|
||||
// to listening to any event that may occur on that key
|
||||
watchCh <- pair
|
||||
for {
|
||||
_, _, eventCh, err := s.client.GetW(s.normalize(key))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case e := <-eventCh:
|
||||
if e.Type == zk.EventNodeDataChanged {
|
||||
if entry, err := s.Get(key); err == nil {
|
||||
watchCh <- entry
|
||||
}
|
||||
}
|
||||
case <-stopCh:
|
||||
// There is no way to stop GetW so just quit
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return watchCh, nil
|
||||
}
|
||||
|
||||
// WatchTree watches for changes on a "directory"
|
||||
// It returns a channel that will receive changes or pass
|
||||
// on errors. Upon creating a watch, the current child values
// will be sent to the channel. Providing a non-nil stopCh can
|
||||
// be used to stop watching.
|
||||
func (s *Zookeeper) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
|
||||
// List the children first
|
||||
entries, err := s.List(directory)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Catch zk notifications and fire changes into the channel.
|
||||
watchCh := make(chan []*store.KVPair)
|
||||
go func() {
|
||||
defer close(watchCh)
|
||||
|
||||
// List returns the children values to the channel
|
||||
// prior to listening to any events that may occur
|
||||
// on those keys
|
||||
watchCh <- entries
|
||||
|
||||
for {
|
||||
_, _, eventCh, err := s.client.ChildrenW(s.normalize(directory))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case e := <-eventCh:
|
||||
if e.Type == zk.EventNodeChildrenChanged {
|
||||
if kv, err := s.List(directory); err == nil {
|
||||
watchCh <- kv
|
||||
}
|
||||
}
|
||||
case <-stopCh:
|
||||
// There is no way to stop GetW so just quit
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return watchCh, nil
|
||||
}
|
||||
|
||||
// List child nodes of a given directory
|
||||
func (s *Zookeeper) List(directory string) ([]*store.KVPair, error) {
|
||||
keys, stat, err := s.client.Children(s.normalize(directory))
|
||||
if err != nil {
|
||||
if err == zk.ErrNoNode {
|
||||
return nil, store.ErrKeyNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kv := []*store.KVPair{}
|
||||
|
||||
// FIXME Costly Get request for each child key..
|
||||
for _, key := range keys {
|
||||
pair, err := s.Get(strings.TrimSuffix(directory, "/") + s.normalize(key))
|
||||
if err != nil {
|
||||
// If node is not found: List is out of date, retry
|
||||
if err == store.ErrKeyNotFound {
|
||||
return s.List(directory)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kv = append(kv, &store.KVPair{
|
||||
Key: key,
|
||||
Value: []byte(pair.Value),
|
||||
LastIndex: uint64(stat.Version),
|
||||
})
|
||||
}
|
||||
|
||||
return kv, nil
|
||||
}
|
||||
|
||||
// DeleteTree deletes a range of keys under a given directory
|
||||
func (s *Zookeeper) DeleteTree(directory string) error {
|
||||
pairs, err := s.List(directory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var reqs []interface{}
|
||||
|
||||
for _, pair := range pairs {
|
||||
reqs = append(reqs, &zk.DeleteRequest{
|
||||
Path: s.normalize(directory + "/" + pair.Key),
|
||||
Version: -1,
|
||||
})
|
||||
}
|
||||
|
||||
_, err = s.client.Multi(reqs...)
|
||||
return err
|
||||
}
|
||||
|
||||
// AtomicPut puts a value at "key" if the key has not been
|
||||
// modified in the meantime, throws an error if this is the case
|
||||
func (s *Zookeeper) AtomicPut(key string, value []byte, previous *store.KVPair, _ *store.WriteOptions) (bool, *store.KVPair, error) {
|
||||
var lastIndex uint64
|
||||
|
||||
if previous != nil {
|
||||
meta, err := s.client.Set(s.normalize(key), value, int32(previous.LastIndex))
|
||||
if err != nil {
|
||||
// Compare Failed
|
||||
if err == zk.ErrBadVersion {
|
||||
return false, nil, store.ErrKeyModified
|
||||
}
|
||||
return false, nil, err
|
||||
}
|
||||
lastIndex = uint64(meta.Version)
|
||||
} else {
|
||||
// Interpret previous == nil as create operation.
|
||||
_, err := s.client.Create(s.normalize(key), value, 0, zk.WorldACL(zk.PermAll))
|
||||
if err != nil {
|
||||
// Directory does not exist
|
||||
if err == zk.ErrNoNode {
|
||||
|
||||
// Create the directory
|
||||
parts := store.SplitKey(strings.TrimSuffix(key, "/"))
|
||||
parts = parts[:len(parts)-1]
|
||||
if err = s.createFullPath(parts, false); err != nil {
|
||||
// Failed to create the directory.
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
// Create the node
|
||||
if _, err := s.client.Create(s.normalize(key), value, 0, zk.WorldACL(zk.PermAll)); err != nil {
|
||||
// Node exist error (when previous nil)
|
||||
if err == zk.ErrNodeExists {
|
||||
return false, nil, store.ErrKeyExists
|
||||
}
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
} else {
|
||||
// Node Exists error (when previous nil)
|
||||
if err == zk.ErrNodeExists {
|
||||
return false, nil, store.ErrKeyExists
|
||||
}
|
||||
|
||||
// Unhandled error
|
||||
return false, nil, err
|
||||
}
|
||||
}
|
||||
lastIndex = 0 // Newly created nodes have version 0.
|
||||
}
|
||||
|
||||
pair := &store.KVPair{
|
||||
Key: key,
|
||||
Value: value,
|
||||
LastIndex: lastIndex,
|
||||
}
|
||||
|
||||
return true, pair, nil
|
||||
}
|
||||
|
||||
// AtomicDelete deletes a value at "key" if the key
|
||||
// has not been modified in the meantime, throws an
|
||||
// error if this is the case
|
||||
func (s *Zookeeper) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
|
||||
if previous == nil {
|
||||
return false, store.ErrPreviousNotSpecified
|
||||
}
|
||||
|
||||
err := s.client.Delete(s.normalize(key), int32(previous.LastIndex))
|
||||
if err != nil {
|
||||
// Key not found
|
||||
if err == zk.ErrNoNode {
|
||||
return false, store.ErrKeyNotFound
|
||||
}
|
||||
// Compare failed
|
||||
if err == zk.ErrBadVersion {
|
||||
return false, store.ErrKeyModified
|
||||
}
|
||||
// General store error
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// NewLock returns a handle to a lock struct which can
|
||||
// be used to provide mutual exclusion on a key
|
||||
func (s *Zookeeper) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
|
||||
value := []byte("")
|
||||
|
||||
// Apply options
|
||||
if options != nil {
|
||||
if options.Value != nil {
|
||||
value = options.Value
|
||||
}
|
||||
}
|
||||
|
||||
lock = &zookeeperLock{
|
||||
client: s.client,
|
||||
key: s.normalize(key),
|
||||
value: value,
|
||||
lock: zk.NewLock(s.client, s.normalize(key), zk.WorldACL(zk.PermAll)),
|
||||
}
|
||||
|
||||
return lock, err
|
||||
}
|
||||
|
||||
// Lock attempts to acquire the lock and blocks while
|
||||
// doing so. It returns a channel that is closed if our
|
||||
// lock is lost or if an error occurs
|
||||
func (l *zookeeperLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
|
||||
err := l.lock.Lock()
|
||||
|
||||
if err == nil {
|
||||
// We hold the lock, we can set our value
|
||||
// FIXME: The value is left behind
|
||||
// (problematic for leader election)
|
||||
_, err = l.client.Set(l.key, l.value, -1)
|
||||
}
|
||||
|
||||
return make(chan struct{}), err
|
||||
}
|
||||
|
||||
// Unlock the "key". Calling unlock while
|
||||
// not holding the lock will throw an error
|
||||
func (l *zookeeperLock) Unlock() error {
|
||||
return l.lock.Unlock()
|
||||
}
|
||||
|
||||
// Close closes the client connection
|
||||
func (s *Zookeeper) Close() {
|
||||
s.client.Close()
|
||||
}
|
||||
|
||||
// Normalize the key for usage in Zookeeper
|
||||
func (s *Zookeeper) normalize(key string) string {
|
||||
key = store.Normalize(key)
|
||||
return strings.TrimSuffix(key, "/")
|
||||
}
354 vendor/github.com/hashicorp/consul/LICENSE generated vendored
@ -1,354 +0,0 @@
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. “Contributor”
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. “Contributor Version”
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor’s Contribution.
|
||||
|
||||
1.3. “Contribution”
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. “Covered Software”
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. “Incompatible With Secondary Licenses”
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of version
|
||||
1.1 or earlier of the License, but not also under the terms of a
|
||||
Secondary License.
|
||||
|
||||
1.6. “Executable Form”
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. “Larger Work”
|
||||
|
||||
means a work that combines Covered Software with other material, in a separate
|
||||
file or files, that is not Covered Software.
|
||||
|
||||
1.8. “License”
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. “Licensable”
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether at the
|
||||
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||
this License.
|
||||
|
||||
1.10. “Modifications”
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to, deletion
|
||||
from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. “Patent Claims” of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method, process,
|
||||
and apparatus claims, in any patent Licensable by such Contributor that
|
||||
would be infringed, but for the grant of the License, by the making,
|
||||
using, selling, offering for sale, having made, import, or transfer of
|
||||
either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. “Secondary License”
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. “Source Code Form”
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. “You” (or “Your”)
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, “You” includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, “control” means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or as
|
||||
part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its Contributions
|
||||
or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||
effective for each Contribution on the date the Contributor first distributes
|
||||
such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under this
|
||||
License. No additional rights or licenses will be implied from the distribution
|
||||
or licensing of Covered Software under this License. Notwithstanding Section
|
||||
2.1(b) above, no patent license is granted by a Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party’s
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||
Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks, or
|
||||
logos of any Contributor (except as may be necessary to comply with the
|
||||
notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this License
|
||||
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||
under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its Contributions
|
||||
are its original creation(s) or it has sufficient rights to grant the
|
||||
rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under applicable
|
||||
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under the
|
||||
terms of this License. You must inform recipients that the Source Code Form
|
||||
of the Covered Software is governed by the terms of this License, and how
|
||||
they can obtain a copy of this License. You may not attempt to alter or
|
||||
restrict the recipients’ rights in the Source Code Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this License,
|
||||
or sublicense it under different terms, provided that the license for
|
||||
the Executable Form does not attempt to limit or alter the recipients’
|
||||
rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for the
|
||||
Covered Software. If the Larger Work is a combination of Covered Software
|
||||
with a work governed by one or more Secondary Licenses, and the Covered
|
||||
Software is not Incompatible With Secondary Licenses, this License permits
|
||||
You to additionally distribute such Covered Software under the terms of
|
||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||
their option, further distribute the Covered Software under the terms of
|
||||
either this License or such Secondary License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices (including
|
||||
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||
of liability) contained within the Source Code Form of the Covered
|
||||
Software, except that You may alter any license notices to the extent
|
||||
required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||
of any Contributor. You must make it absolutely clear that any such
|
||||
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation

If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.

6. Disclaimer of Warranty

Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.

7. Limitation of Liability

Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party’s negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.

8. Litigation

Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party’s ability to bring cross-claims or counter-claims.

9. Miscellaneous

This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.

10. Versions of the License

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses

If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice

This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - “Incompatible With Secondary Licenses” Notice

This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.
85  vendor/github.com/hashicorp/consul/README.md  (generated, vendored)
@@ -1,85 +0,0 @@
# Consul [![Build Status](https://travis-ci.org/hashicorp/consul.png)](https://travis-ci.org/hashicorp/consul)

* Website: http://www.consul.io
* IRC: `#consul` on Freenode
* Mailing list: [Google Groups](https://groups.google.com/group/consul-tool/)

Consul is a tool for service discovery and configuration. Consul is
distributed, highly available, and extremely scalable.

Consul provides several key features:

* **Service Discovery** - Consul makes it simple for services to register
  themselves and to discover other services via a DNS or HTTP interface.
  External services such as SaaS providers can be registered as well.

* **Health Checking** - Health Checking enables Consul to quickly alert
  operators about any issues in a cluster. The integration with service
  discovery prevents routing traffic to unhealthy hosts and enables service
  level circuit breakers.

* **Key/Value Storage** - A flexible key/value store enables storing
  dynamic configuration, feature flagging, coordination, leader election and
  more. The simple HTTP API makes it easy to use anywhere.

* **Multi-Datacenter** - Consul is built to be datacenter aware, and can
  support any number of regions without complex configuration.

Consul runs on Linux, Mac OS X, and Windows. It is recommended to run the
Consul servers only on Linux, however.
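As a minimal sketch of the service-discovery and health-checking features listed above, the example below asks a local Consul agent for healthy instances of a service through the Go `api` package whose vendored copy is removed later in this diff. The service name `web` and the default agent address are assumptions made only for the example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Talk to the local agent (127.0.0.1:8500 unless overridden by env vars).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Ask for instances of the (hypothetical) "web" service that currently
	// pass their health checks; an empty tag matches any instance.
	entries, _, err := client.Health().Service("web", "", true, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		fmt.Printf("%s -> %s:%d\n", entry.Node.Node, entry.Service.Address, entry.Service.Port)
	}
}
```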
## Quick Start

An extensive quick start is viewable on the Consul website:

http://www.consul.io/intro/getting-started/install.html

## Documentation

Full, comprehensive documentation is viewable on the Consul website:

http://www.consul.io/docs

## Developing Consul

If you wish to work on Consul itself, you'll first need [Go](https://golang.org)
installed (version 1.4+ is _required_). Make sure you have Go properly installed,
including setting up your [GOPATH](https://golang.org/doc/code.html#GOPATH).

Next, clone this repository into `$GOPATH/src/github.com/hashicorp/consul` and
then just type `make`. In a few moments, you'll have a working `consul` executable:

```
$ go get -u ./...
$ make
...
$ bin/consul
...
```

*note: `make` will also place a copy of the binary in the first part of your $GOPATH*

You can run tests by typing `make test`.

If you make any changes to the code, run `make format` in order to automatically
format the code according to Go standards.

### Building Consul on Windows

Make sure Go 1.4+ is installed on your system and that the Go command is in your
%PATH%.

For building Consul on Windows, you also need to have MinGW installed.
[TDM-GCC](http://tdm-gcc.tdragon.net/) is a simple bundle installer which has all
the required tools for building Consul with MinGW.

Install TDM-GCC and make sure it has been added to your %PATH%.

If all goes well, you should be able to build Consul by running `make.bat` from a
command prompt.

See also [golang/winstrap](https://github.com/golang/winstrap) and
[golang/wiki/WindowsBuild](https://github.com/golang/go/wiki/WindowsBuild)
for more information on how to set up a general Go build environment on Windows
with MinGW.
39  vendor/github.com/hashicorp/consul/api/README.md  (generated, vendored)
@@ -1,39 +0,0 @@
Consul API client
=================

This package provides the `api` package which attempts to
provide programmatic access to the full Consul API.

Currently, all of the Consul APIs included in version 0.3 are supported.

Documentation
=============

The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/consul/api)

Usage
=====

Below is an example of using the Consul client:

```go
// Get a new client, with KV endpoints
client, _ := api.NewClient(api.DefaultConfig())
kv := client.KV()

// PUT a new KV pair
p := &api.KVPair{Key: "foo", Value: []byte("test")}
_, err := kv.Put(p, nil)
if err != nil {
	panic(err)
}

// Lookup the pair
pair, _, err := kv.Get("foo", nil)
if err != nil {
	panic(err)
}
fmt.Printf("KV: %v", pair)
```
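One behaviour worth noting, visible in the `kv.go` implementation removed further down in this diff: `Get` reports a missing key as a nil pair with a nil error, so callers need an explicit nil check. A small sketch continuing the snippet above (the key names are arbitrary):

```go
// A missing key comes back as a nil pair, not an error.
pair, _, err := kv.Get("config/missing-key", nil)
if err != nil {
	panic(err)
}
if pair == nil {
	fmt.Println("key does not exist")
}

// List fetches every pair under a prefix (the HTTP "recurse" query).
pairs, _, err := kv.List("config/", nil)
if err != nil {
	panic(err)
}
for _, p := range pairs {
	fmt.Printf("%s = %s\n", p.Key, p.Value)
}
```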
140  vendor/github.com/hashicorp/consul/api/acl.go  (generated, vendored)
@@ -1,140 +0,0 @@
[Deleted vendored file: Consul ACL client (ACLEntry type and the Create, Update, Destroy, Clone, Info, and List endpoint wrappers).]
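For reference, a sketch of the `Create` call from the ACL client deleted above, reusing the `client` value from the earlier examples. The token name and rule body are invented, and the call assumes an agent with ACLs enabled and a management token configured on the client:

```go
// Create a client-type token; Rules uses Consul's legacy ACL rule syntax.
id, _, err := client.ACL().Create(&api.ACLEntry{
	Name:  "read-only-config",
	Type:  api.ACLClientType,
	Rules: `key "config/" { policy = "read" }`,
}, nil)
if err != nil {
	log.Fatal(err)
}
fmt.Println("new ACL token ID:", id)
```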
334  vendor/github.com/hashicorp/consul/api/agent.go  (generated, vendored)
@@ -1,334 +0,0 @@
[Deleted vendored file: Consul agent client (service and check registration, TTL updates, cluster members, join/force-leave, and maintenance-mode endpoints).]
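A sketch of how a service registered itself through the agent client deleted above; the service name, port, and health-check URL are placeholders, and `client` is assumed to be constructed as in the earlier examples:

```go
reg := &api.AgentServiceRegistration{
	ID:   "web-1",
	Name: "web",
	Port: 8080,
	Tags: []string{"primary"},
	Check: &api.AgentServiceCheck{
		// The agent polls this endpoint and marks the service unhealthy on failure.
		HTTP:     "http://127.0.0.1:8080/health",
		Interval: "10s",
		Timeout:  "1s",
	},
}
if err := client.Agent().ServiceRegister(reg); err != nil {
	log.Fatal(err)
}
// client.Agent().ServiceDeregister("web-1") removes it again on shutdown.
```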
442  vendor/github.com/hashicorp/consul/api/api.go  (generated, vendored)
@@ -1,442 +0,0 @@
[Deleted vendored file: core HTTP client (Config and DefaultConfig with CONSUL_HTTP_* environment handling, request building, QueryOptions/WriteOptions, QueryMeta/WriteMeta, and JSON body encoding/decoding).]
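The `QueryOptions`/`QueryMeta` pair defined in the file deleted above is what enabled blocking (long-poll) reads: pass the previous `LastIndex` back as `WaitIndex` and the call blocks until the data changes or `WaitTime` expires. A sketch that watches a single key, reusing `kv` from the README example (the key name is arbitrary):

```go
// Watch a key indefinitely; each Get blocks until the value changes
// or the one-minute wait expires.
var lastIndex uint64
for {
	pair, meta, err := kv.Get("config/feature-flag", &api.QueryOptions{
		WaitIndex: lastIndex,
		WaitTime:  time.Minute,
	})
	if err != nil {
		log.Fatal(err)
	}
	lastIndex = meta.LastIndex
	if pair != nil {
		fmt.Printf("current value: %s\n", pair.Value)
	}
}
```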
182  vendor/github.com/hashicorp/consul/api/catalog.go  (generated, vendored)
@@ -1,182 +0,0 @@
[Deleted vendored file: catalog client (Register, Deregister, Datacenters, Nodes, Services, Service, and Node endpoints).]
104  vendor/github.com/hashicorp/consul/api/event.go  (generated, vendored)
@@ -1,104 +0,0 @@
[Deleted vendored file: user event client (Fire, List, and the IDToIndex helper).]
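A sketch of the fire-and-list pattern the event client deleted above supported; the event name and payload are invented, and `client` comes from the earlier examples:

```go
// Broadcast a custom event to the cluster.
id, _, err := client.Event().Fire(&api.UserEvent{
	Name:    "deploy",
	Payload: []byte("web v1.2.3"),
}, nil)
if err != nil {
	log.Fatal(err)
}

// Read back the most recent events with that name from the agent's buffer.
events, _, err := client.Event().List("deploy", nil)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("fired %s; agent holds %d matching event(s)\n", id, len(events))
```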
136  vendor/github.com/hashicorp/consul/api/health.go  (generated, vendored)
@@ -1,136 +0,0 @@
[Deleted vendored file: health client (Node, Checks, Service, and State endpoints).]
236
vendor/github.com/hashicorp/consul/api/kv.go
generated
vendored
236
vendor/github.com/hashicorp/consul/api/kv.go
generated
vendored
|
@ -1,236 +0,0 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// KVPair is used to represent a single K/V entry
|
||||
type KVPair struct {
|
||||
Key string
|
||||
CreateIndex uint64
|
||||
ModifyIndex uint64
|
||||
LockIndex uint64
|
||||
Flags uint64
|
||||
Value []byte
|
||||
Session string
|
||||
}
|
||||
|
||||
// KVPairs is a list of KVPair objects
|
||||
type KVPairs []*KVPair
|
||||
|
||||
// KV is used to manipulate the K/V API
|
||||
type KV struct {
|
||||
c *Client
|
||||
}
|
||||
|
||||
// KV is used to return a handle to the K/V apis
|
||||
func (c *Client) KV() *KV {
|
||||
return &KV{c}
|
||||
}
|
||||
|
||||
// Get is used to lookup a single key
|
||||
func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
|
||||
resp, qm, err := k.getInternal(key, nil, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if resp == nil {
|
||||
return nil, qm, nil
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var entries []*KVPair
|
||||
if err := decodeBody(resp, &entries); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(entries) > 0 {
|
||||
return entries[0], qm, nil
|
||||
}
|
||||
return nil, qm, nil
|
||||
}
|
||||
|
||||
// List is used to lookup all keys under a prefix
|
||||
func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
|
||||
resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if resp == nil {
|
||||
return nil, qm, nil
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var entries []*KVPair
|
||||
if err := decodeBody(resp, &entries); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return entries, qm, nil
|
||||
}
|
||||
|
||||
// Keys is used to list all the keys under a prefix. Optionally,
|
||||
// a separator can be used to limit the responses.
|
||||
func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
|
||||
params := map[string]string{"keys": ""}
|
||||
if separator != "" {
|
||||
params["separator"] = separator
|
||||
}
|
||||
resp, qm, err := k.getInternal(prefix, params, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if resp == nil {
|
||||
return nil, qm, nil
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var entries []string
|
||||
if err := decodeBody(resp, &entries); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return entries, qm, nil
|
||||
}
|
||||
|
||||
func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
|
||||
r := k.c.newRequest("GET", "/v1/kv/"+key)
|
||||
r.setQueryOptions(q)
|
||||
for param, val := range params {
|
||||
r.params.Set(param, val)
|
||||
}
|
||||
rtt, resp, err := k.c.doRequest(r)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
qm := &QueryMeta{}
|
||||
parseQueryMeta(resp, qm)
|
||||
qm.RequestTime = rtt
|
||||
|
||||
if resp.StatusCode == 404 {
|
||||
resp.Body.Close()
|
||||
return nil, qm, nil
|
||||
} else if resp.StatusCode != 200 {
|
||||
resp.Body.Close()
|
||||
return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
|
||||
}
|
||||
return resp, qm, nil
|
||||
}
|
||||
|
||||
// Put is used to write a new value. Only the
|
||||
// Key, Flags and Value is respected.
|
||||
func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
|
||||
params := make(map[string]string, 1)
|
||||
if p.Flags != 0 {
|
||||
params["flags"] = strconv.FormatUint(p.Flags, 10)
|
||||
}
|
||||
_, wm, err := k.put(p.Key, params, p.Value, q)
|
||||
return wm, err
|
||||
}
|
||||
|
||||
// CAS is used for a Check-And-Set operation. The Key,
|
||||
// ModifyIndex, Flags and Value are respected. Returns true
|
||||
// on success or false on failures.
|
||||
func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||
params := make(map[string]string, 2)
|
||||
if p.Flags != 0 {
|
||||
params["flags"] = strconv.FormatUint(p.Flags, 10)
|
||||
}
|
||||
params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
|
||||
return k.put(p.Key, params, p.Value, q)
|
||||
}
|
||||
|
||||
// Acquire is used for a lock acquisiiton operation. The Key,
|
||||
// Flags, Value and Session are respected. Returns true
|
||||
// on success or false on failures.
|
||||
func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||
params := make(map[string]string, 2)
|
||||
if p.Flags != 0 {
|
||||
params["flags"] = strconv.FormatUint(p.Flags, 10)
|
||||
}
|
||||
params["acquire"] = p.Session
|
||||
return k.put(p.Key, params, p.Value, q)
|
||||
}
|
||||
|
||||
// Release is used for a lock release operation. The Key,
|
||||
// Flags, Value and Session are respected. Returns true
|
||||
// on success or false on failures.
|
||||
func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||
params := make(map[string]string, 2)
|
||||
if p.Flags != 0 {
|
||||
params["flags"] = strconv.FormatUint(p.Flags, 10)
|
||||
}
|
||||
params["release"] = p.Session
|
||||
return k.put(p.Key, params, p.Value, q)
|
||||
}
|
||||
|
||||
func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||
r := k.c.newRequest("PUT", "/v1/kv/"+key)
|
||||
r.setWriteOptions(q)
|
||||
for param, val := range params {
|
||||
r.params.Set(param, val)
|
||||
}
|
||||
r.body = bytes.NewReader(body)
|
||||
rtt, resp, err := requireOK(k.c.doRequest(r))
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
qm := &WriteMeta{}
|
||||
qm.RequestTime = rtt
|
||||
|
||||
var buf bytes.Buffer
|
||||
if _, err := io.Copy(&buf, resp.Body); err != nil {
|
||||
return false, nil, fmt.Errorf("Failed to read response: %v", err)
|
||||
}
|
||||
res := strings.Contains(string(buf.Bytes()), "true")
|
||||
return res, qm, nil
|
||||
}
|
||||
|
||||
// Delete is used to delete a single key
|
||||
func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
|
||||
_, qm, err := k.deleteInternal(key, nil, w)
|
||||
return qm, err
|
||||
}
|
||||
|
||||
// DeleteCAS is used for a Delete Check-And-Set operation. The Key
|
||||
// and ModifyIndex are respected. Returns true on success or false on failures.
|
||||
func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||
params := map[string]string{
|
||||
"cas": strconv.FormatUint(p.ModifyIndex, 10),
|
||||
}
|
||||
return k.deleteInternal(p.Key, params, q)
|
||||
}
|
||||
|
||||
// DeleteTree is used to delete all keys under a prefix
|
||||
func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
|
||||
_, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w)
|
||||
return qm, err
|
||||
}
|
||||
|
||||
func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) {
|
||||
r := k.c.newRequest("DELETE", "/v1/kv/"+key)
|
||||
r.setWriteOptions(q)
|
||||
for param, val := range params {
|
||||
r.params.Set(param, val)
|
||||
}
|
||||
rtt, resp, err := requireOK(k.c.doRequest(r))
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
qm := &WriteMeta{}
|
||||
qm.RequestTime = rtt
|
||||
|
||||
var buf bytes.Buffer
|
||||
if _, err := io.Copy(&buf, resp.Body); err != nil {
|
||||
return false, nil, fmt.Errorf("Failed to read response: %v", err)
|
||||
}
|
||||
res := strings.Contains(string(buf.Bytes()), "true")
|
||||
return res, qm, nil
|
||||
}
|
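For reference, the KV helpers deleted above (Put, Get, CAS, Delete) were consumed roughly as in the sketch below. This is not code from this repository: the `api.NewClient(api.DefaultConfig())` constructor lives elsewhere in the same Consul package, and the key name is made up.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Assumed client setup; the constructor is not part of this hunk.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	// Put only respects Key, Flags and Value.
	pair := &api.KVPair{Key: "example/key", Value: []byte("hello")} // hypothetical key
	if _, err := kv.Put(pair, nil); err != nil {
		log.Fatal(err)
	}

	// Read the pair back, then update it only if nobody else changed it meanwhile.
	got, _, err := kv.Get("example/key", nil)
	if err != nil || got == nil {
		log.Fatal("key not found")
	}
	got.Value = []byte("updated")
	ok, _, err := kv.CAS(got, nil) // the server checks got.ModifyIndex
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CAS applied:", ok)
}
```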
326 vendor/github.com/hashicorp/consul/api/lock.go generated vendored
@@ -1,326 +0,0 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultLockSessionName is the Session Name we assign if none is provided
|
||||
DefaultLockSessionName = "Consul API Lock"
|
||||
|
||||
// DefaultLockSessionTTL is the default session TTL if no Session is provided
|
||||
// when creating a new Lock. This is used because we do not have any
|
||||
// other check to depend upon.
|
||||
DefaultLockSessionTTL = "15s"
|
||||
|
||||
// DefaultLockWaitTime is how long we block for at a time to check if lock
|
||||
// acquisition is possible. This affects the minimum time it takes to cancel
|
||||
// a Lock acquisition.
|
||||
DefaultLockWaitTime = 15 * time.Second
|
||||
|
||||
// DefaultLockRetryTime is how long we wait after a failed lock acquisition
|
||||
// before attempting to do the lock again. This is so that once a lock-delay
|
||||
// is in effect, we do not hot loop retrying the acquisition.
|
||||
DefaultLockRetryTime = 5 * time.Second
|
||||
|
||||
// LockFlagValue is a magic flag we set to indicate a key
|
||||
// is being used for a lock. It is used to detect a potential
|
||||
// conflict with a semaphore.
|
||||
LockFlagValue = 0x2ddccbc058a50c18
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrLockHeld is returned if we attempt to double lock
|
||||
ErrLockHeld = fmt.Errorf("Lock already held")
|
||||
|
||||
// ErrLockNotHeld is returned if we attempt to unlock a lock
|
||||
// that we do not hold.
|
||||
ErrLockNotHeld = fmt.Errorf("Lock not held")
|
||||
|
||||
// ErrLockInUse is returned if we attempt to destroy a lock
|
||||
// that is in use.
|
||||
ErrLockInUse = fmt.Errorf("Lock in use")
|
||||
|
||||
// ErrLockConflict is returned if the flags on a key
|
||||
// used for a lock do not match expectation
|
||||
ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
|
||||
)
|
||||
|
||||
// Lock is used to implement client-side leader election. It follows the
|
||||
// algorithm as described here: https://consul.io/docs/guides/leader-election.html.
|
||||
type Lock struct {
|
||||
c *Client
|
||||
opts *LockOptions
|
||||
|
||||
isHeld bool
|
||||
sessionRenew chan struct{}
|
||||
lockSession string
|
||||
l sync.Mutex
|
||||
}
|
||||
|
||||
// LockOptions is used to parameterize the Lock behavior.
|
||||
type LockOptions struct {
|
||||
Key string // Must be set and have write permissions
|
||||
Value []byte // Optional, value to associate with the lock
|
||||
Session string // Optional, created if not specified
|
||||
SessionName string // Optional, defaults to DefaultLockSessionName
|
||||
SessionTTL string // Optional, defaults to DefaultLockSessionTTL
|
||||
}
|
||||
|
||||
// LockKey returns a handle to a lock struct which can be used
|
||||
// to acquire and release the mutex. The key used must have
|
||||
// write permissions.
|
||||
func (c *Client) LockKey(key string) (*Lock, error) {
|
||||
opts := &LockOptions{
|
||||
Key: key,
|
||||
}
|
||||
return c.LockOpts(opts)
|
||||
}
|
||||
|
||||
// LockOpts returns a handle to a lock struct which can be used
|
||||
// to acquire and release the mutex. The key used must have
|
||||
// write permissions.
|
||||
func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
|
||||
if opts.Key == "" {
|
||||
return nil, fmt.Errorf("missing key")
|
||||
}
|
||||
if opts.SessionName == "" {
|
||||
opts.SessionName = DefaultLockSessionName
|
||||
}
|
||||
if opts.SessionTTL == "" {
|
||||
opts.SessionTTL = DefaultLockSessionTTL
|
||||
} else {
|
||||
if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
|
||||
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
|
||||
}
|
||||
}
|
||||
l := &Lock{
|
||||
c: c,
|
||||
opts: opts,
|
||||
}
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// Lock attempts to acquire the lock and blocks while doing so.
|
||||
// Providing a non-nil stopCh can be used to abort the lock attempt.
|
||||
// Returns a channel that is closed if our lock is lost or an error.
|
||||
// This channel could be closed at any time due to session invalidation,
|
||||
// communication errors, operator intervention, etc. It is NOT safe to
|
||||
// assume that the lock is held until Unlock() unless the Session is specifically
|
||||
// created without any associated health checks. By default Consul sessions
|
||||
// prefer liveness over safety and an application must be able to handle
|
||||
// the lock being lost.
|
||||
func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
|
||||
// Hold the lock as we try to acquire
|
||||
l.l.Lock()
|
||||
defer l.l.Unlock()
|
||||
|
||||
// Check if we already hold the lock
|
||||
if l.isHeld {
|
||||
return nil, ErrLockHeld
|
||||
}
|
||||
|
||||
// Check if we need to create a session first
|
||||
l.lockSession = l.opts.Session
|
||||
if l.lockSession == "" {
|
||||
if s, err := l.createSession(); err != nil {
|
||||
return nil, fmt.Errorf("failed to create session: %v", err)
|
||||
} else {
|
||||
l.sessionRenew = make(chan struct{})
|
||||
l.lockSession = s
|
||||
session := l.c.Session()
|
||||
go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew)
|
||||
|
||||
// If we fail to acquire the lock, cleanup the session
|
||||
defer func() {
|
||||
if !l.isHeld {
|
||||
close(l.sessionRenew)
|
||||
l.sessionRenew = nil
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// Setup the query options
|
||||
kv := l.c.KV()
|
||||
qOpts := &QueryOptions{
|
||||
WaitTime: DefaultLockWaitTime,
|
||||
}
|
||||
|
||||
WAIT:
|
||||
// Check if we should quit
|
||||
select {
|
||||
case <-stopCh:
|
||||
return nil, nil
|
||||
default:
|
||||
}
|
||||
|
||||
// Look for an existing lock, blocking until not taken
|
||||
pair, meta, err := kv.Get(l.opts.Key, qOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read lock: %v", err)
|
||||
}
|
||||
if pair != nil && pair.Flags != LockFlagValue {
|
||||
return nil, ErrLockConflict
|
||||
}
|
||||
locked := false
|
||||
if pair != nil && pair.Session == l.lockSession {
|
||||
goto HELD
|
||||
}
|
||||
if pair != nil && pair.Session != "" {
|
||||
qOpts.WaitIndex = meta.LastIndex
|
||||
goto WAIT
|
||||
}
|
||||
|
||||
// Try to acquire the lock
|
||||
pair = l.lockEntry(l.lockSession)
|
||||
locked, _, err = kv.Acquire(pair, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to acquire lock: %v", err)
|
||||
}
|
||||
|
||||
// Handle the case of not getting the lock
|
||||
if !locked {
|
||||
select {
|
||||
case <-time.After(DefaultLockRetryTime):
|
||||
goto WAIT
|
||||
case <-stopCh:
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
HELD:
|
||||
// Watch to ensure we maintain leadership
|
||||
leaderCh := make(chan struct{})
|
||||
go l.monitorLock(l.lockSession, leaderCh)
|
||||
|
||||
// Set that we own the lock
|
||||
l.isHeld = true
|
||||
|
||||
// Locked! All done
|
||||
return leaderCh, nil
|
||||
}
|
||||
|
||||
// Unlock releases the lock. It is an error to call this
|
||||
// if the lock is not currently held.
|
||||
func (l *Lock) Unlock() error {
|
||||
// Hold the lock as we try to release
|
||||
l.l.Lock()
|
||||
defer l.l.Unlock()
|
||||
|
||||
// Ensure the lock is actually held
|
||||
if !l.isHeld {
|
||||
return ErrLockNotHeld
|
||||
}
|
||||
|
||||
// Set that we no longer own the lock
|
||||
l.isHeld = false
|
||||
|
||||
// Stop the session renew
|
||||
if l.sessionRenew != nil {
|
||||
defer func() {
|
||||
close(l.sessionRenew)
|
||||
l.sessionRenew = nil
|
||||
}()
|
||||
}
|
||||
|
||||
// Get the lock entry, and clear the lock session
|
||||
lockEnt := l.lockEntry(l.lockSession)
|
||||
l.lockSession = ""
|
||||
|
||||
// Release the lock explicitly
|
||||
kv := l.c.KV()
|
||||
_, _, err := kv.Release(lockEnt, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to release lock: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Destroy is used to cleanup the lock entry. It is not necessary
|
||||
// to invoke. It will fail if the lock is in use.
|
||||
func (l *Lock) Destroy() error {
|
||||
// Hold the lock as we try to release
|
||||
l.l.Lock()
|
||||
defer l.l.Unlock()
|
||||
|
||||
// Check if we already hold the lock
|
||||
if l.isHeld {
|
||||
return ErrLockHeld
|
||||
}
|
||||
|
||||
// Look for an existing lock
|
||||
kv := l.c.KV()
|
||||
pair, _, err := kv.Get(l.opts.Key, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read lock: %v", err)
|
||||
}
|
||||
|
||||
// Nothing to do if the lock does not exist
|
||||
if pair == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check for possible flag conflict
|
||||
if pair.Flags != LockFlagValue {
|
||||
return ErrLockConflict
|
||||
}
|
||||
|
||||
// Check if it is in use
|
||||
if pair.Session != "" {
|
||||
return ErrLockInUse
|
||||
}
|
||||
|
||||
// Attempt the delete
|
||||
didRemove, _, err := kv.DeleteCAS(pair, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove lock: %v", err)
|
||||
}
|
||||
if !didRemove {
|
||||
return ErrLockInUse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// createSession is used to create a new managed session
|
||||
func (l *Lock) createSession() (string, error) {
|
||||
session := l.c.Session()
|
||||
se := &SessionEntry{
|
||||
Name: l.opts.SessionName,
|
||||
TTL: l.opts.SessionTTL,
|
||||
}
|
||||
id, _, err := session.Create(se, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// lockEntry returns a formatted KVPair for the lock
|
||||
func (l *Lock) lockEntry(session string) *KVPair {
|
||||
return &KVPair{
|
||||
Key: l.opts.Key,
|
||||
Value: l.opts.Value,
|
||||
Session: session,
|
||||
Flags: LockFlagValue,
|
||||
}
|
||||
}
|
||||
|
||||
// monitorLock is a long running routine to monitor a lock ownership
|
||||
// It closes the stopCh if we lose our leadership.
|
||||
func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
|
||||
defer close(stopCh)
|
||||
kv := l.c.KV()
|
||||
opts := &QueryOptions{RequireConsistent: true}
|
||||
WAIT:
|
||||
pair, meta, err := kv.Get(l.opts.Key, opts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if pair != nil && pair.Session == session {
|
||||
opts.WaitIndex = meta.LastIndex
|
||||
goto WAIT
|
||||
}
|
||||
}
|
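The Lock type deleted above implements client-side leader election on top of the KV and Session endpoints: it creates a TTL session, acquires the key with that session, and closes the returned channel if leadership is lost. A minimal usage sketch, under the same client assumption as the KV example and with a made-up key:

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // assumed, not part of this hunk
	if err != nil {
		log.Fatal(err)
	}

	// "example/leader" is illustrative; the key must be writable.
	lock, err := client.LockKey("example/leader")
	if err != nil {
		log.Fatal(err)
	}

	leaderCh, err := lock.Lock(nil) // blocks until the lock is acquired
	if err != nil {
		log.Fatal(err)
	}

	select {
	case <-leaderCh:
		// Closed when the session is invalidated: leadership is lost.
		log.Println("lost leadership")
	case <-time.After(30 * time.Second):
		// Done with the critical section; release voluntarily.
		if err := lock.Unlock(); err != nil {
			log.Println("unlock:", err)
		}
	}
}
```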
24 vendor/github.com/hashicorp/consul/api/raw.go generated vendored
@@ -1,24 +0,0 @@
|
|||
package api
|
||||
|
||||
// Raw can be used to do raw queries against custom endpoints
|
||||
type Raw struct {
|
||||
c *Client
|
||||
}
|
||||
|
||||
// Raw returns a handle to query endpoints
|
||||
func (c *Client) Raw() *Raw {
|
||||
return &Raw{c}
|
||||
}
|
||||
|
||||
// Query is used to do a GET request against an endpoint
|
||||
// and deserialize the response into an interface using
|
||||
// standard Consul conventions.
|
||||
func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
|
||||
return raw.c.query(endpoint, out, q)
|
||||
}
|
||||
|
||||
// Write is used to do a PUT request against an endpoint
|
||||
// and serialize/deserialized using the standard Consul conventions.
|
||||
func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
|
||||
return raw.c.write(endpoint, in, out, q)
|
||||
}
|
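Raw is only a thin pass-through to the client's generic query and write helpers, useful for endpoints the typed API does not cover. A small sketch under the same client assumption; `/v1/agent/self` is chosen purely for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // assumed, not part of this hunk
	if err != nil {
		log.Fatal(err)
	}

	// Any endpoint that follows the usual Consul response conventions works here.
	var out map[string]interface{}
	if _, err := client.Raw().Query("/v1/agent/self", &out, nil); err != nil {
		log.Fatal(err)
	}
	fmt.Println("agent info sections:", len(out))
}
```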
477 vendor/github.com/hashicorp/consul/api/semaphore.go generated vendored
@@ -1,477 +0,0 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
|
||||
DefaultSemaphoreSessionName = "Consul API Semaphore"
|
||||
|
||||
// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
|
||||
// when creating a new Semaphore. This is used because we do not have any
|
||||
// other check to depend upon.
|
||||
DefaultSemaphoreSessionTTL = "15s"
|
||||
|
||||
// DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
|
||||
// acquisition is possible. This affects the minimum time it takes to cancel
|
||||
// a Semaphore acquisition.
|
||||
DefaultSemaphoreWaitTime = 15 * time.Second
|
||||
|
||||
// DefaultSemaphoreKey is the key used within the prefix to
|
||||
// use for coordination between all the contenders.
|
||||
DefaultSemaphoreKey = ".lock"
|
||||
|
||||
// SemaphoreFlagValue is a magic flag we set to indicate a key
|
||||
// is being used for a semaphore. It is used to detect a potential
|
||||
// conflict with a lock.
|
||||
SemaphoreFlagValue = 0xe0f69a2baa414de0
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrSemaphoreHeld is returned if we attempt to double lock
|
||||
ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
|
||||
|
||||
// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
|
||||
// that we do not hold.
|
||||
ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
|
||||
|
||||
// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
|
||||
// that is in use.
|
||||
ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
|
||||
|
||||
// ErrSemaphoreConflict is returned if the flags on a key
|
||||
// used for a semaphore do not match expectation
|
||||
ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
|
||||
)
|
||||
|
||||
// Semaphore is used to implement a distributed semaphore
|
||||
// using the Consul KV primitives.
|
||||
type Semaphore struct {
|
||||
c *Client
|
||||
opts *SemaphoreOptions
|
||||
|
||||
isHeld bool
|
||||
sessionRenew chan struct{}
|
||||
lockSession string
|
||||
l sync.Mutex
|
||||
}
|
||||
|
||||
// SemaphoreOptions is used to parameterize the Semaphore
|
||||
type SemaphoreOptions struct {
|
||||
Prefix string // Must be set and have write permissions
|
||||
Limit int // Must be set, and be positive
|
||||
Value []byte // Optional, value to associate with the contender entry
|
||||
Session string // Optional, created if not specified
|
||||
SessionName string // Optional, defaults to DefaultLockSessionName
|
||||
SessionTTL string // Optional, defaults to DefaultLockSessionTTL
|
||||
}
|
||||
|
||||
// semaphoreLock is written under the DefaultSemaphoreKey and
|
||||
// is used to coordinate between all the contenders.
|
||||
type semaphoreLock struct {
|
||||
// Limit is the integer limit of holders. This is used to
|
||||
// verify that all the holders agree on the value.
|
||||
Limit int
|
||||
|
||||
// Holders is a list of all the semaphore holders.
|
||||
// It maps the session ID to true. It is used as a set effectively.
|
||||
Holders map[string]bool
|
||||
}
|
||||
|
||||
// SemaphorePrefix is used to create a Semaphore which will operate
|
||||
// at the given KV prefix and uses the given limit for the semaphore.
|
||||
// The prefix must have write privileges, and the limit must be agreed
|
||||
// upon by all contenders.
|
||||
func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
|
||||
opts := &SemaphoreOptions{
|
||||
Prefix: prefix,
|
||||
Limit: limit,
|
||||
}
|
||||
return c.SemaphoreOpts(opts)
|
||||
}
|
||||
|
||||
// SemaphoreOpts is used to create a Semaphore with the given options.
|
||||
// The prefix must have write privileges, and the limit must be agreed
|
||||
// upon by all contenders. If a Session is not provided, one will be created.
|
||||
func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
|
||||
if opts.Prefix == "" {
|
||||
return nil, fmt.Errorf("missing prefix")
|
||||
}
|
||||
if opts.Limit <= 0 {
|
||||
return nil, fmt.Errorf("semaphore limit must be positive")
|
||||
}
|
||||
if opts.SessionName == "" {
|
||||
opts.SessionName = DefaultSemaphoreSessionName
|
||||
}
|
||||
if opts.SessionTTL == "" {
|
||||
opts.SessionTTL = DefaultSemaphoreSessionTTL
|
||||
} else {
|
||||
if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
|
||||
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
|
||||
}
|
||||
}
|
||||
s := &Semaphore{
|
||||
c: c,
|
||||
opts: opts,
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Acquire attempts to reserve a slot in the semaphore, blocking until
|
||||
// success, interrupted via the stopCh or an error is encountered.
|
||||
// Providing a non-nil stopCh can be used to abort the attempt.
|
||||
// On success, a channel is returned that represents our slot.
|
||||
// This channel could be closed at any time due to session invalidation,
|
||||
// communication errors, operator intervention, etc. It is NOT safe to
|
||||
// assume that the slot is held until Release() unless the Session is specifically
|
||||
// created without any associated health checks. By default Consul sessions
|
||||
// prefer liveness over safety and an application must be able to handle
|
||||
// the session being lost.
|
||||
func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
|
||||
// Hold the lock as we try to acquire
|
||||
s.l.Lock()
|
||||
defer s.l.Unlock()
|
||||
|
||||
// Check if we already hold the semaphore
|
||||
if s.isHeld {
|
||||
return nil, ErrSemaphoreHeld
|
||||
}
|
||||
|
||||
// Check if we need to create a session first
|
||||
s.lockSession = s.opts.Session
|
||||
if s.lockSession == "" {
|
||||
if sess, err := s.createSession(); err != nil {
|
||||
return nil, fmt.Errorf("failed to create session: %v", err)
|
||||
} else {
|
||||
s.sessionRenew = make(chan struct{})
|
||||
s.lockSession = sess
|
||||
session := s.c.Session()
|
||||
go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew)
|
||||
|
||||
// If we fail to acquire the lock, cleanup the session
|
||||
defer func() {
|
||||
if !s.isHeld {
|
||||
close(s.sessionRenew)
|
||||
s.sessionRenew = nil
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// Create the contender entry
|
||||
kv := s.c.KV()
|
||||
made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil)
|
||||
if err != nil || !made {
|
||||
return nil, fmt.Errorf("failed to make contender entry: %v", err)
|
||||
}
|
||||
|
||||
// Setup the query options
|
||||
qOpts := &QueryOptions{
|
||||
WaitTime: DefaultSemaphoreWaitTime,
|
||||
}
|
||||
|
||||
WAIT:
|
||||
// Check if we should quit
|
||||
select {
|
||||
case <-stopCh:
|
||||
return nil, nil
|
||||
default:
|
||||
}
|
||||
|
||||
// Read the prefix
|
||||
pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read prefix: %v", err)
|
||||
}
|
||||
|
||||
// Decode the lock
|
||||
lockPair := s.findLock(pairs)
|
||||
if lockPair.Flags != SemaphoreFlagValue {
|
||||
return nil, ErrSemaphoreConflict
|
||||
}
|
||||
lock, err := s.decodeLock(lockPair)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Verify we agree with the limit
|
||||
if lock.Limit != s.opts.Limit {
|
||||
return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)",
|
||||
lock.Limit, s.opts.Limit)
|
||||
}
|
||||
|
||||
// Prune the dead holders
|
||||
s.pruneDeadHolders(lock, pairs)
|
||||
|
||||
// Check if the lock is held
|
||||
if len(lock.Holders) >= lock.Limit {
|
||||
qOpts.WaitIndex = meta.LastIndex
|
||||
goto WAIT
|
||||
}
|
||||
|
||||
// Create a new lock with us as a holder
|
||||
lock.Holders[s.lockSession] = true
|
||||
newLock, err := s.encodeLock(lock, lockPair.ModifyIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Attempt the acquisition
|
||||
didSet, _, err := kv.CAS(newLock, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to update lock: %v", err)
|
||||
}
|
||||
if !didSet {
|
||||
// Update failed, could have been a race with another contender,
|
||||
// retry the operation
|
||||
goto WAIT
|
||||
}
|
||||
|
||||
// Watch to ensure we maintain ownership of the slot
|
||||
lockCh := make(chan struct{})
|
||||
go s.monitorLock(s.lockSession, lockCh)
|
||||
|
||||
// Set that we own the lock
|
||||
s.isHeld = true
|
||||
|
||||
// Acquired! All done
|
||||
return lockCh, nil
|
||||
}
|
||||
|
||||
// Release is used to voluntarily give up our semaphore slot. It is
|
||||
// an error to call this if the semaphore has not been acquired.
|
||||
func (s *Semaphore) Release() error {
|
||||
// Hold the lock as we try to release
|
||||
s.l.Lock()
|
||||
defer s.l.Unlock()
|
||||
|
||||
// Ensure the lock is actually held
|
||||
if !s.isHeld {
|
||||
return ErrSemaphoreNotHeld
|
||||
}
|
||||
|
||||
// Set that we no longer own the lock
|
||||
s.isHeld = false
|
||||
|
||||
// Stop the session renew
|
||||
if s.sessionRenew != nil {
|
||||
defer func() {
|
||||
close(s.sessionRenew)
|
||||
s.sessionRenew = nil
|
||||
}()
|
||||
}
|
||||
|
||||
// Get and clear the lock session
|
||||
lockSession := s.lockSession
|
||||
s.lockSession = ""
|
||||
|
||||
// Remove ourselves as a lock holder
|
||||
kv := s.c.KV()
|
||||
key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
|
||||
READ:
|
||||
pair, _, err := kv.Get(key, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pair == nil {
|
||||
pair = &KVPair{}
|
||||
}
|
||||
lock, err := s.decodeLock(pair)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create a new lock without us as a holder
|
||||
if _, ok := lock.Holders[lockSession]; ok {
|
||||
delete(lock.Holders, lockSession)
|
||||
newLock, err := s.encodeLock(lock, pair.ModifyIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Swap the locks
|
||||
didSet, _, err := kv.CAS(newLock, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update lock: %v", err)
|
||||
}
|
||||
if !didSet {
|
||||
goto READ
|
||||
}
|
||||
}
|
||||
|
||||
// Destroy the contender entry
|
||||
contenderKey := path.Join(s.opts.Prefix, lockSession)
|
||||
if _, err := kv.Delete(contenderKey, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Destroy is used to cleanup the semaphore entry. It is not necessary
|
||||
// to invoke. It will fail if the semaphore is in use.
|
||||
func (s *Semaphore) Destroy() error {
|
||||
// Hold the lock as we try to acquire
|
||||
s.l.Lock()
|
||||
defer s.l.Unlock()
|
||||
|
||||
// Check if we already hold the semaphore
|
||||
if s.isHeld {
|
||||
return ErrSemaphoreHeld
|
||||
}
|
||||
|
||||
// List for the semaphore
|
||||
kv := s.c.KV()
|
||||
pairs, _, err := kv.List(s.opts.Prefix, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read prefix: %v", err)
|
||||
}
|
||||
|
||||
// Find the lock pair, bail if it doesn't exist
|
||||
lockPair := s.findLock(pairs)
|
||||
if lockPair.ModifyIndex == 0 {
|
||||
return nil
|
||||
}
|
||||
if lockPair.Flags != SemaphoreFlagValue {
|
||||
return ErrSemaphoreConflict
|
||||
}
|
||||
|
||||
// Decode the lock
|
||||
lock, err := s.decodeLock(lockPair)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Prune the dead holders
|
||||
s.pruneDeadHolders(lock, pairs)
|
||||
|
||||
// Check if there are any holders
|
||||
if len(lock.Holders) > 0 {
|
||||
return ErrSemaphoreInUse
|
||||
}
|
||||
|
||||
// Attempt the delete
|
||||
didRemove, _, err := kv.DeleteCAS(lockPair, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove semaphore: %v", err)
|
||||
}
|
||||
if !didRemove {
|
||||
return ErrSemaphoreInUse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// createSession is used to create a new managed session
|
||||
func (s *Semaphore) createSession() (string, error) {
|
||||
session := s.c.Session()
|
||||
se := &SessionEntry{
|
||||
Name: s.opts.SessionName,
|
||||
TTL: s.opts.SessionTTL,
|
||||
Behavior: SessionBehaviorDelete,
|
||||
}
|
||||
id, _, err := session.Create(se, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// contenderEntry returns a formatted KVPair for the contender
|
||||
func (s *Semaphore) contenderEntry(session string) *KVPair {
|
||||
return &KVPair{
|
||||
Key: path.Join(s.opts.Prefix, session),
|
||||
Value: s.opts.Value,
|
||||
Session: session,
|
||||
Flags: SemaphoreFlagValue,
|
||||
}
|
||||
}
|
||||
|
||||
// findLock is used to find the KV Pair which is used for coordination
|
||||
func (s *Semaphore) findLock(pairs KVPairs) *KVPair {
|
||||
key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
|
||||
for _, pair := range pairs {
|
||||
if pair.Key == key {
|
||||
return pair
|
||||
}
|
||||
}
|
||||
return &KVPair{Flags: SemaphoreFlagValue}
|
||||
}
|
||||
|
||||
// decodeLock is used to decode a semaphoreLock from an
|
||||
// entry in Consul
|
||||
func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) {
|
||||
// Handle if there is no lock
|
||||
if pair == nil || pair.Value == nil {
|
||||
return &semaphoreLock{
|
||||
Limit: s.opts.Limit,
|
||||
Holders: make(map[string]bool),
|
||||
}, nil
|
||||
}
|
||||
|
||||
l := &semaphoreLock{}
|
||||
if err := json.Unmarshal(pair.Value, l); err != nil {
|
||||
return nil, fmt.Errorf("lock decoding failed: %v", err)
|
||||
}
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// encodeLock is used to encode a semaphoreLock into a KVPair
|
||||
// that can be PUT
|
||||
func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) {
|
||||
enc, err := json.Marshal(l)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("lock encoding failed: %v", err)
|
||||
}
|
||||
pair := &KVPair{
|
||||
Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey),
|
||||
Value: enc,
|
||||
Flags: SemaphoreFlagValue,
|
||||
ModifyIndex: oldIndex,
|
||||
}
|
||||
return pair, nil
|
||||
}
|
||||
|
||||
// pruneDeadHolders is used to remove all the dead lock holders
|
||||
func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) {
|
||||
// Gather all the live holders
|
||||
alive := make(map[string]struct{}, len(pairs))
|
||||
for _, pair := range pairs {
|
||||
if pair.Session != "" {
|
||||
alive[pair.Session] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove any holders that are dead
|
||||
for holder := range lock.Holders {
|
||||
if _, ok := alive[holder]; !ok {
|
||||
delete(lock.Holders, holder)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// monitorLock is a long running routine to monitor a semaphore ownership
|
||||
// It closes the stopCh if we lose our slot.
|
||||
func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
|
||||
defer close(stopCh)
|
||||
kv := s.c.KV()
|
||||
opts := &QueryOptions{RequireConsistent: true}
|
||||
WAIT:
|
||||
pairs, meta, err := kv.List(s.opts.Prefix, opts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
lockPair := s.findLock(pairs)
|
||||
lock, err := s.decodeLock(lockPair)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
s.pruneDeadHolders(lock, pairs)
|
||||
if _, ok := lock.Holders[session]; ok {
|
||||
opts.WaitIndex = meta.LastIndex
|
||||
goto WAIT
|
||||
}
|
||||
}
|
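The Semaphore deleted above generalizes the lock to N concurrent holders, coordinated through a shared `.lock` entry under a common prefix plus one contender key per session. A minimal sketch under the same assumptions as the earlier examples; the prefix and limit are illustrative:

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // assumed, not part of this hunk
	if err != nil {
		log.Fatal(err)
	}

	// Illustrative prefix and limit: at most 3 holders at a time.
	sema, err := client.SemaphorePrefix("example/workers", 3)
	if err != nil {
		log.Fatal(err)
	}

	slotCh, err := sema.Acquire(nil) // blocks until a slot is free
	if err != nil {
		log.Fatal(err)
	}

	select {
	case <-slotCh:
		// Closed if the session is invalidated: the slot is lost.
		log.Println("lost semaphore slot")
	case <-time.After(10 * time.Second):
		if err := sema.Release(); err != nil {
			log.Println("release:", err)
		}
	}
}
```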
201 vendor/github.com/hashicorp/consul/api/session.go generated vendored
@@ -1,201 +0,0 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// SessionBehaviorRelease is the default behavior and causes
|
||||
// all associated locks to be released on session invalidation.
|
||||
SessionBehaviorRelease = "release"
|
||||
|
||||
// SessionBehaviorDelete is new in Consul 0.5 and changes the
|
||||
// behavior to delete all associated locks on session invalidation.
|
||||
// It can be used in a way similar to Ephemeral Nodes in ZooKeeper.
|
||||
SessionBehaviorDelete = "delete"
|
||||
)
|
||||
|
||||
// SessionEntry represents a session in consul
|
||||
type SessionEntry struct {
|
||||
CreateIndex uint64
|
||||
ID string
|
||||
Name string
|
||||
Node string
|
||||
Checks []string
|
||||
LockDelay time.Duration
|
||||
Behavior string
|
||||
TTL string
|
||||
}
|
||||
|
||||
// Session can be used to query the Session endpoints
|
||||
type Session struct {
|
||||
c *Client
|
||||
}
|
||||
|
||||
// Session returns a handle to the session endpoints
|
||||
func (c *Client) Session() *Session {
|
||||
return &Session{c}
|
||||
}
|
||||
|
||||
// CreateNoChecks is like Create but is used specifically to create
|
||||
// a session with no associated health checks.
|
||||
func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
|
||||
body := make(map[string]interface{})
|
||||
body["Checks"] = []string{}
|
||||
if se != nil {
|
||||
if se.Name != "" {
|
||||
body["Name"] = se.Name
|
||||
}
|
||||
if se.Node != "" {
|
||||
body["Node"] = se.Node
|
||||
}
|
||||
if se.LockDelay != 0 {
|
||||
body["LockDelay"] = durToMsec(se.LockDelay)
|
||||
}
|
||||
if se.Behavior != "" {
|
||||
body["Behavior"] = se.Behavior
|
||||
}
|
||||
if se.TTL != "" {
|
||||
body["TTL"] = se.TTL
|
||||
}
|
||||
}
|
||||
return s.create(body, q)
|
||||
|
||||
}
|
||||
|
||||
// Create makes a new session. Providing a session entry can
|
||||
// customize the session. It can also be nil to use defaults.
|
||||
func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
|
||||
var obj interface{}
|
||||
if se != nil {
|
||||
body := make(map[string]interface{})
|
||||
obj = body
|
||||
if se.Name != "" {
|
||||
body["Name"] = se.Name
|
||||
}
|
||||
if se.Node != "" {
|
||||
body["Node"] = se.Node
|
||||
}
|
||||
if se.LockDelay != 0 {
|
||||
body["LockDelay"] = durToMsec(se.LockDelay)
|
||||
}
|
||||
if len(se.Checks) > 0 {
|
||||
body["Checks"] = se.Checks
|
||||
}
|
||||
if se.Behavior != "" {
|
||||
body["Behavior"] = se.Behavior
|
||||
}
|
||||
if se.TTL != "" {
|
||||
body["TTL"] = se.TTL
|
||||
}
|
||||
}
|
||||
return s.create(obj, q)
|
||||
}
|
||||
|
||||
func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
|
||||
var out struct{ ID string }
|
||||
wm, err := s.c.write("/v1/session/create", obj, &out, q)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return out.ID, wm, nil
|
||||
}
|
||||
|
||||
// Destroy invalidates a given session
|
||||
func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
|
||||
wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return wm, nil
|
||||
}
|
||||
|
||||
// Renew renews the TTL on a given session
|
||||
func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
|
||||
var entries []*SessionEntry
|
||||
wm, err := s.c.write("/v1/session/renew/"+id, nil, &entries, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(entries) > 0 {
|
||||
return entries[0], wm, nil
|
||||
}
|
||||
return nil, wm, nil
|
||||
}
|
||||
|
||||
// RenewPeriodic is used to periodically invoke Session.Renew on a
|
||||
// session until a doneCh is closed. This is meant to be used in a long running
|
||||
// goroutine to ensure a session stays valid.
|
||||
func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh chan struct{}) error {
|
||||
ttl, err := time.ParseDuration(initialTTL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
waitDur := ttl / 2
|
||||
lastRenewTime := time.Now()
|
||||
var lastErr error
|
||||
for {
|
||||
if time.Since(lastRenewTime) > ttl {
|
||||
return lastErr
|
||||
}
|
||||
select {
|
||||
case <-time.After(waitDur):
|
||||
entry, _, err := s.Renew(id, q)
|
||||
if err != nil {
|
||||
waitDur = time.Second
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
if entry == nil {
|
||||
waitDur = time.Second
|
||||
lastErr = fmt.Errorf("No SessionEntry returned")
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle the server updating the TTL
|
||||
ttl, _ = time.ParseDuration(entry.TTL)
|
||||
waitDur = ttl / 2
|
||||
lastRenewTime = time.Now()
|
||||
|
||||
case <-doneCh:
|
||||
// Attempt a session destroy
|
||||
s.Destroy(id, q)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Info looks up a single session
|
||||
func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
|
||||
var entries []*SessionEntry
|
||||
qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(entries) > 0 {
|
||||
return entries[0], qm, nil
|
||||
}
|
||||
return nil, qm, nil
|
||||
}
|
||||
|
||||
// Node gets sessions for a node
|
||||
func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
|
||||
var entries []*SessionEntry
|
||||
qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return entries, qm, nil
|
||||
}
|
||||
|
||||
// List gets all active sessions
|
||||
func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
|
||||
var entries []*SessionEntry
|
||||
qm, err := s.c.query("/v1/session/list", &entries, q)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return entries, qm, nil
|
||||
}
|
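Both Lock and Semaphore sit on top of the Session endpoints deleted above: they create a TTL session, keep it alive with RenewPeriodic, and treat session invalidation as loss of ownership. A minimal sketch of driving a session directly, with an illustrative name and TTL:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // assumed, not part of this hunk
	if err != nil {
		log.Fatal(err)
	}
	session := client.Session()

	// A TTL session whose associated locks are deleted when it is invalidated.
	id, _, err := session.Create(&api.SessionEntry{
		Name:     "example-session", // illustrative name
		TTL:      "15s",
		Behavior: api.SessionBehaviorDelete,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Renew in the background until doneCh is closed; RenewPeriodic then
	// destroys the session on its way out.
	doneCh := make(chan struct{})
	go session.RenewPeriodic("15s", id, nil, doneCh)

	// ... use the session for locks or ephemeral keys ...

	close(doneCh)
}
```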
43 vendor/github.com/hashicorp/consul/api/status.go generated vendored
@@ -1,43 +0,0 @@
|
|||
package api
|
||||
|
||||
// Status can be used to query the Status endpoints
|
||||
type Status struct {
|
||||
c *Client
|
||||
}
|
||||
|
||||
// Status returns a handle to the status endpoints
|
||||
func (c *Client) Status() *Status {
|
||||
return &Status{c}
|
||||
}
|
||||
|
||||
// Leader is used to query for a known leader
|
||||
func (s *Status) Leader() (string, error) {
|
||||
r := s.c.newRequest("GET", "/v1/status/leader")
|
||||
_, resp, err := requireOK(s.c.doRequest(r))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var leader string
|
||||
if err := decodeBody(resp, &leader); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return leader, nil
|
||||
}
|
||||
|
||||
// Peers is used to query for the known raft peers
|
||||
func (s *Status) Peers() ([]string, error) {
|
||||
r := s.c.newRequest("GET", "/v1/status/peers")
|
||||
_, resp, err := requireOK(s.c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var peers []string
|
||||
if err := decodeBody(resp, &peers); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return peers, nil
|
||||
}
|
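The Status endpoints deleted above were the simplest way to probe a Consul cluster's health from a client. A short sketch under the same client assumption as before:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // assumed, not part of this hunk
	if err != nil {
		log.Fatal(err)
	}

	leader, err := client.Status().Leader()
	if err != nil {
		log.Fatal(err)
	}
	peers, err := client.Status().Peers()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("leader:", leader)
	fmt.Println("peers:", peers)
}
```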
21 vendor/github.com/json-iterator/go/LICENSE generated vendored
@@ -1,21 +0,0 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2016 json-iterator
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
87 vendor/github.com/json-iterator/go/README.md generated vendored
@@ -1,87 +0,0 @@
|
|||
[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
|
||||
[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go)
|
||||
[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
|
||||
[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
|
||||
[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
|
||||
[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
|
||||
[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
|
||||
|
||||
A high-performance 100% compatible drop-in replacement of "encoding/json"
|
||||
|
||||
You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go)
|
||||
|
||||
# Benchmark
|
||||
|
||||
![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
|
||||
|
||||
Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
|
||||
|
||||
Raw Result (easyjson requires static code generation)
|
||||
|
||||
| | ns/op | allocation bytes | allocation times |
|
||||
| --------------- | ----------- | ---------------- | ---------------- |
|
||||
| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
|
||||
| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
|
||||
| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
|
||||
| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
|
||||
| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
|
||||
| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
|
||||
|
||||
Always benchmark with your own workload.
|
||||
The result depends heavily on the data input.
|
||||
|
||||
# Usage
|
||||
|
||||
100% compatibility with standard lib
|
||||
|
||||
Replace
|
||||
|
||||
```go
|
||||
import "encoding/json"
|
||||
json.Marshal(&data)
|
||||
```
|
||||
|
||||
with
|
||||
|
||||
```go
|
||||
import jsoniter "github.com/json-iterator/go"
|
||||
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
json.Marshal(&data)
|
||||
```
|
||||
|
||||
Replace
|
||||
|
||||
```go
|
||||
import "encoding/json"
|
||||
json.Unmarshal(input, &data)
|
||||
```
|
||||
|
||||
with
|
||||
|
||||
```go
|
||||
import jsoniter "github.com/json-iterator/go"
|
||||
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
json.Unmarshal(input, &data)
|
||||
```
|
||||
|
||||
[More documentation](http://jsoniter.com/migrate-from-go-std.html)
|
||||
|
||||
# How to get
|
||||
|
||||
```
|
||||
go get github.com/json-iterator/go
|
||||
```
|
||||
|
||||
# Contributions Welcome!
|
||||
|
||||
Contributors
|
||||
|
||||
- [thockin](https://github.com/thockin)
|
||||
- [mattn](https://github.com/mattn)
|
||||
- [cch123](https://github.com/cch123)
|
||||
- [Oleg Shaldybin](https://github.com/olegshaldybin)
|
||||
- [Jason Toffaletti](https://github.com/toffaletti)
|
||||
|
||||
Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
|
150 vendor/github.com/json-iterator/go/adapter.go generated vendored
@@ -1,150 +0,0 @@
|
|||
package jsoniter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
// RawMessage to make replace json with jsoniter
|
||||
type RawMessage []byte
|
||||
|
||||
// Unmarshal adapts to json/encoding Unmarshal API
|
||||
//
|
||||
// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
|
||||
// Refer to https://godoc.org/encoding/json#Unmarshal for more information
|
||||
func Unmarshal(data []byte, v interface{}) error {
|
||||
return ConfigDefault.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
// UnmarshalFromString is a convenient method to read from string instead of []byte
|
||||
func UnmarshalFromString(str string, v interface{}) error {
|
||||
return ConfigDefault.UnmarshalFromString(str, v)
|
||||
}
|
||||
|
||||
// Get quick method to get value from deeply nested JSON structure
|
||||
func Get(data []byte, path ...interface{}) Any {
|
||||
return ConfigDefault.Get(data, path...)
|
||||
}
|
||||
|
||||
// Marshal adapts to json/encoding Marshal API
|
||||
//
|
||||
// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API
|
||||
// Refer to https://godoc.org/encoding/json#Marshal for more information
|
||||
func Marshal(v interface{}) ([]byte, error) {
|
||||
return ConfigDefault.Marshal(v)
|
||||
}
|
||||
|
||||
// MarshalIndent same as json.MarshalIndent. Prefix is not supported.
|
||||
func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
|
||||
return ConfigDefault.MarshalIndent(v, prefix, indent)
|
||||
}
|
||||
|
||||
// MarshalToString convenient method to write as string instead of []byte
|
||||
func MarshalToString(v interface{}) (string, error) {
|
||||
return ConfigDefault.MarshalToString(v)
|
||||
}
|
||||
|
||||
// NewDecoder adapts to json/stream NewDecoder API.
|
||||
//
|
||||
// NewDecoder returns a new decoder that reads from r.
|
||||
//
|
||||
// Instead of a json/encoding Decoder, a Decoder is returned
|
||||
// Refer to https://godoc.org/encoding/json#NewDecoder for more information
|
||||
func NewDecoder(reader io.Reader) *Decoder {
|
||||
return ConfigDefault.NewDecoder(reader)
|
||||
}
|
||||
|
||||
// Decoder reads and decodes JSON values from an input stream.
|
||||
// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress)
|
||||
type Decoder struct {
|
||||
iter *Iterator
|
||||
}
|
||||
|
||||
// Decode decode JSON into interface{}
|
||||
func (adapter *Decoder) Decode(obj interface{}) error {
|
||||
if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
|
||||
if !adapter.iter.loadMore() {
|
||||
return io.EOF
|
||||
}
|
||||
}
|
||||
adapter.iter.ReadVal(obj)
|
||||
err := adapter.iter.Error
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return adapter.iter.Error
|
||||
}
|
||||
|
||||
// More is there more?
|
||||
func (adapter *Decoder) More() bool {
|
||||
iter := adapter.iter
|
||||
if iter.Error != nil {
|
||||
return false
|
||||
}
|
||||
c := iter.nextToken()
|
||||
if c == 0 {
|
||||
return false
|
||||
}
|
||||
iter.unreadByte()
|
||||
return c != ']' && c != '}'
|
||||
}
|
||||
|
||||
// Buffered remaining buffer
|
||||
func (adapter *Decoder) Buffered() io.Reader {
|
||||
remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
|
||||
return bytes.NewReader(remaining)
|
||||
}
|
||||
|
||||
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
|
||||
// Number instead of as a float64.
|
||||
func (adapter *Decoder) UseNumber() {
|
||||
cfg := adapter.iter.cfg.configBeforeFrozen
|
||||
cfg.UseNumber = true
|
||||
adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
|
||||
}
|
||||
|
||||
// DisallowUnknownFields causes the Decoder to return an error when the destination
|
||||
// is a struct and the input contains object keys which do not match any
|
||||
// non-ignored, exported fields in the destination.
|
||||
func (adapter *Decoder) DisallowUnknownFields() {
|
||||
cfg := adapter.iter.cfg.configBeforeFrozen
|
||||
cfg.DisallowUnknownFields = true
|
||||
adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
|
||||
}
|
||||
|
||||
// NewEncoder same as json.NewEncoder
|
||||
func NewEncoder(writer io.Writer) *Encoder {
|
||||
return ConfigDefault.NewEncoder(writer)
|
||||
}
|
||||
|
||||
// Encoder same as json.Encoder
|
||||
type Encoder struct {
|
||||
stream *Stream
|
||||
}
|
||||
|
||||
// Encode encode interface{} as JSON to io.Writer
|
||||
func (adapter *Encoder) Encode(val interface{}) error {
|
||||
adapter.stream.WriteVal(val)
|
||||
adapter.stream.WriteRaw("\n")
|
||||
adapter.stream.Flush()
|
||||
return adapter.stream.Error
|
||||
}
|
||||
|
||||
// SetIndent set the indention. Prefix is not supported
|
||||
func (adapter *Encoder) SetIndent(prefix, indent string) {
|
||||
config := adapter.stream.cfg.configBeforeFrozen
|
||||
config.IndentionStep = len(indent)
|
||||
adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
|
||||
}
|
||||
|
||||
// SetEscapeHTML escape html by default, set to false to disable
|
||||
func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
|
||||
config := adapter.stream.cfg.configBeforeFrozen
|
||||
config.EscapeHTML = escapeHTML
|
||||
adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
|
||||
}
|
||||
|
||||
// Valid reports whether data is a valid JSON encoding.
|
||||
func Valid(data []byte) bool {
|
||||
return ConfigDefault.Valid(data)
|
||||
}
|
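adapter.go above is the encoding/json-compatible surface of json-iterator: package-level Marshal/Unmarshal plus Decoder and Encoder wrappers. A minimal sketch of the streaming Decoder and the one-shot Marshal; the payload type and input data are made up:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

type payload struct {
	Name  string `json:"name"`
	Count int    `json:"count"`
}

func main() {
	// Streaming decode: More reports whether another value follows.
	dec := jsoniter.NewDecoder(strings.NewReader(`{"name":"a","count":1}{"name":"b","count":2}`))
	for dec.More() {
		var p payload
		if err := dec.Decode(&p); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%+v\n", p)
	}

	// One-shot encode through the package-level adapter.
	out, err := jsoniter.Marshal(payload{Name: "c", Count: 3})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```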
Some files were not shown because too many files have changed in this diff.