diff --git a/Dockerfile b/Dockerfile
index 1aece5c3fb..01ab89e334 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -239,7 +239,7 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
 # Please edit hack/dockerfile/install-binaries.sh to update them.
 COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
 COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
-RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata

 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]
diff --git a/cli/command/stack/deploy.go b/cli/command/stack/deploy.go
index 32ebd62d3f..f4730db556 100644
--- a/cli/command/stack/deploy.go
+++ b/cli/command/stack/deploy.go
@@ -11,13 +11,13 @@ import (
 	"github.com/spf13/cobra"
 	"golang.org/x/net/context"

-	"github.com/aanand/compose-file/loader"
-	composetypes "github.com/aanand/compose-file/types"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
 	"github.com/docker/docker/cli/compose/convert"
+	"github.com/docker/docker/cli/compose/loader"
+	composetypes "github.com/docker/docker/cli/compose/types"
 	dockerclient "github.com/docker/docker/client"
 )

diff --git a/cli/compose/convert/compose.go b/cli/compose/convert/compose.go
index e0684482b8..7c410844c7 100644
--- a/cli/compose/convert/compose.go
+++ b/cli/compose/convert/compose.go
@@ -1,9 +1,9 @@
 package convert

 import (
-	composetypes "github.com/aanand/compose-file/types"
 	"github.com/docker/docker/api/types"
 	networktypes "github.com/docker/docker/api/types/network"
+	composetypes "github.com/docker/docker/cli/compose/types"
 )

 const (
diff --git a/cli/compose/convert/compose_test.go b/cli/compose/convert/compose_test.go
index 8f8e8ea6d8..27a67047d8 100644
--- a/cli/compose/convert/compose_test.go
+++ b/cli/compose/convert/compose_test.go
@@ -3,9 +3,9 @@ package convert
 import (
 	"testing"

-	composetypes "github.com/aanand/compose-file/types"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/network"
+	composetypes "github.com/docker/docker/cli/compose/types"
 	"github.com/docker/docker/pkg/testutil/assert"
 )

diff --git a/cli/compose/convert/service.go b/cli/compose/convert/service.go
index 458b518a46..2a8ed8288d 100644
--- a/cli/compose/convert/service.go
+++ b/cli/compose/convert/service.go
@@ -4,9 +4,9 @@ import (
 	"fmt"
 	"time"

-	composetypes "github.com/aanand/compose-file/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/swarm"
+	composetypes "github.com/docker/docker/cli/compose/types"
 	"github.com/docker/docker/opts"
 	runconfigopts "github.com/docker/docker/runconfig/opts"
 	"github.com/docker/go-connections/nat"
diff --git a/cli/compose/convert/service_test.go b/cli/compose/convert/service_test.go
index a6884917de..45da764325 100644
--- a/cli/compose/convert/service_test.go
+++ b/cli/compose/convert/service_test.go
@@ -6,9 +6,9 @@ import (
 	"testing"
 	"time"

-	composetypes "github.com/aanand/compose-file/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/swarm"
+	composetypes "github.com/docker/docker/cli/compose/types"
 	"github.com/docker/docker/pkg/testutil/assert"
 )

diff --git a/cli/compose/convert/volume.go b/cli/compose/convert/volume.go
index 3a7504106a..24442d4dc7 100644
--- a/cli/compose/convert/volume.go
+++ b/cli/compose/convert/volume.go
@@ -4,8 +4,8 @@ import (
 	"fmt"
 	"strings"

-	composetypes "github.com/aanand/compose-file/types"
 	"github.com/docker/docker/api/types/mount"
+	composetypes "github.com/docker/docker/cli/compose/types"
 )

 type volumes map[string]composetypes.VolumeConfig
diff --git a/cli/compose/convert/volume_test.go b/cli/compose/convert/volume_test.go
index bcbfb08b95..1132136b22 100644
--- a/cli/compose/convert/volume_test.go
+++ b/cli/compose/convert/volume_test.go
@@ -3,8 +3,8 @@ package convert
 import (
 	"testing"

-	composetypes "github.com/aanand/compose-file/types"
 	"github.com/docker/docker/api/types/mount"
+	composetypes "github.com/docker/docker/cli/compose/types"
 	"github.com/docker/docker/pkg/testutil/assert"
 )

diff --git a/vendor/github.com/aanand/compose-file/interpolation/interpolation.go b/cli/compose/interpolation/interpolation.go
similarity index 90%
rename from vendor/github.com/aanand/compose-file/interpolation/interpolation.go
rename to cli/compose/interpolation/interpolation.go
index 4f1b61da7b..734f28ec9d 100644
--- a/vendor/github.com/aanand/compose-file/interpolation/interpolation.go
+++ b/cli/compose/interpolation/interpolation.go
@@ -3,10 +3,11 @@ package interpolation
 import (
 	"fmt"

-	"github.com/aanand/compose-file/template"
-	"github.com/aanand/compose-file/types"
+	"github.com/docker/docker/cli/compose/template"
+	"github.com/docker/docker/cli/compose/types"
 )

+// Interpolate replaces variables in a string with the values from a mapping
 func Interpolate(config types.Dict, section string, mapping template.Mapping) (types.Dict, error) {
 	out := types.Dict{}

diff --git a/cli/compose/interpolation/interpolation_test.go b/cli/compose/interpolation/interpolation_test.go
new file mode 100644
index 0000000000..c3921701b3
--- /dev/null
+++ b/cli/compose/interpolation/interpolation_test.go
@@ -0,0 +1,59 @@
+package interpolation
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/docker/docker/cli/compose/types"
+)
+
+var defaults = map[string]string{
+	"USER": "jenny",
+	"FOO":  "bar",
+}
+
+func defaultMapping(name string) (string, bool) {
+	val, ok := defaults[name]
+	return val, ok
+}
+
+func TestInterpolate(t *testing.T) {
+	services := types.Dict{
+		"servicea": types.Dict{
+			"image":   "example:${USER}",
+			"volumes": []interface{}{"$FOO:/target"},
+			"logging": types.Dict{
+				"driver": "${FOO}",
+				"options": types.Dict{
+					"user": "$USER",
+				},
+			},
+		},
+	}
+	expected := types.Dict{
+		"servicea": types.Dict{
+			"image":   "example:jenny",
+			"volumes": []interface{}{"bar:/target"},
+			"logging": types.Dict{
+				"driver": "bar",
+				"options": types.Dict{
+					"user": "jenny",
+				},
+			},
+		},
+	}
+	result, err := Interpolate(services, "service", defaultMapping)
+	assert.NoError(t, err)
+	assert.Equal(t, expected, result)
+}
+
+func TestInvalidInterpolation(t *testing.T) {
+	services := types.Dict{
+		"servicea": types.Dict{
+			"image": "${",
+		},
+	}
+	_, err := Interpolate(services, "service", defaultMapping)
+	assert.EqualError(t, err, `Invalid interpolation format for "image" option in service "servicea": "${"`)
+}
diff --git a/cli/compose/loader/example1.env b/cli/compose/loader/example1.env
new file mode 100644
index 0000000000..3e7a059613
--- /dev/null
+++ b/cli/compose/loader/example1.env
@@ -0,0 +1,8 @@
+# passed through
+FOO=1
+
+# overridden in example2.env
+BAR=1
+
+# overridden in full-example.yml
+BAZ=1
diff --git a/cli/compose/loader/example2.env b/cli/compose/loader/example2.env
new file mode 100644
index 0000000000..0920d5ab05
--- /dev/null
+++ b/cli/compose/loader/example2.env
@@ -0,0
+1 @@ +BAR=2 diff --git a/cli/compose/loader/full-example.yml b/cli/compose/loader/full-example.yml new file mode 100644 index 0000000000..fb5686a380 --- /dev/null +++ b/cli/compose/loader/full-example.yml @@ -0,0 +1,287 @@ +version: "3" + +services: + foo: + cap_add: + - ALL + + cap_drop: + - NET_ADMIN + - SYS_ADMIN + + cgroup_parent: m-executor-abcd + + # String or list + command: bundle exec thin -p 3000 + # command: ["bundle", "exec", "thin", "-p", "3000"] + + container_name: my-web-container + + depends_on: + - db + - redis + + deploy: + mode: replicated + replicas: 6 + labels: [FOO=BAR] + update_config: + parallelism: 3 + delay: 10s + failure_action: continue + monitor: 60s + max_failure_ratio: 0.3 + resources: + limits: + cpus: '0.001' + memory: 50M + reservations: + cpus: '0.0001' + memory: 20M + restart_policy: + condition: on_failure + delay: 5s + max_attempts: 3 + window: 120s + placement: + constraints: [node=foo] + + devices: + - "/dev/ttyUSB0:/dev/ttyUSB0" + + # String or list + # dns: 8.8.8.8 + dns: + - 8.8.8.8 + - 9.9.9.9 + + # String or list + # dns_search: example.com + dns_search: + - dc1.example.com + - dc2.example.com + + domainname: foo.com + + # String or list + # entrypoint: /code/entrypoint.sh -p 3000 + entrypoint: ["/code/entrypoint.sh", "-p", "3000"] + + # String or list + # env_file: .env + env_file: + - ./example1.env + - ./example2.env + + # Mapping or list + # Mapping values can be strings, numbers or null + # Booleans are not allowed - must be quoted + environment: + RACK_ENV: development + SHOW: 'true' + SESSION_SECRET: + BAZ: 3 + # environment: + # - RACK_ENV=development + # - SHOW=true + # - SESSION_SECRET + + # Items can be strings or numbers + expose: + - "3000" + - 8000 + + external_links: + - redis_1 + - project_db_1:mysql + - project_db_1:postgresql + + # Mapping or list + # Mapping values must be strings + # extra_hosts: + # somehost: "162.242.195.82" + # otherhost: "50.31.209.229" + extra_hosts: + - "somehost:162.242.195.82" + - "otherhost:50.31.209.229" + + hostname: foo + + healthcheck: + test: echo "hello world" + interval: 10s + timeout: 1s + retries: 5 + + # Any valid image reference - repo, tag, id, sha + image: redis + # image: ubuntu:14.04 + # image: tutum/influxdb + # image: example-registry.com:4000/postgresql + # image: a4bc65fd + # image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d + + ipc: host + + # Mapping or list + # Mapping values can be strings, numbers or null + labels: + com.example.description: "Accounting webapp" + com.example.number: 42 + com.example.empty-label: + # labels: + # - "com.example.description=Accounting webapp" + # - "com.example.number=42" + # - "com.example.empty-label" + + links: + - db + - db:database + - redis + + logging: + driver: syslog + options: + syslog-address: "tcp://192.168.0.42:123" + + mac_address: 02:42:ac:11:65:43 + + # network_mode: "bridge" + # network_mode: "host" + # network_mode: "none" + # Use the network mode of an arbitrary container from another service + # network_mode: "service:db" + # Use the network mode of another container, specified by name or id + # network_mode: "container:some-container" + network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b" + + networks: + some-network: + aliases: + - alias1 + - alias3 + other-network: + ipv4_address: 172.16.238.10 + ipv6_address: 2001:3984:3989::10 + other-other-network: + + pid: "host" + + ports: + - 3000 + - "3000-3005" + - "8000:8000" + - "9090-9091:8080-8081" + - 
"49100:22" + - "127.0.0.1:8001:8001" + - "127.0.0.1:5000-5010:5000-5010" + + privileged: true + + read_only: true + + restart: always + + security_opt: + - label=level:s0:c100,c200 + - label=type:svirt_apache_t + + stdin_open: true + + stop_grace_period: 20s + + stop_signal: SIGUSR1 + + # String or list + # tmpfs: /run + tmpfs: + - /run + - /tmp + + tty: true + + ulimits: + # Single number or mapping with soft + hard limits + nproc: 65535 + nofile: + soft: 20000 + hard: 40000 + + user: someone + + volumes: + # Just specify a path and let the Engine create a volume + - /var/lib/mysql + # Specify an absolute path mapping + - /opt/data:/var/lib/mysql + # Path on the host, relative to the Compose file + - .:/code + - ./static:/var/www/html + # User-relative path + - ~/configs:/etc/configs/:ro + # Named volume + - datavolume:/var/lib/mysql + + working_dir: /code + +networks: + # Entries can be null, which specifies simply that a network + # called "{project name}_some-network" should be created and + # use the default driver + some-network: + + other-network: + driver: overlay + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + + ipam: + driver: overlay + # driver_opts: + # # Values can be strings or numbers + # com.docker.network.enable_ipv6: "true" + # com.docker.network.numeric_value: 1 + config: + - subnet: 172.16.238.0/24 + # gateway: 172.16.238.1 + - subnet: 2001:3984:3989::/64 + # gateway: 2001:3984:3989::1 + + external-network: + # Specifies that a pre-existing network called "external-network" + # can be referred to within this file as "external-network" + external: true + + other-external-network: + # Specifies that a pre-existing network called "my-cool-network" + # can be referred to within this file as "other-external-network" + external: + name: my-cool-network + +volumes: + # Entries can be null, which specifies simply that a volume + # called "{project name}_some-volume" should be created and + # use the default driver + some-volume: + + other-volume: + driver: flocker + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + + external-volume: + # Specifies that a pre-existing volume called "external-volume" + # can be referred to within this file as "external-volume" + external: true + + other-external-volume: + # Specifies that a pre-existing volume called "my-cool-volume" + # can be referred to within this file as "other-external-volume" + external: + name: my-cool-volume diff --git a/vendor/github.com/aanand/compose-file/loader/loader.go b/cli/compose/loader/loader.go similarity index 96% rename from vendor/github.com/aanand/compose-file/loader/loader.go rename to cli/compose/loader/loader.go index bd25a843a3..9e46b97594 100644 --- a/vendor/github.com/aanand/compose-file/loader/loader.go +++ b/cli/compose/loader/loader.go @@ -9,9 +9,9 @@ import ( "sort" "strings" - "github.com/aanand/compose-file/interpolation" - "github.com/aanand/compose-file/schema" - "github.com/aanand/compose-file/types" + "github.com/docker/docker/cli/compose/interpolation" + "github.com/docker/docker/cli/compose/schema" + "github.com/docker/docker/cli/compose/types" "github.com/docker/docker/runconfig/opts" units "github.com/docker/go-units" shellwords "github.com/mattn/go-shellwords" @@ -117,6 +117,8 @@ func Load(configDetails types.ConfigDetails) (*types.Config, error) { return &cfg, nil } +// GetUnsupportedProperties returns the list of any unsupported properties that are +// used in the Compose files. 
func GetUnsupportedProperties(configDetails types.ConfigDetails) []string { unsupported := map[string]bool{} @@ -141,6 +143,8 @@ func sortedKeys(set map[string]bool) []string { return keys } +// GetDeprecatedProperties returns the list of any deprecated properties that +// are used in the compose files. func GetDeprecatedProperties(configDetails types.ConfigDetails) map[string]string { return getProperties(getServices(getConfigDict(configDetails)), types.DeprecatedProperties) } @@ -161,6 +165,8 @@ func getProperties(services types.Dict, propertyMap map[string]string) map[strin return output } +// ForbiddenPropertiesError is returned when there are properties in the Compose +// file that are forbidden. type ForbiddenPropertiesError struct { Properties map[string]string } diff --git a/cli/compose/loader/loader_test.go b/cli/compose/loader/loader_test.go new file mode 100644 index 0000000000..e15be7c549 --- /dev/null +++ b/cli/compose/loader/loader_test.go @@ -0,0 +1,782 @@ +package loader + +import ( + "fmt" + "io/ioutil" + "os" + "sort" + "testing" + "time" + + "github.com/docker/docker/cli/compose/types" + "github.com/stretchr/testify/assert" +) + +func buildConfigDetails(source types.Dict) types.ConfigDetails { + workingDir, err := os.Getwd() + if err != nil { + panic(err) + } + + return types.ConfigDetails{ + WorkingDir: workingDir, + ConfigFiles: []types.ConfigFile{ + {Filename: "filename.yml", Config: source}, + }, + Environment: nil, + } +} + +var sampleYAML = ` +version: "3" +services: + foo: + image: busybox + networks: + with_me: + bar: + image: busybox + environment: + - FOO=1 + networks: + - with_ipam +volumes: + hello: + driver: default + driver_opts: + beep: boop +networks: + default: + driver: bridge + driver_opts: + beep: boop + with_ipam: + ipam: + driver: default + config: + - subnet: 172.28.0.0/16 +` + +var sampleDict = types.Dict{ + "version": "3", + "services": types.Dict{ + "foo": types.Dict{ + "image": "busybox", + "networks": types.Dict{"with_me": nil}, + }, + "bar": types.Dict{ + "image": "busybox", + "environment": []interface{}{"FOO=1"}, + "networks": []interface{}{"with_ipam"}, + }, + }, + "volumes": types.Dict{ + "hello": types.Dict{ + "driver": "default", + "driver_opts": types.Dict{ + "beep": "boop", + }, + }, + }, + "networks": types.Dict{ + "default": types.Dict{ + "driver": "bridge", + "driver_opts": types.Dict{ + "beep": "boop", + }, + }, + "with_ipam": types.Dict{ + "ipam": types.Dict{ + "driver": "default", + "config": []interface{}{ + types.Dict{ + "subnet": "172.28.0.0/16", + }, + }, + }, + }, + }, +} + +var sampleConfig = types.Config{ + Services: []types.ServiceConfig{ + { + Name: "foo", + Image: "busybox", + Environment: map[string]string{}, + Networks: map[string]*types.ServiceNetworkConfig{ + "with_me": nil, + }, + }, + { + Name: "bar", + Image: "busybox", + Environment: map[string]string{"FOO": "1"}, + Networks: map[string]*types.ServiceNetworkConfig{ + "with_ipam": nil, + }, + }, + }, + Networks: map[string]types.NetworkConfig{ + "default": { + Driver: "bridge", + DriverOpts: map[string]string{ + "beep": "boop", + }, + }, + "with_ipam": { + Ipam: types.IPAMConfig{ + Driver: "default", + Config: []*types.IPAMPool{ + { + Subnet: "172.28.0.0/16", + }, + }, + }, + }, + }, + Volumes: map[string]types.VolumeConfig{ + "hello": { + Driver: "default", + DriverOpts: map[string]string{ + "beep": "boop", + }, + }, + }, +} + +func TestParseYAML(t *testing.T) { + dict, err := ParseYAML([]byte(sampleYAML)) + if !assert.NoError(t, err) { + return + } + 
assert.Equal(t, sampleDict, dict) +} + +func TestLoad(t *testing.T) { + actual, err := Load(buildConfigDetails(sampleDict)) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services)) + assert.Equal(t, sampleConfig.Networks, actual.Networks) + assert.Equal(t, sampleConfig.Volumes, actual.Volumes) +} + +func TestParseAndLoad(t *testing.T) { + actual, err := loadYAML(sampleYAML) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services)) + assert.Equal(t, sampleConfig.Networks, actual.Networks) + assert.Equal(t, sampleConfig.Volumes, actual.Volumes) +} + +func TestInvalidTopLevelObjectType(t *testing.T) { + _, err := loadYAML("1") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Top-level object must be a mapping") + + _, err = loadYAML("\"hello\"") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Top-level object must be a mapping") + + _, err = loadYAML("[\"hello\"]") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Top-level object must be a mapping") +} + +func TestNonStringKeys(t *testing.T) { + _, err := loadYAML(` +version: "3" +123: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key at top level: 123") + + _, err = loadYAML(` +version: "3" +services: + foo: + image: busybox + 123: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key in services: 123") + + _, err = loadYAML(` +version: "3" +services: + foo: + image: busybox +networks: + default: + ipam: + config: + - 123: oh dear +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key in networks.default.ipam.config[0]: 123") + + _, err = loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: + 1: FOO +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key in services.dict-env.environment: 1") +} + +func TestSupportedVersion(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + foo: + image: busybox +`) + assert.NoError(t, err) + + _, err = loadYAML(` +version: "3.0" +services: + foo: + image: busybox +`) + assert.NoError(t, err) +} + +func TestUnsupportedVersion(t *testing.T) { + _, err := loadYAML(` +version: "2" +services: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "version") + + _, err = loadYAML(` +version: "2.0" +services: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "version") +} + +func TestInvalidVersion(t *testing.T) { + _, err := loadYAML(` +version: 3 +services: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "version must be a string") +} + +func TestV1Unsupported(t *testing.T) { + _, err := loadYAML(` +foo: + image: busybox +`) + assert.Error(t, err) +} + +func TestNonMappingObject(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + - foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services must be a mapping") + + _, err = loadYAML(` +version: "3" +services: + foo: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.foo must be a mapping") + + _, err = loadYAML(` +version: "3" +networks: + - default: + driver: bridge +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "networks must be a mapping") + + _, err = loadYAML(` +version: "3" +networks: + default: bridge +`) + assert.Error(t, err) + 
assert.Contains(t, err.Error(), "networks.default must be a mapping") + + _, err = loadYAML(` +version: "3" +volumes: + - data: + driver: local +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "volumes must be a mapping") + + _, err = loadYAML(` +version: "3" +volumes: + data: local +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "volumes.data must be a mapping") +} + +func TestNonStringImage(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + foo: + image: ["busybox", "latest"] +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.foo.image must be a string") +} + +func TestValidEnvironment(t *testing.T) { + config, err := loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: + FOO: "1" + BAR: 2 + BAZ: 2.5 + QUUX: + list-env: + image: busybox + environment: + - FOO=1 + - BAR=2 + - BAZ=2.5 + - QUUX= +`) + assert.NoError(t, err) + + expected := map[string]string{ + "FOO": "1", + "BAR": "2", + "BAZ": "2.5", + "QUUX": "", + } + + assert.Equal(t, 2, len(config.Services)) + + for _, service := range config.Services { + assert.Equal(t, expected, service.Environment) + } +} + +func TestInvalidEnvironmentValue(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: + FOO: ["1"] +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.dict-env.environment.FOO must be a string, number or null") +} + +func TestInvalidEnvironmentObject(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: "FOO=1" +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.dict-env.environment must be a mapping") +} + +func TestEnvironmentInterpolation(t *testing.T) { + config, err := loadYAML(` +version: "3" +services: + test: + image: busybox + labels: + - home1=$HOME + - home2=${HOME} + - nonexistent=$NONEXISTENT + - default=${NONEXISTENT-default} +networks: + test: + driver: $HOME +volumes: + test: + driver: $HOME +`) + + assert.NoError(t, err) + + home := os.Getenv("HOME") + + expectedLabels := map[string]string{ + "home1": home, + "home2": home, + "nonexistent": "", + "default": "default", + } + + assert.Equal(t, expectedLabels, config.Services[0].Labels) + assert.Equal(t, home, config.Networks["test"].Driver) + assert.Equal(t, home, config.Volumes["test"].Driver) +} + +func TestUnsupportedProperties(t *testing.T) { + dict, err := ParseYAML([]byte(` +version: "3" +services: + web: + image: web + build: ./web + links: + - bar + db: + image: db + build: ./db +`)) + assert.NoError(t, err) + + configDetails := buildConfigDetails(dict) + + _, err = Load(configDetails) + assert.NoError(t, err) + + unsupported := GetUnsupportedProperties(configDetails) + assert.Equal(t, []string{"build", "links"}, unsupported) +} + +func TestDeprecatedProperties(t *testing.T) { + dict, err := ParseYAML([]byte(` +version: "3" +services: + web: + image: web + container_name: web + db: + image: db + container_name: db + expose: ["5434"] +`)) + assert.NoError(t, err) + + configDetails := buildConfigDetails(dict) + + _, err = Load(configDetails) + assert.NoError(t, err) + + deprecated := GetDeprecatedProperties(configDetails) + assert.Equal(t, 2, len(deprecated)) + assert.Contains(t, deprecated, "container_name") + assert.Contains(t, deprecated, "expose") +} + +func TestForbiddenProperties(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + foo: + image: busybox + volumes: + - /data + volume_driver: some-driver + bar: 
+ extends: + service: foo +`) + + assert.Error(t, err) + assert.IsType(t, &ForbiddenPropertiesError{}, err) + fmt.Println(err) + forbidden := err.(*ForbiddenPropertiesError).Properties + + assert.Equal(t, 2, len(forbidden)) + assert.Contains(t, forbidden, "volume_driver") + assert.Contains(t, forbidden, "extends") +} + +func durationPtr(value time.Duration) *time.Duration { + return &value +} + +func int64Ptr(value int64) *int64 { + return &value +} + +func uint64Ptr(value uint64) *uint64 { + return &value +} + +func TestFullExample(t *testing.T) { + bytes, err := ioutil.ReadFile("full-example.yml") + assert.NoError(t, err) + + config, err := loadYAML(string(bytes)) + if !assert.NoError(t, err) { + return + } + + workingDir, err := os.Getwd() + assert.NoError(t, err) + + homeDir := os.Getenv("HOME") + stopGracePeriod := time.Duration(20 * time.Second) + + expectedServiceConfig := types.ServiceConfig{ + Name: "foo", + + CapAdd: []string{"ALL"}, + CapDrop: []string{"NET_ADMIN", "SYS_ADMIN"}, + CgroupParent: "m-executor-abcd", + Command: []string{"bundle", "exec", "thin", "-p", "3000"}, + ContainerName: "my-web-container", + DependsOn: []string{"db", "redis"}, + Deploy: types.DeployConfig{ + Mode: "replicated", + Replicas: uint64Ptr(6), + Labels: map[string]string{"FOO": "BAR"}, + UpdateConfig: &types.UpdateConfig{ + Parallelism: uint64Ptr(3), + Delay: time.Duration(10 * time.Second), + FailureAction: "continue", + Monitor: time.Duration(60 * time.Second), + MaxFailureRatio: 0.3, + }, + Resources: types.Resources{ + Limits: &types.Resource{ + NanoCPUs: "0.001", + MemoryBytes: 50 * 1024 * 1024, + }, + Reservations: &types.Resource{ + NanoCPUs: "0.0001", + MemoryBytes: 20 * 1024 * 1024, + }, + }, + RestartPolicy: &types.RestartPolicy{ + Condition: "on_failure", + Delay: durationPtr(5 * time.Second), + MaxAttempts: uint64Ptr(3), + Window: durationPtr(2 * time.Minute), + }, + Placement: types.Placement{ + Constraints: []string{"node=foo"}, + }, + }, + Devices: []string{"/dev/ttyUSB0:/dev/ttyUSB0"}, + DNS: []string{"8.8.8.8", "9.9.9.9"}, + DNSSearch: []string{"dc1.example.com", "dc2.example.com"}, + DomainName: "foo.com", + Entrypoint: []string{"/code/entrypoint.sh", "-p", "3000"}, + Environment: map[string]string{ + "RACK_ENV": "development", + "SHOW": "true", + "SESSION_SECRET": "", + "FOO": "1", + "BAR": "2", + "BAZ": "3", + }, + Expose: []string{"3000", "8000"}, + ExternalLinks: []string{ + "redis_1", + "project_db_1:mysql", + "project_db_1:postgresql", + }, + ExtraHosts: map[string]string{ + "otherhost": "50.31.209.229", + "somehost": "162.242.195.82", + }, + HealthCheck: &types.HealthCheckConfig{ + Test: []string{ + "CMD-SHELL", + "echo \"hello world\"", + }, + Interval: "10s", + Timeout: "1s", + Retries: uint64Ptr(5), + }, + Hostname: "foo", + Image: "redis", + Ipc: "host", + Labels: map[string]string{ + "com.example.description": "Accounting webapp", + "com.example.number": "42", + "com.example.empty-label": "", + }, + Links: []string{ + "db", + "db:database", + "redis", + }, + Logging: &types.LoggingConfig{ + Driver: "syslog", + Options: map[string]string{ + "syslog-address": "tcp://192.168.0.42:123", + }, + }, + MacAddress: "02:42:ac:11:65:43", + NetworkMode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b", + Networks: map[string]*types.ServiceNetworkConfig{ + "some-network": { + Aliases: []string{"alias1", "alias3"}, + Ipv4Address: "", + Ipv6Address: "", + }, + "other-network": { + Ipv4Address: "172.16.238.10", + Ipv6Address: "2001:3984:3989::10", + }, + 
"other-other-network": nil, + }, + Pid: "host", + Ports: []string{ + "3000", + "3000-3005", + "8000:8000", + "9090-9091:8080-8081", + "49100:22", + "127.0.0.1:8001:8001", + "127.0.0.1:5000-5010:5000-5010", + }, + Privileged: true, + ReadOnly: true, + Restart: "always", + SecurityOpt: []string{ + "label=level:s0:c100,c200", + "label=type:svirt_apache_t", + }, + StdinOpen: true, + StopSignal: "SIGUSR1", + StopGracePeriod: &stopGracePeriod, + Tmpfs: []string{"/run", "/tmp"}, + Tty: true, + Ulimits: map[string]*types.UlimitsConfig{ + "nproc": { + Single: 65535, + }, + "nofile": { + Soft: 20000, + Hard: 40000, + }, + }, + User: "someone", + Volumes: []string{ + "/var/lib/mysql", + "/opt/data:/var/lib/mysql", + fmt.Sprintf("%s:/code", workingDir), + fmt.Sprintf("%s/static:/var/www/html", workingDir), + fmt.Sprintf("%s/configs:/etc/configs/:ro", homeDir), + "datavolume:/var/lib/mysql", + }, + WorkingDir: "/code", + } + + assert.Equal(t, []types.ServiceConfig{expectedServiceConfig}, config.Services) + + expectedNetworkConfig := map[string]types.NetworkConfig{ + "some-network": {}, + + "other-network": { + Driver: "overlay", + DriverOpts: map[string]string{ + "foo": "bar", + "baz": "1", + }, + Ipam: types.IPAMConfig{ + Driver: "overlay", + Config: []*types.IPAMPool{ + {Subnet: "172.16.238.0/24"}, + {Subnet: "2001:3984:3989::/64"}, + }, + }, + }, + + "external-network": { + External: types.External{ + Name: "external-network", + External: true, + }, + }, + + "other-external-network": { + External: types.External{ + Name: "my-cool-network", + External: true, + }, + }, + } + + assert.Equal(t, expectedNetworkConfig, config.Networks) + + expectedVolumeConfig := map[string]types.VolumeConfig{ + "some-volume": {}, + "other-volume": { + Driver: "flocker", + DriverOpts: map[string]string{ + "foo": "bar", + "baz": "1", + }, + }, + "external-volume": { + External: types.External{ + Name: "external-volume", + External: true, + }, + }, + "other-external-volume": { + External: types.External{ + Name: "my-cool-volume", + External: true, + }, + }, + } + + assert.Equal(t, expectedVolumeConfig, config.Volumes) +} + +func loadYAML(yaml string) (*types.Config, error) { + dict, err := ParseYAML([]byte(yaml)) + if err != nil { + return nil, err + } + + return Load(buildConfigDetails(dict)) +} + +func serviceSort(services []types.ServiceConfig) []types.ServiceConfig { + sort.Sort(servicesByName(services)) + return services +} + +type servicesByName []types.ServiceConfig + +func (sbn servicesByName) Len() int { return len(sbn) } +func (sbn servicesByName) Swap(i, j int) { sbn[i], sbn[j] = sbn[j], sbn[i] } +func (sbn servicesByName) Less(i, j int) bool { return sbn[i].Name < sbn[j].Name } diff --git a/cli/compose/schema/bindata.go b/cli/compose/schema/bindata.go new file mode 100644 index 0000000000..2acc7d29f1 --- /dev/null +++ b/cli/compose/schema/bindata.go @@ -0,0 +1,237 @@ +// Code generated by go-bindata. +// sources: +// data/config_schema_v3.0.json +// DO NOT EDIT! 
+ +package schema + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _dataConfig_schema_v30Json = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5a\x4d\x8f\xdb\x36\x13\xbe\xfb\x57\x08\x4a\x6e\xf1\xee\x06\x78\x83\x17\x68\x6e\x3d\xf6\xd4\x9e\xbb\x50\x04\x5a\x1a\xdb\xcc\x52\x24\x33\xa4\x9c\x75\x02\xff\xf7\x82\xfa\xb2\x48\x93\xa2\x6c\x2b\x4d\x0e\xbd\x2c\xd6\xe2\xcc\x70\xbe\xf8\xcc\x70\xa4\xef\xab\x24\x49\xdf\xaa\x62\x0f\x15\x49\x3f\x26\xe9\x5e\x6b\xf9\xf1\xe9\xe9\xb3\x12\xfc\xa1\x7d\xfa\x28\x70\xf7\x54\x22\xd9\xea\x87\xf7\x1f\x9e\xda\x67\x6f\xd2\xb5\xe1\xa3\xa5\x61\x29\x04\xdf\xd2\x5d\xde\xae\xe4\x87\xff\x3d\xbe\x7f\x34\xec\x2d\x89\x3e\x4a\x30\x44\x62\xf3\x19\x0a\xdd\x3e\x43\xf8\x52\x53\x04\xc3\xfc\x9c\x1e\x00\x15\x15\x3c\xcd\xd6\x2b\xb3\x26\x51\x48\x40\x4d\x41\xa5\x1f\x13\xa3\x5c\x92\x0c\x24\xfd\x83\x91\x58\xa5\x91\xf2\x5d\xda\x3c\x3e\x35\x12\x92\x24\x55\x80\x07\x5a\x8c\x24\x0c\xaa\xbe\x79\x3a\xcb\x7f\x1a\xc8\xd6\xae\xd4\x91\xb2\xcd\x73\x49\xb4\x06\xe4\x7f\x5d\xea\xd6\x2c\x7f\x7a\x26\x0f\xdf\x7e\x7f\xf8\xfb\xfd\xc3\x6f\x8f\xf9\x43\xf6\xee\xad\xb5\x6c\xfc\x8b\xb0\x6d\xb7\x2f\x61\x4b\x39\xd5\x54\xf0\x61\xff\x74\xa0\x3c\x75\xff\x9d\x86\x8d\x49\x59\x36\xc4\x84\x59\x7b\x6f\x09\x53\x60\xdb\xcc\x41\x7f\x15\xf8\x12\xb3\x79\x20\xfb\x49\x36\x77\xfb\x7b\x6c\xb6\xcd\x39\x08\x56\x57\xd1\x08\xf6\x54\x3f\xc9\x98\x76\xfb\xfb\xe2\xb7\xea\x8d\x9e\xa4\x6d\x29\x46\x7b\x37\x0a\x5a\xd9\xee\x73\x95\x2f\xdb\xc2\xbe\x1a\x9c\x15\xf0\x52\x09\x92\x89\xa3\x79\x16\xf0\x47\x4b\x50\x01\xd7\xe9\xe0\x82\x24\x49\x37\x35\x65\xa5\xeb\x51\xc1\xe1\x4f\x23\xe2\x79\xf4\x30\x49\xbe\xbb\x07\x7b\x24\xa7\x59\xb7\x7e\x85\x03\x3e\xac\x07\x6c\x19\xd6\x0b\xc1\x35\xbc\xea\xc6\xa8\xe9\xad\x5b\x17\x88\xe2\x05\x70\x4b\x19\xcc\xe5\x20\xb8\x53\x13\x2e\x63\x54\xe9\x5c\x60\x5e\xd2\x42\xa7\x27\x87\xfd\x42\x5e\x3c\x9f\x06\xd6\xd1\xaf\x6c\xe5\x11\x98\x16\x44\xe6\xa4\x2c\x2d\x3b\x08\x22\x39\xa6\xeb\x24\xa5\x1a\x2a\xe5\x37\x31\x49\x6b\x4e\xbf\xd4\xf0\x47\x47\xa2\xb1\x06\x57\x6e\x89\x42\x2e\x2f\x78\x87\xa2\x96\xb9\x24\x68\x12\x6c\xda\xfd\x69\x21\xaa\x8a\xf0\xa5\xb2\xee\x1a\x3b\x66\x78\x5e\x70\x4d\x28\x07\xcc\x39\xa9\x62\x89\x64\x4e\x1d\xf0\x52\xe5\x6d\xfd\x9b\x4c\xa3\x6d\xde\xf2\x2b\x47\xc0\x50\x0c\x17\x8d\x47\xc9\xa7\x12\xbb\x15\x63\x52\xdb\xe8\x96\x3a\x8c\xb9\x02\x82\xc5\xfe\x46\x7e\x51\x11\xca\xe7\xf8\x0e\xb8\xc6\xa3\x14\xb4\xcd\x97\x5f\x2e\x11\x80\x1f\xf2\x01\x4b\xae\x76\x03\xf0\x03\x45\xc1\xab\xfe\x34\xcc\x01\x98\x01\xe4\x0d\xff\xab\x14\x0a\x5c\xc7\x38\x06\x8e\x97\x06\x53\x2d\x9f\xf4\x1c\xcf\xbd\xe1\xeb\x24\xe5\x75\xb5\x01\x3
4\x2d\x9d\x45\xb9\x15\x58\x11\xa3\x6c\xbf\xf7\x68\xd9\xf2\xb4\x27\xf3\xc6\x0e\x1c\xdb\x60\xca\x3a\x61\x39\xa3\xfc\x65\xf9\x14\x87\x57\x8d\x24\xdf\x0b\xa5\xe7\x63\xf8\x88\x7d\x0f\x84\xe9\x7d\xb1\x87\xe2\x65\x82\x7d\x4c\x65\x71\x0b\xa5\xe7\x24\x39\xad\xc8\x2e\x4e\x24\x8b\x18\x09\x23\x1b\x60\x37\xd9\xb9\xa8\xf3\x47\x62\xc5\x6e\x67\x48\x43\x19\x77\xd1\xb9\x74\xcb\xb1\x9a\x5f\x22\x3d\x00\xce\x2d\xe0\x42\x9e\x1b\x2e\x77\x31\xde\x80\x24\xf1\xee\xd3\x22\xfd\xf4\xd8\x36\x9f\x13\xa7\xaa\xf9\x8f\xb1\x34\x73\xdb\x85\xc4\xa9\xfb\xbe\x27\x8e\x85\xf3\x1a\x0a\x2b\x2a\x15\x29\x4c\xdf\x80\xa0\x02\x71\x3d\x93\x76\xcd\x7e\x5e\x89\x32\x94\xa0\x17\xc4\xae\x6f\x82\x48\x7d\x75\x21\x4c\x6e\xea\x1f\x67\x85\x2e\x7a\x81\x88\x58\x13\x52\x6f\xae\x9a\x67\x75\xe3\x29\xd6\xd0\x11\x46\x89\x82\xf8\x61\x0f\x3a\xd2\x92\x46\xe5\xe1\xc3\xcc\x9c\xf0\xf1\xfe\x7f\x92\x37\xc0\x1a\x94\x39\xbf\x47\x8e\x88\x3a\xab\xd2\x1c\x37\x9f\x22\x59\xe4\xb4\xfd\xe0\x16\x5e\xd2\x32\x8c\x15\x0d\x42\x8c\x0f\x98\x14\xa8\x2f\x4e\xd7\xbf\x53\xee\xdb\xad\xef\xae\xf6\x12\xe9\x81\x32\xd8\x81\x7d\x6b\xd9\x08\xc1\x80\x70\x0b\x7a\x10\x48\x99\x0b\xce\x8e\x33\x28\x95\x26\x18\xbd\x50\x28\x28\x6a\xa4\xfa\x98\x0b\xa9\x17\xef\x33\xd4\xbe\xca\x15\xfd\x06\x76\x34\xcf\x78\xdf\x09\xca\x2c\x1e\x5d\x52\x9e\x0b\x09\x3c\x6a\xa2\xd2\x42\xe6\x8a\xee\x38\x61\x51\x33\x0d\xe9\x0e\x49\x01\xb9\x04\xa4\xa2\xf4\x31\xac\xc7\xb1\x2d\x6b\x24\x26\x9f\x2d\x31\xba\x92\xdb\x1b\x6f\x07\x5a\xc7\x63\x56\x33\x5a\xd1\x70\x32\x7b\x50\x72\x06\x90\xb7\x20\xee\xc7\xee\x09\xdc\x3e\x6b\x4a\xb9\x86\x1d\xa0\x0f\xee\x26\x5a\x87\xe9\xce\x61\x46\xcb\xb0\x27\x68\x47\x69\x42\x8f\x86\x41\x89\xad\xf6\x33\xf8\x1a\x0a\xaf\x5e\xd6\x04\xb7\x91\xb7\xee\x14\xc9\xbc\xf4\x57\x61\xb2\xab\x46\x16\x84\xc5\x93\x17\x16\x6b\x15\xed\xee\xc6\xf3\xc5\x45\x4f\xb2\x69\x61\x4c\x66\x97\xd4\xaf\xc2\xca\x51\xf7\x8a\x09\xaf\x73\x9b\xe8\x05\xf8\x66\x7d\x63\x52\x77\xde\xf7\x3c\x24\x5c\x5f\x25\xce\x53\xd2\xc0\xe0\xcf\xe4\x07\x1e\x2c\xf0\xf0\xf9\x54\xd3\x0a\x44\xad\x23\x54\x08\x1a\xa9\xe3\xf9\x0e\xe9\x2c\x61\xa0\x7e\xcd\x4b\x7b\x49\x15\xd9\x38\xf3\xbf\x01\xa3\x6e\x0a\x6f\x72\x1e\xae\xf6\x97\xf9\xa9\xe0\x8e\x28\x17\x88\xed\x44\x6f\x3e\x0a\x99\x64\xb4\x20\x2a\x86\x32\x77\x5c\x21\x6b\x59\x12\x0d\x79\xfb\x2a\xe9\x2a\x5c\x9f\x00\x74\x49\x90\x30\x06\x8c\xaa\x6a\x0e\x40\xa6\x25\x30\x72\xbc\xa9\xe0\x35\xec\x5b\x42\x59\x8d\x90\x93\x42\x77\x6f\xab\x22\x99\x99\x56\x82\x53\x2d\xbc\x48\x31\x6f\xcb\x8a\xbc\xe6\xfd\xb6\x0d\x89\xf7\x58\x05\x1b\xaf\xb9\xb7\xbf\x51\x26\x28\x51\x63\x71\xe1\xec\x9b\x43\x74\x2e\xe4\x81\x8c\xe9\x77\xbc\x30\x1d\x41\x19\x50\x1a\x2e\xe7\x51\xfe\x68\xdd\xe8\x3a\xc1\x5c\x0a\x46\x8b\xe3\x52\x16\x16\x82\xb7\x4e\x9e\x93\x10\x77\x66\xa0\x49\x07\xd3\xe7\x54\x52\x47\x0f\x6b\xc3\xf0\x95\xf2\x52\x7c\xbd\x62\xc3\xe5\x52\x49\x32\x52\x80\x83\x77\xf7\x3a\x5a\x69\x24\x94\xeb\xab\xcb\xfa\xbd\x66\xdd\x51\xd5\x87\xfc\x8c\xa0\xfe\x40\x17\x7f\xd7\x19\x40\xfa\x42\xd6\xd1\x89\x4d\x05\x95\x40\x6f\x02\x2e\xf0\x6e\x3a\x66\x62\x4f\xb6\x40\x55\x9b\x35\xe2\xeb\xa8\xcc\x8d\x6e\xf1\xab\x44\x7c\x8c\x97\xc5\x01\x89\x4a\x52\x2d\x75\x3a\x66\x0f\x3d\x53\x6f\x0d\x4e\xa6\x87\x05\x49\x78\x60\x10\xd3\x3a\xae\x7b\x47\xa1\xea\x0d\x07\xff\x3d\xfd\xf2\x0a\xe1\x7b\x13\x3b\xff\x0e\x72\x0a\xdf\x38\xee\x03\xbd\xfe\x7d\x45\x20\xaa\xcf\x43\x27\xb9\x1e\x7c\x95\xcd\x0e\x71\xf0\x65\xc1\x72\xfa\x5f\xd9\xe0\xdd\x81\x19\xdd\xb7\x15\x11\xc8\xe8\xa8\xfe\x43\x8c\x5f\x26\xbf\x26\x8a\xe2\x8d\xb7\x83\x2b\x92\xc6\x19\x2b\x8d\x92\xe7\xf2\xea\x38\x15\xe7\xd9\x43\xf1\x8e\x23\xb3\xd5\x70\xc9\x3c\xdf\xad\xd9\x10\x3a\x35\x71\xe8\x49\x02\x43\x52\x67\xd3\xce\x79\xd3\x96\x2f\x98\xb6\x8f\xef\x26\x
0a\xc5\xd4\xcb\xab\x1f\x84\xb0\x0b\x4c\x73\xfc\x31\x75\xba\xcb\xde\xbb\x97\x1f\x5f\x05\x90\x6a\xc4\x7f\xf1\x29\x96\xb1\x93\x1f\x2f\x46\x1b\xdf\xed\x31\x5b\xfb\x19\x55\x66\xf9\xc7\x21\x69\x5f\x05\x8f\x70\x22\x1b\x37\xdc\xa1\x30\x7a\x3f\xd0\x72\x87\x7c\xfd\x87\x52\xd9\xf4\x61\x5f\xf5\x7f\x4f\xab\xd3\xea\x9f\x00\x00\x00\xff\xff\xd1\xeb\xc9\xb9\x5c\x2a\x00\x00") + +func dataConfig_schema_v30JsonBytes() ([]byte, error) { + return bindataRead( + _dataConfig_schema_v30Json, + "data/config_schema_v3.0.json", + ) +} + +func dataConfig_schema_v30Json() (*asset, error) { + bytes, err := dataConfig_schema_v30JsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/config_schema_v3.0.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "data/config_schema_v3.0.json": dataConfig_schema_v30Json, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} +var _bintree = &bintree{nil, map[string]*bintree{ + "data": &bintree{nil, map[string]*bintree{ + "config_schema_v3.0.json": &bintree{dataConfig_schema_v30Json, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} + diff --git a/cli/compose/schema/data/config_schema_v3.0.json b/cli/compose/schema/data/config_schema_v3.0.json new file mode 100644 index 0000000000..520e57d5e2 --- /dev/null +++ b/cli/compose/schema/data/config_schema_v3.0.json @@ -0,0 +1,379 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "config_schema_v3.0.json", + "type": "object", + "required": ["version"], + + "properties": { + "version": { + "type": "string" + }, + + "services": { + "id": "#/properties/services", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + "additionalProperties": false + }, + + "networks": { + "id": "#/properties/networks", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/network" + } + } + }, + + "volumes": { + "id": "#/properties/volumes", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/volume" + } + }, + "additionalProperties": false + } + }, + + "additionalProperties": false, + + "definitions": { + + "service": { + "id": "#/definitions/service", + "type": "object", + + "properties": { + "deploy": {"$ref": "#/definitions/deployment"}, + "build": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "context": {"type": "string"}, + "dockerfile": {"type": "string"}, + "args": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + } + ] + }, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroup_parent": {"type": "string"}, + "command": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "container_name": {"type": "string"}, + "depends_on": {"$ref": "#/definitions/list_of_strings"}, + "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "dns": {"$ref": "#/definitions/string_or_list"}, + "dns_search": {"$ref": "#/definitions/string_or_list"}, + "domainname": {"type": "string"}, + "entrypoint": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "env_file": {"$ref": "#/definitions/string_or_list"}, + "environment": {"$ref": "#/definitions/list_or_dict"}, + + "expose": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "expose" + }, + "uniqueItems": true + }, + + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "healthcheck": {"$ref": "#/definitions/healthcheck"}, + "hostname": {"type": "string"}, + "image": {"type": "string"}, + "ipc": {"type": "string"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + + "logging": { + "type": "object", + + "properties": { + "driver": {"type": "string"}, + "options": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number", "null"]} + } + } + }, + "additionalProperties": false + }, + + "mac_address": {"type": "string"}, + "network_mode": {"type": "string"}, + + "networks": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "oneOf": [ + { + "type": "object", + "properties": { + "aliases": {"$ref": "#/definitions/list_of_strings"}, + "ipv4_address": {"type": "string"}, + "ipv6_address": {"type": 
"string"} + }, + "additionalProperties": false + }, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + ] + }, + "pid": {"type": ["string", "null"]}, + + "ports": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "ports" + }, + "uniqueItems": true + }, + + "privileged": {"type": "boolean"}, + "read_only": {"type": "boolean"}, + "restart": {"type": "string"}, + "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, + "stdin_open": {"type": "boolean"}, + "stop_signal": {"type": "string"}, + "stop_grace_period": {"type": "string", "format": "duration"}, + "tmpfs": {"$ref": "#/definitions/string_or_list"}, + "tty": {"type": "boolean"}, + "ulimits": { + "type": "object", + "patternProperties": { + "^[a-z]+$": { + "oneOf": [ + {"type": "integer"}, + { + "type":"object", + "properties": { + "hard": {"type": "integer"}, + "soft": {"type": "integer"} + }, + "required": ["soft", "hard"], + "additionalProperties": false + } + ] + } + } + }, + "user": {"type": "string"}, + "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "working_dir": {"type": "string"} + }, + "additionalProperties": false + }, + + "healthcheck": { + "id": "#/definitions/healthcheck", + "type": ["object", "null"], + "properties": { + "interval": {"type":"string"}, + "timeout": {"type":"string"}, + "retries": {"type": "number"}, + "test": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "disable": {"type": "boolean"} + }, + "additionalProperties": false + }, + "deployment": { + "id": "#/definitions/deployment", + "type": ["object", "null"], + "properties": { + "mode": {"type": "string"}, + "replicas": {"type": "integer"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "update_config": { + "type": "object", + "properties": { + "parallelism": {"type": "integer"}, + "delay": {"type": "string", "format": "duration"}, + "failure_action": {"type": "string"}, + "monitor": {"type": "string", "format": "duration"}, + "max_failure_ratio": {"type": "number"} + }, + "additionalProperties": false + }, + "resources": { + "type": "object", + "properties": { + "limits": {"$ref": "#/definitions/resource"}, + "reservations": {"$ref": "#/definitions/resource"} + } + }, + "restart_policy": { + "type": "object", + "properties": { + "condition": {"type": "string"}, + "delay": {"type": "string", "format": "duration"}, + "max_attempts": {"type": "integer"}, + "window": {"type": "string", "format": "duration"} + }, + "additionalProperties": false + }, + "placement": { + "type": "object", + "properties": { + "constraints": {"type": "array", "items": {"type": "string"}} + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + + "resource": { + "id": "#/definitions/resource", + "type": "object", + "properties": { + "cpus": {"type": "string"}, + "memory": {"type": "string"} + }, + "additionalProperties": false + }, + + "network": { + "id": "#/definitions/network", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "ipam": { + "type": "object", + "properties": { + "driver": {"type": "string"}, + "config": { + "type": "array", + "items": { + "type": "object", + "properties": { + "subnet": {"type": "string"} + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false + 
}, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "volume": { + "id": "#/definitions/volume", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + } + } + }, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "additionalProperties": false + }, + + "string_or_list": { + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/list_of_strings"} + ] + }, + + "list_of_strings": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + { + "type": "object", + "patternProperties": { + ".+": { + "type": ["string", "number", "null"] + } + }, + "additionalProperties": false + }, + {"type": "array", "items": {"type": "string"}, "uniqueItems": true} + ] + }, + + "constraints": { + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ + {"required": ["build"]}, + {"required": ["image"]} + ], + "properties": { + "build": { + "required": ["context"] + } + } + } + } + } +} diff --git a/vendor/github.com/aanand/compose-file/schema/schema.go b/cli/compose/schema/schema.go similarity index 98% rename from vendor/github.com/aanand/compose-file/schema/schema.go rename to cli/compose/schema/schema.go index d926ef02e9..6366cab48e 100644 --- a/vendor/github.com/aanand/compose-file/schema/schema.go +++ b/cli/compose/schema/schema.go @@ -1,6 +1,6 @@ package schema -//go:generate go-bindata -pkg schema data +//go:generate go-bindata -pkg schema -nometadata data import ( "fmt" diff --git a/cli/compose/schema/schema_test.go b/cli/compose/schema/schema_test.go new file mode 100644 index 0000000000..be98f807de --- /dev/null +++ b/cli/compose/schema/schema_test.go @@ -0,0 +1,35 @@ +package schema + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type dict map[string]interface{} + +func TestValid(t *testing.T) { + config := dict{ + "version": "2.1", + "services": dict{ + "foo": dict{ + "image": "busybox", + }, + }, + } + + assert.NoError(t, Validate(config)) +} + +func TestUndefinedTopLevelOption(t *testing.T) { + config := dict{ + "version": "2.1", + "helicopters": dict{ + "foo": dict{ + "image": "busybox", + }, + }, + } + + assert.Error(t, Validate(config)) +} diff --git a/vendor/github.com/aanand/compose-file/template/template.go b/cli/compose/template/template.go similarity index 88% rename from vendor/github.com/aanand/compose-file/template/template.go rename to cli/compose/template/template.go index 5b25333881..28495baf50 100644 --- a/vendor/github.com/aanand/compose-file/template/template.go +++ b/cli/compose/template/template.go @@ -16,6 +16,8 @@ var patternString = fmt.Sprintf( var pattern = regexp.MustCompile(patternString) +// InvalidTemplateError is returned when a variable template is not in a valid +// format type InvalidTemplateError struct { Template string } @@ -24,23 +26,14 @@ func (e InvalidTemplateError) Error() string { return fmt.Sprintf("Invalid template: %#v", e.Template) } -// A user-supplied function which maps from variable names to values. +// Mapping is a user-supplied function which maps from variable names to values. 
 // Returns the value as a string and a bool indicating whether
 // the value is present, to distinguish between an empty string
 // and the absence of a value.
 type Mapping func(string) (string, bool)

+// Substitute variables in the string with their values
 func Substitute(template string, mapping Mapping) (result string, err *InvalidTemplateError) {
-	defer func() {
-		if r := recover(); r != nil {
-			if e, ok := r.(*InvalidTemplateError); ok {
-				err = e
-			} else {
-				panic(r)
-			}
-		}
-	}()
-
 	result = pattern.ReplaceAllStringFunc(template, func(substring string) string {
 		matches := pattern.FindStringSubmatch(substring)
 		groups := make(map[string]string)
@@ -87,11 +80,11 @@ func Substitute(template string, mapping Mapping) (result string, err *InvalidTe
 			return escaped
 		}

-		panic(&InvalidTemplateError{Template: template})
+		err = &InvalidTemplateError{Template: template}
 		return ""
 	})

-	return
+	return result, err
 }

 // Split the string at the first occurrence of sep, and return the part before the separator,
@@ -102,7 +95,6 @@ func partition(s, sep string) (string, string) {
 	if strings.Contains(s, sep) {
 		parts := strings.SplitN(s, sep, 2)
 		return parts[0], parts[1]
-	} else {
-		return s, ""
 	}
+	return s, ""
 }
diff --git a/cli/compose/template/template_test.go b/cli/compose/template/template_test.go
new file mode 100644
index 0000000000..6b81bf0a39
--- /dev/null
+++ b/cli/compose/template/template_test.go
@@ -0,0 +1,83 @@
+package template
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+var defaults = map[string]string{
+	"FOO": "first",
+	"BAR": "",
+}
+
+func defaultMapping(name string) (string, bool) {
+	val, ok := defaults[name]
+	return val, ok
+}
+
+func TestEscaped(t *testing.T) {
+	result, err := Substitute("$${foo}", defaultMapping)
+	assert.NoError(t, err)
+	assert.Equal(t, "${foo}", result)
+}
+
+func TestInvalid(t *testing.T) {
+	invalidTemplates := []string{
+		"${",
+		"$}",
+		"${}",
+		"${ }",
+		"${ foo}",
+		"${foo }",
+		"${foo!}",
+	}
+
+	for _, template := range invalidTemplates {
+		_, err := Substitute(template, defaultMapping)
+		assert.Error(t, err)
+		assert.IsType(t, &InvalidTemplateError{}, err)
+	}
+}
+
+func TestNoValueNoDefault(t *testing.T) {
+	for _, template := range []string{"This ${missing} var", "This ${BAR} var"} {
+		result, err := Substitute(template, defaultMapping)
+		assert.NoError(t, err)
+		assert.Equal(t, "This  var", result)
+	}
+}
+
+func TestValueNoDefault(t *testing.T) {
+	for _, template := range []string{"This $FOO var", "This ${FOO} var"} {
+		result, err := Substitute(template, defaultMapping)
+		assert.NoError(t, err)
+		assert.Equal(t, "This first var", result)
+	}
+}
+
+func TestNoValueWithDefault(t *testing.T) {
+	for _, template := range []string{"ok ${missing:-def}", "ok ${missing-def}"} {
+		result, err := Substitute(template, defaultMapping)
+		assert.NoError(t, err)
+		assert.Equal(t, "ok def", result)
+	}
+}
+
+func TestEmptyValueWithSoftDefault(t *testing.T) {
+	result, err := Substitute("ok ${BAR:-def}", defaultMapping)
+	assert.NoError(t, err)
+	assert.Equal(t, "ok def", result)
+}
+
+func TestEmptyValueWithHardDefault(t *testing.T) {
+	result, err := Substitute("ok ${BAR-def}", defaultMapping)
+	assert.NoError(t, err)
+	assert.Equal(t, "ok ", result)
+}
+
+func TestNonAlphanumericDefault(t *testing.T) {
+	result, err := Substitute("ok ${BAR:-/non:-alphanumeric}", defaultMapping)
+	assert.NoError(t, err)
+	assert.Equal(t, "ok /non:-alphanumeric", result)
+}
diff --git a/vendor/github.com/aanand/compose-file/types/types.go
b/cli/compose/types/types.go similarity index 81% rename from vendor/github.com/aanand/compose-file/types/types.go rename to cli/compose/types/types.go index 69a4e5e978..45923b3460 100644 --- a/vendor/github.com/aanand/compose-file/types/types.go +++ b/cli/compose/types/types.go @@ -4,6 +4,7 @@ import ( "time" ) +// UnsupportedProperties not yet supported by this implementation of the compose file var UnsupportedProperties = []string{ "build", "cap_add", @@ -27,11 +28,15 @@ var UnsupportedProperties = []string{ "tmpfs", } +// DeprecatedProperties that were removed from the v3 format, but their +// use should not impact the behaviour of the application. var DeprecatedProperties = map[string]string{ "container_name": "Setting the container name is not supported.", "expose": "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.", } +// ForbiddenProperties that are not supported in this implementation of the +// compose file. var ForbiddenProperties = map[string]string{ "extends": "Support for `extends` is not implemented yet. Use `docker-compose config` to generate a configuration with all `extends` options resolved, and deploy from that.", "volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.", @@ -43,25 +48,30 @@ var ForbiddenProperties = map[string]string{ "memswap_limit": "Set resource limits using deploy.resources", } +// Dict is a mapping of strings to interface{} type Dict map[string]interface{} +// ConfigFile is a filename and the contents of the file as a Dict type ConfigFile struct { Filename string Config Dict } +// ConfigDetails are the details about a group of ConfigFiles type ConfigDetails struct { WorkingDir string ConfigFiles []ConfigFile Environment map[string]string } +// Config is a full compose file configuration type Config struct { Services []ServiceConfig Networks map[string]NetworkConfig Volumes map[string]VolumeConfig } +// ServiceConfig is the configuration of one service type ServiceConfig struct { Name string @@ -73,8 +83,8 @@ type ServiceConfig struct { DependsOn []string `mapstructure:"depends_on"` Deploy DeployConfig Devices []string - Dns []string `compose:"string_or_list"` - DnsSearch []string `mapstructure:"dns_search" compose:"string_or_list"` + DNS []string `compose:"string_or_list"` + DNSSearch []string `mapstructure:"dns_search" compose:"string_or_list"` DomainName string `mapstructure:"domainname"` Entrypoint []string `compose:"shell_command"` Environment map[string]string `compose:"list_or_dict_equals"` @@ -108,11 +118,13 @@ type ServiceConfig struct { WorkingDir string `mapstructure:"working_dir"` } +// LoggingConfig the logging configuration for a service type LoggingConfig struct { Driver string Options map[string]string } +// DeployConfig the deployment configuration for a service type DeployConfig struct { Mode string Replicas *uint64 @@ -123,6 +135,7 @@ type DeployConfig struct { Placement Placement } +// HealthCheckConfig the healthcheck configuration for a service type HealthCheckConfig struct { Test []string `compose:"healthcheck"` Timeout string @@ -131,6 +144,7 @@ type HealthCheckConfig struct { Disable bool } +// UpdateConfig the service update configuration type UpdateConfig struct { Parallelism *uint64 Delay time.Duration @@ -139,19 +153,23 @@ type UpdateConfig struct { MaxFailureRatio float32 `mapstructure:"max_failure_ratio"` } +// Resources the resource limits and reservations type 
Resources struct { Limits *Resource Reservations *Resource } +// Resource is a resource to be limited or reserved type Resource struct { // TODO: types to convert from units and ratios NanoCPUs string `mapstructure:"cpus"` MemoryBytes UnitBytes `mapstructure:"memory"` } +// UnitBytes is the bytes type type UnitBytes int64 +// RestartPolicy the service restart policy type RestartPolicy struct { Condition string Delay *time.Duration @@ -159,22 +177,26 @@ type RestartPolicy struct { Window *time.Duration } +// Placement constraints for the service type Placement struct { Constraints []string } +// ServiceNetworkConfig is the network configuration for a service type ServiceNetworkConfig struct { Aliases []string Ipv4Address string `mapstructure:"ipv4_address"` Ipv6Address string `mapstructure:"ipv6_address"` } +// UlimitsConfig the ulimit configuration type UlimitsConfig struct { Single int Soft int Hard int } +// NetworkConfig for a network type NetworkConfig struct { Driver string DriverOpts map[string]string `mapstructure:"driver_opts"` @@ -183,15 +205,18 @@ type NetworkConfig struct { Labels map[string]string `compose:"list_or_dict_equals"` } +// IPAMConfig for a network type IPAMConfig struct { Driver string Config []*IPAMPool } +// IPAMPool for a network type IPAMPool struct { Subnet string } +// VolumeConfig for a volume type VolumeConfig struct { Driver string DriverOpts map[string]string `mapstructure:"driver_opts"` diff --git a/hack/dockerfile/binaries-commits b/hack/dockerfile/binaries-commits index 0e9611209a..010b12726c 100644 --- a/hack/dockerfile/binaries-commits +++ b/hack/dockerfile/binaries-commits @@ -6,3 +6,4 @@ CONTAINERD_COMMIT=03e5862ec0d8d3b3f750e19fca3ee367e13c090e TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574 LIBNETWORK_COMMIT=0f534354b813003a754606689722fe253101bc4e VNDR_COMMIT=f56bd4504b4fad07a357913687fb652ee54bb3b0 +BINDATA_COMMIT=a0ff2567cfb70903282db057e799fd826784d41d diff --git a/hack/dockerfile/install-binaries.sh b/hack/dockerfile/install-binaries.sh index bd4c63c645..81790855bb 100755 --- a/hack/dockerfile/install-binaries.sh +++ b/hack/dockerfile/install-binaries.sh @@ -46,6 +46,14 @@ install_proxy() { go build -ldflags="$PROXY_LDFLAGS" -o /usr/local/bin/docker-proxy github.com/docker/libnetwork/cmd/proxy } +install_bindata() { + echo "Install go-bindata version $BINDATA_COMMIT" + git clone https://github.com/jteeuwen/go-bindata "$GOPATH/src/github.com/jteeuwen/go-bindata" + cd $GOPATH/src/github.com/jteeuwen/go-bindata + git checkout -q "$BINDATA_COMMIT" + go build -o /usr/local/bin/go-bindata github.com/jteeuwen/go-bindata/go-bindata +} + for prog in "$@" do case $prog in @@ -99,6 +107,10 @@ do go build -v -o /usr/local/bin/vndr . ;; + bindata) + install_bindata + ;; + *) echo echo "Usage: $0 [tomlv|runc|containerd|tini|proxy]" exit 1 diff --git a/hack/make.ps1 b/hack/make.ps1 index 1d3a4a23bd..429b5cf297 100644 --- a/hack/make.ps1 +++ b/hack/make.ps1 @@ -261,7 +261,7 @@ Function Validate-GoFormat($headCommit, $upstreamCommit) { # Get a list of all go source-code files which have changed. 
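For context on what the new install_bindata step feeds: go generate re-runs go-bindata over cli/compose/schema/data (the -nometadata flag keeps the generated file reproducible), and the schema package reads the embedded JSON back through the generated Asset helper. The loader below is only an illustrative sketch assuming that flow; it is not code from this patch:

package schema

import "encoding/json"

// loadSchemaDoc is a hypothetical helper showing how the embedded schema file
// can be decoded; Asset is provided by the generated bindata.go.
func loadSchemaDoc() (map[string]interface{}, error) {
	raw, err := Asset("data/config_schema_v3.0.json")
	if err != nil {
		return nil, err
	}
	var doc map[string]interface{}
	err = json.Unmarshal(raw, &doc)
	return doc, err
}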
Ignore exit code on next call - always process regardless $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'*.go`'" - $files = $files | Select-String -NotMatch "^vendor/" + $files = $files | Select-String -NotMatch "^vendor/" | Select-String -NotMatch "^cli/compose/schema/bindata.go" $badFiles=@(); $files | %{ # Deliberately ignore error on next line - treat as failed $content=Invoke-Expression "git show $headCommit`:$_" diff --git a/hack/validate/compose-bindata b/hack/validate/compose-bindata new file mode 100755 index 0000000000..26ee0312d7 --- /dev/null +++ b/hack/validate/compose-bindata @@ -0,0 +1,28 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'cli/compose/schema/data' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + go generate github.com/docker/docker/cli/compose/schema 2> /dev/null + # Let see if the working directory is clean + diffs="$(git status --porcelain -- api/types/ 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of `go generate github.com/docker/docker/cli/compose/schema` differs' + echo + echo "$diffs" + echo + echo 'Please run `go generate github.com/docker/docker/cli/compose/schema`' + } >&2 + false + else + echo 'Congratulations! cli/compose/schema/bindata.go is up-to-date.' + fi +else + echo 'No cli/compose/schema/data changes in diff.' +fi diff --git a/hack/validate/gofmt b/hack/validate/gofmt index f3c6a848de..2040afa09e 100755 --- a/hack/validate/gofmt +++ b/hack/validate/gofmt @@ -4,7 +4,9 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "${SCRIPTDIR}/.validate" IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | + grep -v '^vendor/' | + grep -v '^cli/compose/schema/bindata.go' || true) ) unset IFS badFiles=() diff --git a/hack/validate/lint b/hack/validate/lint index 1ba6fbc86c..4ac0a33b20 100755 --- a/hack/validate/lint +++ b/hack/validate/lint @@ -4,7 +4,7 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "${SCRIPTDIR}/.validate" IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' || true) ) +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' | grep -v '^cli/compose/schema/bindata.go' || true) ) unset IFS errors=() diff --git a/hack/validate/swagger-gen b/hack/validate/swagger-gen index 42c9749360..008abc7e0d 100755 --- a/hack/validate/swagger-gen +++ b/hack/validate/swagger-gen @@ -8,7 +8,6 @@ files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/types/' 'api/swag unset IFS if [ ${#files[@]} -gt 0 ]; then - # We run vndr to and see if we have a diff afterwards ${SCRIPTDIR}/../generate-swagger-api.sh 2> /dev/null # Let see if the working directory is clean diffs="$(git status --porcelain -- api/types/ 2>/dev/null)" diff --git a/vendor.conf b/vendor.conf index 1ef4bba894..a49dbddabe 100644 --- a/vendor.conf +++ b/vendor.conf @@ -134,7 +134,6 @@ github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72 # composefile -github.com/aanand/compose-file a3e58764f50597b6217fec07e9bff7225c4a1719 
github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715 github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 diff --git a/vendor/github.com/aanand/compose-file/LICENSE b/vendor/github.com/aanand/compose-file/LICENSE deleted file mode 100644 index 0ea3ff81e3..0000000000 --- a/vendor/github.com/aanand/compose-file/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aanand/compose-file/schema/bindata.go b/vendor/github.com/aanand/compose-file/schema/bindata.go deleted file mode 100644 index 167f2245e9..0000000000 --- a/vendor/github.com/aanand/compose-file/schema/bindata.go +++ /dev/null @@ -1,237 +0,0 @@ -// Code generated by go-bindata. -// sources: -// data/config_schema_v3.0.json -// DO NOT EDIT! - -package schema - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _dataConfig_schema_v30Json = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5a\x4d\x93\xdb\x28\x13\xbe\xfb\x57\x4c\x29\xb9\xc5\x33\x93\xaa\x37\xf5\x56\x6d\x6e\x7b\xdc\xd3\xee\x79\x5d\x8a\x0a\x4b\xd8\x26\x23\x09\x02\xc8\x89\x93\xf2\x7f\x5f\x10\x92\x0c\x88\x2f\xdb\x4a\x66\x0f\x3b\x87\xa9\x19\xe8\x6e\xfa\xe3\xa1\x69\x1a\xfd\x58\x3d\x3c\x64\x6f\x59\x79\x80\x0d\xc8\x3e\x3e\x64\x07\xce\xc9\xc7\xe7\xe7\xcf\x0c\xb7\x8f\x6a\xf4\x09\xd3\xfd\x73\x45\xc1\x8e\x3f\xbe\xff\xf0\xac\xc6\xde\x64\x6b\xc9\x87\x2a\xc9\x52\xe2\x76\x87\xf6\x85\x9a\x29\x8e\xff\x7b\x7a\xff\x24\xd9\x15\x09\x3f\x11\x28\x89\xf0\xf6\x33\x2c\xb9\x1a\xa3\xf0\x4b\x87\x28\x94\xcc\x9b\xec\x08\x29\x43\x82\x3a\x5f\xaf\xe4\x1c\xa1\x98\x40\xca\x11\x64\x62\x56\x2a\x27\xc6\x46\x92\x71\x40\x13\xcb\x38\x45\xed\x3e\xeb\x87\xcf\xbd\x04\x31\xc9\x20\x3d\xa2\x52\x93\x30\xa9\xfa\xe6\xf9\x22\xff\x79\x22\x5b\xdb\x52\x35\x65\xfb\x71\x02\x38\x87\xb4\xfd\x6b\xae\x5b\x3f\xfd\x69\x03\x1e\xbf\xff\xfe\xf8\xf7\xfb\xc7\xdf\x9e\x8a\xc7\xfc\xdd\x5b\x63\x5a\xfa\x97\xc2\x9d\x5a\xbe\x82\x3b\xd4\x22\x2e\xac\x99\xd6\xcf\x26\xca\xf3\xf0\xd7\x79\x5a\x18\x54\x55\x4f\x0c\x6a\x63\xed\x1d\xa8\x19\x34\x6d\x6e\x21\xff\x8a\xe9\x4b\xcc\xe6\x89\xec\x95\x6c\x1e\xd6\x77\xd8\x6c\x9a\x73\xc4\x75\xd7\x44\x23\x38\x52\xbd\x92\x31\x6a\xf9\xfb\xe2\xb7\x1a\x8d\x0e\xd2\x2a\x0a\x6d\xed\x5e\x41\x03\xed\x2e\x57\xb9\xd0\xe6\xf7\xd5\xe4\x2c\x8f\x97\x2a\x48\x6a\x7c\x92\x63\x1e\x7f\x28\x82\x06\xb6\x3c\x9b\x5c\x20\xf8\xb6\x1d\xaa\x2b\xdb\xa3\xb8\x85\x7f\x4a\x11\x1b\x6d\xf0\x41\x48\xb6\x36\xb6\x26\xa7\x9f\x37\xfe\xf3\x07\x7c\x9a\xf7\xd8\x32\xcd\x8b\xdc\xc5\xe1\x37\xde\x1b\x15\x5e\x5a\xb9\x00\x97\x2f\x90\xee\x50\x0d\x53\x39\x00\xdd\xb3\x80\xcb\x6a\xc4\x78\x81\x69\x51\x21\xa1\xfd\xd9\x62\x9f\xc9\x8b\xe3\x69\x62\xd5\xfe\xcb\x57\x0e\x81\x59\x09\x48\x21\xc4\x19\x76\x00\x4a\xc1\x29\x5b\x0b\x00\x71\xd8\x30\xb7\x89\x0f\x59\xd7\xa2\x2f\x1d\xfc\x63\x20\xe1\xb4\x83\xb6\xdc\x4a\x28\xb7\xbc\xe0\x3d\xc5\x1d\x29\x08\xa0\x12\x60\x61\xf7\x8b\xb8\x36\x0d\x68\x97\x42\xdd\x35\x76\x24\x78\x5e\x60\x0e\xa0\x16\xd2\xa2\x05\x4d\x0c\x48\x72\xd7\xc1\xb6\x62\x85\x3a\xff\x82\x30\xda\x15\x8a\x9f\x59\x02\xa6\xc3\x70\xd1\x78\x54\x6d\x08\xd8\x4a\x8c\x84\xb6\xd4\x2d\xb3\x18\x0b\x06\x01\x2d\x0f\x37\xf2\xe3\x46\xb8\x2f\xc5\x77\x02\x28\xf4\x44\x30\x52\x78\xf9\xd7\x01\x01\xb6\xc7\x62\xca\x25\x57\xbb\x41\x70\x23\x8a\xdb\x66\xdc\x0d\x29\x09\x66\x4a\xf2\x92\xff\x1b\xc1\x0c\xda\x8e\xb1\x0c\xd4\xa7\x26\x53\x0d\x9f\x8c\x1c\x9b\xd1\x70\xe1\x94\xb6\x6b\xb6\x90\xca\x92\xce\xa0\xdc\x61\xda\x00\xa9\xec\xb8\xb6\x36\x6d\x78\xda\x81\x3c\xdd\x81\xba\x0d\xf2\x58\x07\xb5\xf0\x4e\xfb\xb2\x3c\xc4\x85\x78\x0a\x8a\x03\x66\x3c\x3d\x87\x6b\xec\x07\x08\x6a\x7e\x10\x65\x71\xf9\x12\x60\xd7\xa9\x0c\x6e\xb1\x6c\x0a\xc8\x51\x03\xf6\x71\x22\x52\xc6\x48\x6a\xb0\x85\xf5\x4d\x76\x2e\xea\x7c\x4d\x2c\xde\xef\x25\xa9\x0f\x71\xb3\xca\x65\x98\x8e\x9d\xf9\x15\x45\xe2\x46\x91\x7a\x80\x63\x72\x29\xb8\xec\xc9\x78\x01\xa2\x14\x0a\x56\x9f\x06\xe9\xa7\x27\x55\x7c\x06\x76\x55\xff\x57\x5d\x67\xb9\x5d\x2e\xc8\x9f\xf9\x98\x39\x62\x59\x98\x56\x50\x18\x51\x69\x40\x29\xeb\x06\x0a\x99\x27\xae\x17\xd2\xa1\xd8\x2f\x1a\x5c\xf9\x00\x3a\x23\xb6\x7d\xe3\xcd\xd4\x57\x1f\x84\x3d\xdb\xd5\xf5\x63\x52\xe8\xa2\x17\x88\x88\x35\x3e\xf5\x52\xd5\xbc\xa8\x1b\x87\x58\x4f\x07\x6a\x04\x18\x8c\x6f\x76\xaf\x23\x0d\x69\x88\x1c\x3f\x24\x62\xc2\xc5\xfb\xff\x20\xaf\x87\xd5\x2b\x33\xbd\x46\x8e\x88\xba\xa8\xd2\x6f\x37\x97\x22\x79\x64\xb7\xfd\xe4\x12\x9e\xa0\xca\x9f\x2b\xfa\x0c\xa1\x6f\x30\x82\x29\x9f\xed\xae\x5f\x73\xdc\xab\xa5\xef\x3e\xed\x89\x48\xdc\xa2\x5c\xda\x43\xf3\xd6\xb2\xc5\xb8\x86\xa0\x35\x52\x0f\x85\xa0\x12\x25\x73\x7d\x4a\xa0\x6
4\x1c\xd0\xe8\x85\x82\xc1\xb2\xa3\x88\x9f\x0a\x71\x1e\x2c\x5e\x67\xb0\x43\x53\x30\xf4\x1d\x9a\xd1\xbc\xe4\xfb\x41\x50\x6e\xf0\xf0\x0a\xb5\x42\x1b\xd8\x46\x4d\x64\x1c\x13\x21\x7f\x2f\x30\x17\x35\x53\x92\xee\x29\x28\x61\x21\xb0\x89\x70\xe5\x62\x58\xeb\xb1\xad\x3a\x0a\x24\x9e\x0d\x31\xbc\x21\xbb\x1b\x6f\x07\x9c\xc7\x63\xd6\xd5\xa8\x41\x7e\x30\x3b\xb2\x64\x42\x22\x57\x49\xdc\x9d\xbb\x03\x79\xfb\xa2\xa9\xb8\x66\x08\x6c\x52\x57\xba\x0b\x94\x0e\xe1\xca\x21\xa1\x64\x38\x00\x6a\x46\x29\xa0\x47\xcf\xc0\xf0\x8e\xbb\x19\x5c\x05\x85\x53\x2f\xa3\x83\xdb\xcb\x5b\x0f\x8a\xe4\x4e\xfa\xab\x72\xb2\xad\x46\xee\x4d\x8b\x67\x67\x5a\xec\x58\xb4\xba\xd3\xfb\x8b\x8b\xee\x64\x59\xc2\x48\x64\x57\xc8\xad\xc2\xca\x52\xf7\x8a\x0e\xaf\x75\x9b\x18\x05\xb8\x7a\x7d\x3a\xa9\xdd\xef\xdb\x4c\x80\x1b\x4f\x89\x4b\x97\xd4\xd3\xf8\x93\xf8\xa0\x47\x23\x79\xb8\x7c\xca\x51\x03\x71\xc7\x23\x54\x14\x8a\x31\xcb\xf3\x43\xa6\x33\x84\x89\xb4\x9c\x5a\x0a\xfe\xd2\x4b\x7b\x85\x18\xd8\x5a\xfd\xbf\x29\x47\xdd\x14\x5e\x25\xf6\xd2\x3b\x8d\x04\x57\xa3\x5c\x20\xb6\x81\xda\x5c\x0b\x19\xa9\x51\x09\x58\x2c\xcb\xdc\x71\x85\xec\x48\x05\x38\x2c\xd4\x53\xd2\x55\x79\x3d\x90\xd0\x09\xa0\xa0\xae\xa1\x58\xb4\x49\x49\x90\x22\x06\x35\x38\xdd\x74\xe0\xf5\xec\x3b\x80\xea\x8e\xc2\x02\x94\x7c\x78\xad\x8a\x20\x53\x38\x5f\x38\x06\x3b\x33\x45\xda\x92\x0d\xf8\x56\x8c\xcb\xf6\x24\xce\x6d\xe5\x2d\xbc\x52\x6f\x7f\x1a\x12\x18\xee\x68\x39\x73\xf6\xcd\x21\xba\x1c\xe4\x1e\xc4\x8c\x2b\xce\x4c\x17\x13\x32\x29\x4d\x97\xf3\x28\x7f\xf4\xdc\x18\x2a\xc1\x82\x60\x81\xf6\xd3\x52\x16\x0a\x48\x2b\x27\xa7\x00\xe2\x4e\x04\x4a\x38\xc8\x3a\xa7\x21\x3c\xba\x59\x7b\x86\xaf\xa8\xad\xf0\xd7\x2b\x16\x5c\x0e\x4a\xa4\x16\x45\xa6\x95\xef\xee\x75\xb4\xd0\x1d\x08\x53\xaf\x3e\xd6\xef\x35\xeb\x8e\x53\x7d\xc2\x67\x24\xeb\x4f\x74\xf1\xb7\x4e\x4f\xa6\x2f\x49\x17\xed\xd8\x34\xb0\xc1\xd4\x09\xc0\x80\x8d\x89\x4f\xd3\x31\x0b\x47\xb2\x05\x0e\xb5\xa4\x0e\xdf\x40\x25\x2f\x74\x8b\xdf\x24\xe2\x5d\xbc\x3c\x9e\x8f\x10\x01\xcd\x52\x9b\x23\xb9\xe7\x99\x39\x8f\x60\x63\xed\x79\xaf\x40\xa9\xeb\xec\x17\xc4\xb4\x8e\xeb\x3e\x50\xb0\x6e\x2b\x10\x12\x82\xe6\xe5\xc7\xf9\x10\x9b\x7e\x05\x39\xfb\x2f\x1c\xf7\xe5\xbc\xf1\xb9\xc2\x13\xd5\xcd\x54\x48\xae\x27\x5f\xe5\xc9\x21\xf6\xbe\x15\x2c\xa7\xff\x95\xf5\xdd\x1d\x69\x71\xf8\xb4\x22\x92\x32\x06\xaa\xff\x32\xc6\x20\xe5\xf5\xf1\x15\x38\x13\x6f\xbc\x1c\x5c\x01\x1a\xab\xab\xa4\x81\x67\x7e\x73\x0c\xc5\x39\xb9\x27\x3e\x70\xe4\xa6\x1a\x36\xd9\xc7\xf9\x67\x6b\x66\x0a\x0d\x35\x1c\x46\x12\x4f\x8f\xd4\x5a\x74\x70\x5e\xd8\xf2\x05\x61\xfb\xf4\x2e\x70\x50\x84\xde\xae\x7e\x52\x86\x5d\xa0\x99\xe3\x8e\xa9\x55\x5c\x8e\xde\x9d\x7f\x7b\xe5\xc9\x54\x1a\xff\xec\x4b\x2c\x69\x67\x7b\x9a\x75\x36\x7e\x98\x5d\x36\xf5\x15\x55\x6e\xf8\xc7\x22\x51\x2f\xc1\x5a\x9e\xc8\xf5\x7a\xdb\x17\x46\xe7\xf7\x59\x76\x8f\x6f\xfc\x4e\x2a\x0f\x6f\xf6\xd5\xf8\xfb\xbc\x3a\xaf\xfe\x09\x00\x00\xff\xff\x37\x89\x5b\xf1\x5b\x2a\x00\x00") - -func dataConfig_schema_v30JsonBytes() ([]byte, error) { - return bindataRead( - _dataConfig_schema_v30Json, - "data/config_schema_v3.0.json", - ) -} - -func dataConfig_schema_v30Json() (*asset, error) { - bytes, err := dataConfig_schema_v30JsonBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "data/config_schema_v3.0.json", size: 10843, mode: os.FileMode(420), modTime: time.Unix(1479392593, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. 
-func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "data/config_schema_v3.0.json": dataConfig_schema_v30Json, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. 
-func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} -var _bintree = &bintree{nil, map[string]*bintree{ - "data": &bintree{nil, map[string]*bintree{ - "config_schema_v3.0.json": &bintree{dataConfig_schema_v30Json, map[string]*bintree{}}, - }}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) -} - diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE new file mode 100644 index 0000000000..c67dad612a --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go new file mode 100644 index 0000000000..003e99fadb --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -0,0 +1,772 @@ +// Package difflib is a partial port of Python difflib module. +// +// It provides tools to compare sequences of strings and generate textual diffs. +// +// The following class and functions have been ported: +// +// - SequenceMatcher +// +// - unified_diff +// +// - context_diff +// +// Getting unified diffs was the main goal of the port. Keep in mind this code +// is mostly suitable to output text differences in a human friendly way, there +// are no guarantees generated diffs are consumable by patch(1). +package difflib + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool) *SequenceMatcher { + + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s, _ := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s, _ := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s, _ := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
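GetOpCodes is the lowest-level way for calling code to consume the matcher; a short sketch using only the API introduced above (the two string slices are invented):

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	a := []string{"one", "two", "three"} // hypothetical "before" lines
	b := []string{"one", "three", "four"} // hypothetical "after" lines

	m := difflib.NewMatcher(a, b)
	for _, op := range m.GetOpCodes() {
		// Tag is one of 'r', 'd', 'i', 'e' as documented above.
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
}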
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n)}) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s] = m.fullBCount[s] + 1 + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches += 1 + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return string(w.Bytes()), err +} + +// Convert range to the "ed" format. +func formatRangeContext(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + if length <= 1 { + return fmt.Sprintf("%d", beginning) + } + return fmt.Sprintf("%d,%d", beginning, beginning+length-1) +} + +type ContextDiff UnifiedDiff + +// Compare two sequences of lines; generate the delta as a context diff. +// +// Context diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by diff.Context +// which defaults to three. +// +// By default, the diff control lines (those with *** or ---) are +// created with a trailing newline. +// +// For inputs that do not have trailing newlines, set the diff.Eol +// argument to "" so that the output will be uniformly newline free. +// +// The context diff format normally has a header for filenames and +// modification times. Any or all of these may be specified using +// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. +// The modification times are normally expressed in the ISO 8601 format. +// If not specified, the strings default to blanks. 
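Putting the pieces together, the usual entry point is the UnifiedDiff struct plus GetUnifiedDiffString; a self-contained sketch (the file names and line contents are invented):

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	diff := difflib.UnifiedDiff{
		// Inputs keep their trailing newlines, as the doc comment above recommends.
		A:        []string{"one\n", "two\n", "three\n"}, // hypothetical old contents
		B:        []string{"one\n", "two\n", "four\n"},  // hypothetical new contents
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  3,
	}
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
}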
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + var diffErr error + wf := func(format string, args ...interface{}) { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + if diffErr == nil && err != nil { + diffErr = err + } + } + ws := func(s string) { + _, err := buf.WriteString(s) + if diffErr == nil && err != nil { + diffErr = err + } + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + prefix := map[byte]string{ + 'i': "+ ", + 'd': "- ", + 'r': "! ", + 'e': " ", + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) + wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + } + } + + first, last := g[0], g[len(g)-1] + ws("***************" + diff.Eol) + + range1 := formatRangeContext(first.I1, last.I2) + wf("*** %s ****%s", range1, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'd' { + for _, cc := range g { + if cc.Tag == 'i' { + continue + } + for _, line := range diff.A[cc.I1:cc.I2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + + range2 := formatRangeContext(first.J1, last.J2) + wf("--- %s ----%s", range2, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'i' { + for _, cc := range g { + if cc.Tag == 'd' { + continue + } + for _, line := range diff.B[cc.J1:cc.J2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + } + return diffErr +} + +// Like WriteContextDiff but returns the diff a string. +func GetContextDiffString(diff ContextDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteContextDiff(w, diff) + return string(w.Bytes()), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE new file mode 100644 index 0000000000..473b670a7c --- /dev/null +++ b/vendor/github.com/stretchr/testify/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go new file mode 100644 index 0000000000..e6a796046c --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -0,0 +1,387 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND +*/ + +package assert + +import ( + + http "net/http" + url "net/url" + time "time" +) + + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + return Condition(a.t, comp, msgAndArgs...) +} + + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") +// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return Contains(a.t, s, contains, msgAndArgs...) +} + + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + return Empty(a.t, object, msgAndArgs...) +} + + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return Equal(a.t, expected, actual, msgAndArgs...) +} + + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + return EqualError(a.t, theError, errString, msgAndArgs...) +} + + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return EqualValues(a.t, expected, actual, msgAndArgs...) +} + + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Error(err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). 
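These generated forwarders exist so a test can bind *testing.T once via assert.New (defined in forward_assertions.go later in this diff) and then drop the explicit t argument from every call. A short sketch of that pattern; the test name and values are invented:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestGreeting(t *testing.T) {
	a := assert.New(t) // binds t once; each a.X call forwards to the package-level assert.X

	greeting := "Hello World"
	a.Contains(greeting, "World", "greeting should mention World")
	a.Equal("Hello World", greeting)
	a.NotEmpty(greeting)
}
```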
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { + return Error(a.t, err, msgAndArgs...) +} + + +// Exactly asserts that two objects are equal is value and type. +// +// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return Exactly(a.t, expected, actual, msgAndArgs...) +} + + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { + return Fail(a.t, failureMessage, msgAndArgs...) +} + + +// FailNow fails test +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { + return FailNow(a.t, failureMessage, msgAndArgs...) +} + + +// False asserts that the specified value is false. +// +// a.False(myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + return False(a.t, value, msgAndArgs...) +} + + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { + return HTTPBodyContains(a.t, handler, method, url, values, str) +} + + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { + return HTTPBodyNotContains(a.t, handler, method, url, values, str) +} + + +// HTTPError asserts that a specified handler returns an error status code. +// +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPError(a.t, handler, method, url, values) +} + + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPRedirect(a.t, handler, method, url, values) +} + + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPSuccess(a.t, handler, method, url, values) +} + + +// Implements asserts that an object is implemented by the specified interface. 
+// +// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return Implements(a.t, interfaceObject, object, msgAndArgs...) +} + + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) +} + + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + + +// InEpsilonSlice is the same as InEpsilon, except it compares two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...) +} + + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return IsType(a.t, expectedType, object, msgAndArgs...) +} + + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { + return JSONEq(a.t, expected, actual, msgAndArgs...) +} + + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// a.Len(mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + return Len(a.t, object, length, msgAndArgs...) +} + + +// Nil asserts that the specified object is nil. +// +// a.Nil(err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + return Nil(a.t, object, msgAndArgs...) +} + + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + return NoError(a.t, err, msgAndArgs...) +} + + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. 
+// +// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return NotContains(a.t, s, contains, msgAndArgs...) +} + + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + return NotEmpty(a.t, object, msgAndArgs...) +} + + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return NotEqual(a.t, expected, actual, msgAndArgs...) +} + + +// NotNil asserts that the specified object is not nil. +// +// a.NotNil(err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + return NotNil(a.t, object, msgAndArgs...) +} + + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return NotPanics(a.t, f, msgAndArgs...) +} + + +// NotRegexp asserts that a specified regexp does not match a string. +// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return NotRegexp(a.t, rx, str, msgAndArgs...) +} + + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { + return NotZero(a.t, i, msgAndArgs...) +} + + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return Panics(a.t, f, msgAndArgs...) +} + + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return Regexp(a.t, rx, str, msgAndArgs...) +} + + +// True asserts that the specified value is true. 
+// +// a.True(myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + return True(a.t, value, msgAndArgs...) +} + + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + + +// Zero asserts that i is the zero value for its type and returns the truth. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { + return Zero(a.t, i, msgAndArgs...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go new file mode 100644 index 0000000000..d7c16c5903 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -0,0 +1,1004 @@ +package assert + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "math" + "reflect" + "regexp" + "runtime" + "strings" + "time" + "unicode" + "unicode/utf8" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" +) + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) +} + +// Comparison a custom function that returns true on success and false on failure +type Comparison func() (success bool) + +/* + Helper functions +*/ + +// ObjectsAreEqual determines if two objects are considered equal. +// +// This function does no assertion of any kind. +func ObjectsAreEqual(expected, actual interface{}) bool { + + if expected == nil || actual == nil { + return expected == actual + } + + return reflect.DeepEqual(expected, actual) + +} + +// ObjectsAreEqualValues gets whether two objects are equal, or if their +// values are equal. +func ObjectsAreEqualValues(expected, actual interface{}) bool { + if ObjectsAreEqual(expected, actual) { + return true + } + + actualType := reflect.TypeOf(actual) + if actualType == nil { + return false + } + expectedValue := reflect.ValueOf(expected) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + // Attempt comparison after type conversion + return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + } + + return false +} + +/* CallerInfo is necessary because the assert functions use the testing object +internally, causing it to print the file:line of the assert method, rather than where +the problem actually occured in calling code.*/ + +// CallerInfo returns an array of strings containing the file and line number +// of each stack frame leading from the current test to the assert call that +// failed. 
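Because TestingT only requires Errorf, anything that records formatted messages can stand in for *testing.T, which is handy for checking that an assertion really fails. A hedged sketch of that idea; the captureT type is hypothetical and not part of this package:

```go
package example_test

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// captureT records failures instead of reporting them; having Errorf is
// enough to satisfy assert.TestingT.
type captureT struct {
	messages []string
}

func (c *captureT) Errorf(format string, args ...interface{}) {
	c.messages = append(c.messages, fmt.Sprintf(format, args...))
}

func TestAssertionFailureIsReported(t *testing.T) {
	c := &captureT{}
	ok := assert.Equal(c, 1, 2) // fails, but the failure only lands in c.messages
	if ok || len(c.messages) != 1 {
		t.Fatalf("expected exactly one recorded failure, got ok=%v with %d message(s)", ok, len(c.messages))
	}
}
```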
+func CallerInfo() []string { + + pc := uintptr(0) + file := "" + line := 0 + ok := false + name := "" + + callers := []string{} + for i := 0; ; i++ { + pc, file, line, ok = runtime.Caller(i) + if !ok { + return nil + } + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break + } + + parts := strings.Split(file, "/") + dir := parts[len(parts)-2] + file = parts[len(parts)-1] + if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + // Drop the package + segments := strings.Split(name, ".") + name = segments[len(segments)-1] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + } + + return callers +} + +// Stolen from the `go test` tool. +// isTest tells whether name looks like a test (or benchmark, according to prefix). +// It is a Test (say) if there is a character after Test that is not a lower-case letter. +// We don't want TesticularCancer. +func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(rune) +} + +// getWhitespaceString returns a string that is long enough to overwrite the default +// output from the go testing framework. +func getWhitespaceString() string { + + _, file, line, ok := runtime.Caller(1) + if !ok { + return "" + } + parts := strings.Split(file, "/") + file = parts[len(parts)-1] + + return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) + +} + +func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { + if len(msgAndArgs) == 0 || msgAndArgs == nil { + return "" + } + if len(msgAndArgs) == 1 { + return msgAndArgs[0].(string) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) + } + return "" +} + +// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's +// test printing (see inner comment for specifics) +func indentMessageLines(message string, tabs int) string { + outBuf := new(bytes.Buffer) + + for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { + if i != 0 { + outBuf.WriteRune('\n') + } + for ii := 0; ii < tabs; ii++ { + outBuf.WriteRune('\t') + // Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter + // by 1 prematurely. + if ii == 0 && i > 0 { + ii++ + } + } + outBuf.WriteString(scanner.Text()) + } + + return outBuf.String() +} + +type failNower interface { + FailNow() +} + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + Fail(t, failureMessage, msgAndArgs...) + + // We cannot extend TestingT with FailNow() and + // maintain backwards compatibility, so we fallback + // to panicking when FailNow is not available in + // TestingT. + // See issue #263 + + if t, ok := t.(failNower); ok { + t.FailNow() + } else { + panic("test failed and t is missing `FailNow()`") + } + return false +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + + message := messageFromMsgAndArgs(msgAndArgs...) 
+ + errorTrace := strings.Join(CallerInfo(), "\n\r\t\t\t") + if len(message) > 0 { + t.Errorf("\r%s\r\tError Trace:\t%s\n"+ + "\r\tError:%s\n"+ + "\r\tMessages:\t%s\n\r", + getWhitespaceString(), + errorTrace, + indentMessageLines(failureMessage, 2), + message) + } else { + t.Errorf("\r%s\r\tError Trace:\t%s\n"+ + "\r\tError:%s\n\r", + getWhitespaceString(), + errorTrace, + indentMessageLines(failureMessage, 2)) + } + + return false +} + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if !reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) + } + + return true + +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { + return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) + } + + return true +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqual(expected, actual) { + diff := diff(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ + " != %#v (actual)%s", expected, actual, diff), msgAndArgs...) + } + + return true + +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqualValues(expected, actual) { + return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ + " != %#v (actual)", expected, actual), msgAndArgs...) + } + + return true + +} + +// Exactly asserts that two objects are equal is value and type. +// +// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + aType := reflect.TypeOf(expected) + bType := reflect.TypeOf(actual) + + if aType != bType { + return Fail(t, fmt.Sprintf("Types expected to match exactly\n\r\t%v != %v", aType, bType), msgAndArgs...) + } + + return Equal(t, expected, actual, msgAndArgs...) + +} + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(t, err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if !isNil(object) { + return true + } + return Fail(t, "Expected value not to be nil.", msgAndArgs...) +} + +// isNil checks if a specified object is nil or not, without Failing. 
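Equal, EqualValues and Exactly differ only in how strict they are about types, which the doc comments above hint at. A small passing test that keeps the three apart; the literal values are taken from those doc comments:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEqualityFlavours(t *testing.T) {
	// Equal: same type, same value (reflect.DeepEqual underneath).
	assert.Equal(t, 123, 123)

	// EqualValues: different types pass as long as the values convert
	// to a common type and compare equal.
	assert.EqualValues(t, uint32(123), int32(123))

	// Exactly: type and value must both match, so int32 appears on both sides.
	assert.Exactly(t, int32(123), int32(123))
}
```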
+func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + return true + } + + return false +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if isNil(object) { + return true + } + return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) +} + +var numericZeros = []interface{}{ + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + uint(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), + float32(0), + float64(0), +} + +// isEmpty gets whether the specified object is considered empty or not. +func isEmpty(object interface{}) bool { + + if object == nil { + return true + } else if object == "" { + return true + } else if object == false { + return true + } + + for _, v := range numericZeros { + if object == v { + return true + } + } + + objValue := reflect.ValueOf(object) + + switch objValue.Kind() { + case reflect.Map: + fallthrough + case reflect.Slice, reflect.Chan: + { + return (objValue.Len() == 0) + } + case reflect.Struct: + switch object.(type) { + case time.Time: + return object.(time.Time).IsZero() + } + case reflect.Ptr: + { + if objValue.IsNil() { + return true + } + switch object.(type) { + case *time.Time: + return object.(*time.Time).IsZero() + default: + return false + } + } + } + return false +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +// +// Returns whether the assertion was successful (true) or not (false). +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + pass := isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + pass := !isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// getLen try to get length of object. +// return (false, 0) if impossible. +func getLen(x interface{}) (ok bool, length int) { + v := reflect.ValueOf(x) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + return true, v.Len() +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + ok, l := getLen(object) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + } + + if l != length { + return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) 
+ } + return true +} + +// True asserts that the specified value is true. +// +// assert.True(t, myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + if value != true { + return Fail(t, "Should be true", msgAndArgs...) + } + + return true + +} + +// False asserts that the specified value is false. +// +// assert.False(t, myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + if value != false { + return Fail(t, "Should be false", msgAndArgs...) + } + + return true + +} + +// NotEqual asserts that the specified values are NOT equal. +// +// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if ObjectsAreEqual(expected, actual) { + return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) + } + + return true + +} + +// containsElement try loop over the list check if the list includes the element. +// return (false, false) if impossible. +// return (true, false) if element was not found. +// return (true, true) if element was found. +func includeElement(list interface{}, element interface{}) (ok, found bool) { + + listValue := reflect.ValueOf(list) + elementValue := reflect.ValueOf(element) + defer func() { + if e := recover(); e != nil { + ok = false + found = false + } + }() + + if reflect.TypeOf(list).Kind() == reflect.String { + return true, strings.Contains(listValue.String(), elementValue.String()) + } + + if reflect.TypeOf(list).Kind() == reflect.Map { + mapKeys := listValue.MapKeys() + for i := 0; i < len(mapKeys); i++ { + if ObjectsAreEqual(mapKeys[i].Interface(), element) { + return true, true + } + } + return true, false + } + + for i := 0; i < listValue.Len(); i++ { + if ObjectsAreEqual(listValue.Index(i).Interface(), element) { + return true, true + } + } + return true, false + +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") +// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. 
+// +// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if found { + return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + result := comp() + if !result { + Fail(t, "Condition failed!", msgAndArgs...) + } + return result +} + +// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics +// methods, and represents a simple func that takes no arguments, and returns nothing. +type PanicTestFunc func() + +// didPanic returns true if the function passed to it panics. Otherwise, it returns false. +func didPanic(f PanicTestFunc) (bool, interface{}) { + + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + // call the target function + f() + + }() + + return didPanic, message + +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + + dt := expected.Sub(actual) + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) 
+ } + + return true +} + +func toFloat(x interface{}) (float64, bool) { + var xf float64 + xok := true + + switch xn := x.(type) { + case uint8: + xf = float64(xn) + case uint16: + xf = float64(xn) + case uint32: + xf = float64(xn) + case uint64: + xf = float64(xn) + case int: + xf = float64(xn) + case int8: + xf = float64(xn) + case int16: + xf = float64(xn) + case int32: + xf = float64(xn) + case int64: + xf = float64(xn) + case float32: + xf = float64(xn) + case float64: + xf = float64(xn) + default: + xok = false + } + + return xf, xok +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + } + + if math.IsNaN(af) { + return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...) + } + + if math.IsNaN(bf) { + return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) + } + + dt := af - bf + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) + if !result { + return result + } + } + + return true +} + +func calcRelativeError(expected, actual interface{}) (float64, error) { + af, aok := toFloat(expected) + if !aok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + } + if af == 0 { + return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") + } + bf, bok := toFloat(actual) + if !bok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", actual) + } + + return math.Abs(af-bf) / math.Abs(af), nil +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + actualEpsilon, err := calcRelativeError(expected, actual) + if err != nil { + return Fail(t, err.Error(), msgAndArgs...) + } + if actualEpsilon > epsilon { + return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ + " < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...) + } + + return true +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. 
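InDelta takes an absolute tolerance while InEpsilon takes a relative one, and both have slice variants. A minimal sketch reusing the tolerance from the doc comment above; the slice values are invented:

```go
package example_test

import (
	"math"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestApproximateEquality(t *testing.T) {
	// Absolute tolerance: 22/7 is within 0.01 of pi.
	assert.InDelta(t, math.Pi, 22/7.0, 0.01)

	// Relative tolerance: 10.1 is within 2% of 10.
	assert.InEpsilon(t, 10.0, 10.1, 0.02)

	// Element-wise variant over slices.
	assert.InDeltaSlice(t, []float64{1.0, 2.0}, []float64{1.001, 1.999}, 0.01)
}
```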
+func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) + if !result { + return result + } + } + + return true +} + +/* + Errors +*/ + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if isNil(err) { + return true + } + + return Fail(t, fmt.Sprintf("Received unexpected error %q", err), msgAndArgs...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + + message := messageFromMsgAndArgs(msgAndArgs...) + return NotNil(t, err, "An error is expected but got nil. %s", message) + +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + + message := messageFromMsgAndArgs(msgAndArgs...) + if !NotNil(t, theError, "An error is expected but got nil. %s", message) { + return false + } + s := "An error with value \"%s\" is expected but got \"%s\". %s" + return Equal(t, errString, theError.Error(), + s, errString, theError.Error(), message) +} + +// matchRegexp return true if a specified regexp matches a string. +func matchRegexp(rx interface{}, str interface{}) bool { + + var r *regexp.Regexp + if rr, ok := rx.(*regexp.Regexp); ok { + r = rr + } else { + r = regexp.MustCompile(fmt.Sprint(rx)) + } + + return (r.FindStringIndex(fmt.Sprint(str)) != nil) + +} + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + + match := matchRegexp(rx, str) + + if !match { + Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) + } + + return match +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). 
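The usual pattern with the error assertions is to guard follow-up checks with NoError or Error and to pin exact messages with EqualError, as the doc comments above show. A sketch assuming a hypothetical divide helper:

```go
package example_test

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

// divide is a stand-in function used only to produce an error to assert on.
func divide(a, b int) (int, error) {
	if b == 0 {
		return 0, errors.New("division by zero")
	}
	return a / b, nil
}

func TestDivide(t *testing.T) {
	q, err := divide(10, 2)
	if assert.NoError(t, err) {
		assert.Equal(t, 5, q)
	}

	_, err = divide(1, 0)
	if assert.Error(t, err) {
		assert.EqualError(t, err, "division by zero")
	}
}
```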
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + match := matchRegexp(rx, str) + + if match { + Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) + } + + return !match + +} + +// Zero asserts that i is the zero value for its type and returns the truth. +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + var expectedJSONAsInterface, actualJSONAsInterface interface{} + + if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) + } + + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. +func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + spew.Config.SortKeys = true + e := spew.Sdump(expected) + a := spew.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return "\n\nDiff:\n" + diff +} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go new file mode 100644 index 0000000000..c9dccc4d6c --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -0,0 +1,45 @@ +// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 
+// +// Example Usage +// +// The following is a complete example using assert in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// if you assert many times, use the format below: +// +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// assert := assert.New(t) +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(a, b, "The two words should be the same.") +// } +// +// Assertions +// +// Assertions allow you to easily write test code, and are global funcs in the `assert` package. +// All assertion functions take, as the first argument, the `*testing.T` object provided by the +// testing framework. This allows the assertion funcs to write the failings and other details to +// the correct place. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go new file mode 100644 index 0000000000..ac9dc9d1d6 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/errors.go @@ -0,0 +1,10 @@ +package assert + +import ( + "errors" +) + +// AnError is an error instance useful for testing. If the code does not care +// about error specifics, and only needs to return the error for example, this +// error should be used to make the test code more readable. +var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go new file mode 100644 index 0000000000..b867e95ea5 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -0,0 +1,16 @@ +package assert + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. +func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go new file mode 100644 index 0000000000..e1b9442b5a --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -0,0 +1,106 @@ +package assert + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" +) + +// httpCode is a helper that returns HTTP code of the response. It returns -1 +// if building a new request fails. +func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return -1 + } + handler(w, req) + return w.Code +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). 
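The HTTP helpers drive an http.HandlerFunc through httptest.NewRecorder internally, so a plain handler function is all a test needs to supply. A sketch with an invented pingHandler; passing nil url.Values mirrors the doc examples:

```go
package example_test

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// pingHandler is a hypothetical handler; it echoes the "name" query parameter.
func pingHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, "pong: "+r.URL.Query().Get("name"))
}

func TestPingHandler(t *testing.T) {
	// A nil url.Values is accepted, as in the doc examples above.
	assert.HTTPSuccess(t, pingHandler, "GET", "/ping", nil)
	assert.HTTPBodyContains(t, pingHandler, "GET", "/ping",
		url.Values{"name": []string{"docker"}}, "pong: docker")
}
```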
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { + code := httpCode(handler, method, url, values) + if code == -1 { + return false + } + return code >= http.StatusOK && code <= http.StatusPartialContent +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { + code := httpCode(handler, method, url, values) + if code == -1 { + return false + } + return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { + code := httpCode(handler, method, url, values) + if code == -1 { + return false + } + return code >= http.StatusBadRequest +} + +// HTTPBody is a helper that returns HTTP body of the response. It returns +// empty string if building a new request fails. +func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return "" + } + handler(w, req) + return w.Body.String() +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if !contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return contains +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if contains { + Fail(t, "Expected response body for %s to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body) + } + + return !contains +}
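Rounding out the vendored API: JSONEq compares decoded JSON rather than raw strings, Zero and NotZero compare a value against its type's zero value, and when Equal fails on structs, maps or slices, the diff helper in assertions.go renders the mismatch through the vendored difflib as a unified diff. A small passing sketch of the first two; the JSON strings come from the doc comment, the remaining values are invented:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestMiscAssertions(t *testing.T) {
	// Key order and whitespace do not matter; only the decoded values do.
	assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)

	// Zero / NotZero compare i against reflect.Zero(reflect.TypeOf(i)).
	var name string
	assert.Zero(t, name)
	assert.NotZero(t, 42)
}
```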